branching: merge stable into default
Raphaël Gomès
r50059:a932cad2 merge default

The requested changes are too big and content was truncated.

@@ -1,345 +1,345 @@
#!/usr/bin/env python3
"""usage: %s DOC ...

where DOC is the name of a document
"""


import os
import sys
import textwrap

try:
    import msvcrt

    msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
    msvcrt.setmode(sys.stderr.fileno(), os.O_BINARY)
except ImportError:
    pass

# This script is executed during installs and may not have C extensions
# available. Relax C module requirements.
os.environ['HGMODULEPOLICY'] = 'allow'
# import from the live mercurial repo
-sys.path.insert(0, "..")
+sys.path.insert(0, os.path.abspath(".."))
from mercurial import demandimport

demandimport.enable()

from mercurial import (
    commands,
    encoding,
    extensions,
    fancyopts,
    help,
    minirst,
    pycompat,
    ui as uimod,
)
from mercurial.i18n import (
    gettext,
    _,
)
from mercurial.utils import stringutil

table = commands.table
globalopts = commands.globalopts
helptable = help.helptable
loaddoc = help.loaddoc


def get_desc(docstr):
    if not docstr:
        return b"", b""
    # sanitize
    docstr = docstr.strip(b"\n")
    docstr = docstr.rstrip()
    shortdesc = docstr.splitlines()[0].strip()

    i = docstr.find(b"\n")
    if i != -1:
        desc = docstr[i + 2 :]
    else:
        desc = shortdesc

    desc = textwrap.dedent(desc.decode('latin1')).encode('latin1')

    return (shortdesc, desc)


def get_opts(opts):
    for opt in opts:
        if len(opt) == 5:
            shortopt, longopt, default, desc, optlabel = opt
        else:
            shortopt, longopt, default, desc = opt
            optlabel = _(b"VALUE")
        allopts = []
        if shortopt:
            allopts.append(b"-%s" % shortopt)
        if longopt:
            allopts.append(b"--%s" % longopt)
        if isinstance(default, list):
            allopts[-1] += b" <%s[+]>" % optlabel
        elif (default is not None) and not isinstance(default, bool):
            allopts[-1] += b" <%s>" % optlabel
        if b'\n' in desc:
            # only remove line breaks and indentation
            desc = b' '.join(l.lstrip() for l in desc.split(b'\n'))
        if isinstance(default, fancyopts.customopt):
            default = default.getdefaultvalue()
        if default:
            default = stringutil.forcebytestr(default)
            desc += _(b" (default: %s)") % default
        yield (b", ".join(allopts), desc)


def get_cmd(cmd, cmdtable):
    d = {}
    attr = cmdtable[cmd]
    cmds = cmd.lstrip(b"^").split(b"|")

    d[b'cmd'] = cmds[0]
    d[b'aliases'] = cmd.split(b"|")[1:]
    d[b'desc'] = get_desc(gettext(pycompat.getdoc(attr[0])))
    d[b'opts'] = list(get_opts(attr[1]))

    s = b'hg ' + cmds[0]
    if len(attr) > 2:
        if not attr[2].startswith(b'hg'):
            s += b' ' + attr[2]
        else:
            s = attr[2]
    d[b'synopsis'] = s.strip()

    return d


def showdoc(ui):
    # print options
    ui.write(minirst.section(_(b"Options")))
    multioccur = False
    for optstr, desc in get_opts(globalopts):
        ui.write(b"%s\n    %s\n\n" % (optstr, desc))
        if optstr.endswith(b"[+]>"):
            multioccur = True
    if multioccur:
        ui.write(_(b"\n[+] marked option can be specified multiple times\n"))
    ui.write(b"\n")

    # print cmds
    ui.write(minirst.section(_(b"Commands")))
    commandprinter(ui, table, minirst.subsection, minirst.subsubsection)

    # print help topics
    # The config help topic is included in the hgrc.5 man page.
    helpprinter(ui, helptable, minirst.section, exclude=[b'config'])

    ui.write(minirst.section(_(b"Extensions")))
    ui.write(
        _(
            b"This section contains help for extensions that are "
            b"distributed together with Mercurial. Help for other "
            b"extensions is available in the help system."
        )
    )
    ui.write(
        (
            b"\n\n"
            b".. contents::\n"
            b"   :class: htmlonly\n"
            b"   :local:\n"
            b"   :depth: 1\n\n"
        )
    )

    for extensionname in sorted(allextensionnames()):
        mod = extensions.load(ui, extensionname, None)
        ui.write(minirst.subsection(extensionname))
        ui.write(b"%s\n\n" % gettext(pycompat.getdoc(mod)))
        cmdtable = getattr(mod, 'cmdtable', None)
        if cmdtable:
            ui.write(minirst.subsubsection(_(b'Commands')))
            commandprinter(
                ui,
                cmdtable,
                minirst.subsubsubsection,
                minirst.subsubsubsubsection,
            )


def showtopic(ui, topic):
    extrahelptable = [
        ([b"common"], b'', loaddoc(b'common'), help.TOPIC_CATEGORY_MISC),
        ([b"hg.1"], b'', loaddoc(b'hg.1'), help.TOPIC_CATEGORY_CONFIG),
        ([b"hg-ssh.8"], b'', loaddoc(b'hg-ssh.8'), help.TOPIC_CATEGORY_CONFIG),
        (
            [b"hgignore.5"],
            b'',
            loaddoc(b'hgignore.5'),
            help.TOPIC_CATEGORY_CONFIG,
        ),
        ([b"hgrc.5"], b'', loaddoc(b'hgrc.5'), help.TOPIC_CATEGORY_CONFIG),
        (
            [b"hgignore.5.gendoc"],
            b'',
            loaddoc(b'hgignore'),
            help.TOPIC_CATEGORY_CONFIG,
        ),
        (
            [b"hgrc.5.gendoc"],
            b'',
            loaddoc(b'config'),
            help.TOPIC_CATEGORY_CONFIG,
        ),
    ]
    helpprinter(ui, helptable + extrahelptable, None, include=[topic])


def helpprinter(ui, helptable, sectionfunc, include=[], exclude=[]):
    for h in helptable:
        names, sec, doc = h[0:3]
        if exclude and names[0] in exclude:
            continue
        if include and names[0] not in include:
            continue
        for name in names:
            ui.write(b".. _%s:\n" % name)
        ui.write(b"\n")
        if sectionfunc:
            ui.write(sectionfunc(sec))
        if callable(doc):
            doc = doc(ui)
        ui.write(doc)
        ui.write(b"\n")


def commandprinter(ui, cmdtable, sectionfunc, subsectionfunc):
    """Render restructuredtext describing a list of commands and their
    documentations, grouped by command category.

    Args:
      ui: UI object to write the output to
      cmdtable: a dict that maps a string of the command name plus its aliases
        (separated with pipes) to a 3-tuple of (the command's function, a list
        of its option descriptions, and a string summarizing available
        options). Example, with aliases added for demonstration purposes:

        'phase|alias1|alias2': (
           <function phase at 0x7f0816b05e60>,
           [ ('p', 'public', False, 'set changeset phase to public'),
             ...,
             ('r', 'rev', [], 'target revision', 'REV')],
           '[-p|-d|-s] [-f] [-r] [REV...]'
        )
      sectionfunc: minirst function to format command category headers
      subsectionfunc: minirst function to format command headers
    """
    h = {}
    for c, attr in cmdtable.items():
        f = c.split(b"|")[0]
        f = f.lstrip(b"^")
        h[f] = c
    cmds = h.keys()

    def helpcategory(cmd):
        """Given a canonical command name from `cmds` (above), retrieve its
        help category. If helpcategory is None, default to CATEGORY_NONE.
        """
        fullname = h[cmd]
        details = cmdtable[fullname]
        helpcategory = details[0].helpcategory
        return helpcategory or help.registrar.command.CATEGORY_NONE

    cmdsbycategory = {category: [] for category in help.CATEGORY_ORDER}
    for cmd in cmds:
        # If a command category wasn't registered, the command won't get
        # rendered below, so we raise an AssertionError.
        if helpcategory(cmd) not in cmdsbycategory:
            raise AssertionError(
                "The following command did not register its (category) in "
                "help.CATEGORY_ORDER: %s (%s)" % (cmd, helpcategory(cmd))
            )
        cmdsbycategory[helpcategory(cmd)].append(cmd)

    # Print the help for each command. We present the commands grouped by
    # category, and we use help.CATEGORY_ORDER as a guide for a helpful order
    # in which to present the categories.
    for category in help.CATEGORY_ORDER:
        categorycmds = cmdsbycategory[category]
        if not categorycmds:
            # Skip empty categories
            continue
        # Print a section header for the category.
        # For now, the category header is at the same level as the headers for
        # the commands in the category; this is fixed in the next commit.
        ui.write(sectionfunc(help.CATEGORY_NAMES[category]))
        # Print each command in the category
        for f in sorted(categorycmds):
            if f.startswith(b"debug"):
                continue
            d = get_cmd(h[f], cmdtable)
            ui.write(subsectionfunc(d[b'cmd']))
            # short description
            ui.write(d[b'desc'][0])
            # synopsis
            ui.write(b"::\n\n")
            synopsislines = d[b'synopsis'].splitlines()
            for line in synopsislines:
                # some commands (such as rebase) have a multi-line
                # synopsis
                ui.write(b"   %s\n" % line)
            ui.write(b'\n')
            # description
            ui.write(b"%s\n\n" % d[b'desc'][1])
            # options
            opt_output = list(d[b'opts'])
            if opt_output:
                opts_len = max([len(line[0]) for line in opt_output])
                ui.write(_(b"Options:\n\n"))
                multioccur = False
                for optstr, desc in opt_output:
                    if desc:
                        s = b"%-*s  %s" % (opts_len, optstr, desc)
                    else:
                        s = optstr
                    ui.write(b"%s\n" % s)
                    if optstr.endswith(b"[+]>"):
                        multioccur = True
                if multioccur:
                    ui.write(
                        _(
                            b"\n[+] marked option can be specified"
                            b" multiple times\n"
                        )
                    )
                ui.write(b"\n")
            # aliases
            if d[b'aliases']:
                # Note the empty comment, this is required to separate this
                # (which should be a blockquote) from any preceding things (such
                # as a definition list).
                ui.write(
                    _(b"..\n\n    aliases: %s\n\n") % b" ".join(d[b'aliases'])
                )


def allextensionnames():
    return set(extensions.enabled().keys()) | set(extensions.disabled().keys())


if __name__ == "__main__":
    doc = b'hg.1.gendoc'
    if len(sys.argv) > 1:
        doc = encoding.strtolocal(sys.argv[1])

    ui = uimod.ui.load()
    # Trigger extensions to load. This is disabled by default because it uses
    # the current user's configuration, which is often not what is wanted.
    if encoding.environ.get(b'GENDOC_LOAD_CONFIGURED_EXTENSIONS', b'0') != b'0':
        extensions.loadall(ui)

    if doc == b'hg.1.gendoc':
        showdoc(ui)
    else:
        showtopic(ui, encoding.strtolocal(sys.argv[1]))
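
The change in this first hunk (the file is Mercurial's documentation generator, gendoc.py, judging by the hg.1.gendoc default in its __main__ block) replaces the relative sys.path entry ".." with os.path.abspath(".."). A relative entry, and the package __path__ derived from it, is resolved against the current working directory at the moment an import actually executes; since this script enables demandimport, which defers module loading until first use, that moment can come long after startup, when the working directory may have changed. Plausibly that is what the fix guards against. A minimal sketch of the underlying Python behavior follows; the os.chdir call is illustrative only and not part of the changeset:

    import os
    import sys

    # A relative sys.path entry is looked up against the *current working
    # directory* when an import executes, not when the entry was added.
    sys.path.insert(0, "..")
    os.chdir("/tmp")
    # "import mercurial" would now search /tmp/.. instead of the directory
    # above the script's original location.

    # An absolute entry is pinned once, at insertion time, so a later
    # chdir() cannot redirect the lookup.
    sys.path.insert(0, os.path.abspath(".."))

As the __main__ block above shows, the script is run as "gendoc.py DOC", defaulting to the hg.1.gendoc document when no argument is given.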
@@ -1,4884 +1,4902 b''
1 # debugcommands.py - command processing for debug* commands
1 # debugcommands.py - command processing for debug* commands
2 #
2 #
3 # Copyright 2005-2016 Olivia Mackall <olivia@selenic.com>
3 # Copyright 2005-2016 Olivia Mackall <olivia@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8
8
9 import binascii
9 import binascii
10 import codecs
10 import codecs
11 import collections
11 import collections
12 import contextlib
12 import contextlib
13 import difflib
13 import difflib
14 import errno
14 import errno
15 import glob
15 import glob
16 import operator
16 import operator
17 import os
17 import os
18 import platform
18 import platform
19 import random
19 import random
20 import re
20 import re
21 import socket
21 import socket
22 import ssl
22 import ssl
23 import stat
23 import stat
24 import string
24 import string
25 import subprocess
25 import subprocess
26 import sys
26 import sys
27 import time
27 import time
28
28
29 from .i18n import _
29 from .i18n import _
30 from .node import (
30 from .node import (
31 bin,
31 bin,
32 hex,
32 hex,
33 nullrev,
33 nullrev,
34 short,
34 short,
35 )
35 )
36 from .pycompat import (
36 from .pycompat import (
37 getattr,
37 getattr,
38 open,
38 open,
39 )
39 )
40 from . import (
40 from . import (
41 bundle2,
41 bundle2,
42 bundlerepo,
42 bundlerepo,
43 changegroup,
43 changegroup,
44 cmdutil,
44 cmdutil,
45 color,
45 color,
46 context,
46 context,
47 copies,
47 copies,
48 dagparser,
48 dagparser,
49 dirstateutils,
49 encoding,
50 encoding,
50 error,
51 error,
51 exchange,
52 exchange,
52 extensions,
53 extensions,
53 filemerge,
54 filemerge,
54 filesetlang,
55 filesetlang,
55 formatter,
56 formatter,
56 hg,
57 hg,
57 httppeer,
58 httppeer,
58 localrepo,
59 localrepo,
59 lock as lockmod,
60 lock as lockmod,
60 logcmdutil,
61 logcmdutil,
61 mergestate as mergestatemod,
62 mergestate as mergestatemod,
62 metadata,
63 metadata,
63 obsolete,
64 obsolete,
64 obsutil,
65 obsutil,
65 pathutil,
66 pathutil,
66 phases,
67 phases,
67 policy,
68 policy,
68 pvec,
69 pvec,
69 pycompat,
70 pycompat,
70 registrar,
71 registrar,
71 repair,
72 repair,
72 repoview,
73 repoview,
73 requirements,
74 requirements,
74 revlog,
75 revlog,
75 revset,
76 revset,
76 revsetlang,
77 revsetlang,
77 scmutil,
78 scmutil,
78 setdiscovery,
79 setdiscovery,
79 simplemerge,
80 simplemerge,
80 sshpeer,
81 sshpeer,
81 sslutil,
82 sslutil,
82 streamclone,
83 streamclone,
83 strip,
84 strip,
84 tags as tagsmod,
85 tags as tagsmod,
85 templater,
86 templater,
86 treediscovery,
87 treediscovery,
87 upgrade,
88 upgrade,
88 url as urlmod,
89 url as urlmod,
89 util,
90 util,
90 vfs as vfsmod,
91 vfs as vfsmod,
91 wireprotoframing,
92 wireprotoframing,
92 wireprotoserver,
93 wireprotoserver,
93 )
94 )
94 from .interfaces import repository
95 from .interfaces import repository
95 from .utils import (
96 from .utils import (
96 cborutil,
97 cborutil,
97 compression,
98 compression,
98 dateutil,
99 dateutil,
99 procutil,
100 procutil,
100 stringutil,
101 stringutil,
101 urlutil,
102 urlutil,
102 )
103 )
103
104
104 from .revlogutils import (
105 from .revlogutils import (
105 deltas as deltautil,
106 deltas as deltautil,
106 nodemap,
107 nodemap,
107 rewrite,
108 rewrite,
108 sidedata,
109 sidedata,
109 )
110 )
110
111
111 release = lockmod.release
112 release = lockmod.release
112
113
113 table = {}
114 table = {}
114 table.update(strip.command._table)
115 table.update(strip.command._table)
115 command = registrar.command(table)
116 command = registrar.command(table)
116
117
117
118
118 @command(b'debugancestor', [], _(b'[INDEX] REV1 REV2'), optionalrepo=True)
119 @command(b'debugancestor', [], _(b'[INDEX] REV1 REV2'), optionalrepo=True)
119 def debugancestor(ui, repo, *args):
120 def debugancestor(ui, repo, *args):
120 """find the ancestor revision of two revisions in a given index"""
121 """find the ancestor revision of two revisions in a given index"""
121 if len(args) == 3:
122 if len(args) == 3:
122 index, rev1, rev2 = args
123 index, rev1, rev2 = args
123 r = revlog.revlog(vfsmod.vfs(encoding.getcwd(), audit=False), index)
124 r = revlog.revlog(vfsmod.vfs(encoding.getcwd(), audit=False), index)
124 lookup = r.lookup
125 lookup = r.lookup
125 elif len(args) == 2:
126 elif len(args) == 2:
126 if not repo:
127 if not repo:
127 raise error.Abort(
128 raise error.Abort(
128 _(b'there is no Mercurial repository here (.hg not found)')
129 _(b'there is no Mercurial repository here (.hg not found)')
129 )
130 )
130 rev1, rev2 = args
131 rev1, rev2 = args
131 r = repo.changelog
132 r = repo.changelog
132 lookup = repo.lookup
133 lookup = repo.lookup
133 else:
134 else:
134 raise error.Abort(_(b'either two or three arguments required'))
135 raise error.Abort(_(b'either two or three arguments required'))
135 a = r.ancestor(lookup(rev1), lookup(rev2))
136 a = r.ancestor(lookup(rev1), lookup(rev2))
136 ui.write(b'%d:%s\n' % (r.rev(a), hex(a)))
137 ui.write(b'%d:%s\n' % (r.rev(a), hex(a)))
137
138
138
139
139 @command(b'debugantivirusrunning', [])
140 @command(b'debugantivirusrunning', [])
140 def debugantivirusrunning(ui, repo):
141 def debugantivirusrunning(ui, repo):
141 """attempt to trigger an antivirus scanner to see if one is active"""
142 """attempt to trigger an antivirus scanner to see if one is active"""
142 with repo.cachevfs.open('eicar-test-file.com', b'wb') as f:
143 with repo.cachevfs.open('eicar-test-file.com', b'wb') as f:
143 f.write(
144 f.write(
144 util.b85decode(
145 util.b85decode(
145 # This is a base85-armored version of the EICAR test file. See
146 # This is a base85-armored version of the EICAR test file. See
146 # https://en.wikipedia.org/wiki/EICAR_test_file for details.
147 # https://en.wikipedia.org/wiki/EICAR_test_file for details.
147 b'ST#=}P$fV?P+K%yP+C|uG$>GBDK|qyDK~v2MM*<JQY}+dK~6+LQba95P'
148 b'ST#=}P$fV?P+K%yP+C|uG$>GBDK|qyDK~v2MM*<JQY}+dK~6+LQba95P'
148 b'E<)&Nm5l)EmTEQR4qnHOhq9iNGnJx'
149 b'E<)&Nm5l)EmTEQR4qnHOhq9iNGnJx'
149 )
150 )
150 )
151 )
151 # Give an AV engine time to scan the file.
152 # Give an AV engine time to scan the file.
152 time.sleep(2)
153 time.sleep(2)
153 util.unlink(repo.cachevfs.join('eicar-test-file.com'))
154 util.unlink(repo.cachevfs.join('eicar-test-file.com'))
154
155
155
156
156 @command(b'debugapplystreamclonebundle', [], b'FILE')
157 @command(b'debugapplystreamclonebundle', [], b'FILE')
157 def debugapplystreamclonebundle(ui, repo, fname):
158 def debugapplystreamclonebundle(ui, repo, fname):
158 """apply a stream clone bundle file"""
159 """apply a stream clone bundle file"""
159 f = hg.openpath(ui, fname)
160 f = hg.openpath(ui, fname)
160 gen = exchange.readbundle(ui, f, fname)
161 gen = exchange.readbundle(ui, f, fname)
161 gen.apply(repo)
162 gen.apply(repo)
162
163
163
164
164 @command(
165 @command(
165 b'debugbuilddag',
166 b'debugbuilddag',
166 [
167 [
167 (
168 (
168 b'm',
169 b'm',
169 b'mergeable-file',
170 b'mergeable-file',
170 None,
171 None,
171 _(b'add single file mergeable changes'),
172 _(b'add single file mergeable changes'),
172 ),
173 ),
173 (
174 (
174 b'o',
175 b'o',
175 b'overwritten-file',
176 b'overwritten-file',
176 None,
177 None,
177 _(b'add single file all revs overwrite'),
178 _(b'add single file all revs overwrite'),
178 ),
179 ),
179 (b'n', b'new-file', None, _(b'add new file at each rev')),
180 (b'n', b'new-file', None, _(b'add new file at each rev')),
180 (
181 (
181 b'',
182 b'',
182 b'from-existing',
183 b'from-existing',
183 None,
184 None,
184 _(b'continue from a non-empty repository'),
185 _(b'continue from a non-empty repository'),
185 ),
186 ),
186 ],
187 ],
187 _(b'[OPTION]... [TEXT]'),
188 _(b'[OPTION]... [TEXT]'),
188 )
189 )
189 def debugbuilddag(
190 def debugbuilddag(
190 ui,
191 ui,
191 repo,
192 repo,
192 text=None,
193 text=None,
193 mergeable_file=False,
194 mergeable_file=False,
194 overwritten_file=False,
195 overwritten_file=False,
195 new_file=False,
196 new_file=False,
196 from_existing=False,
197 from_existing=False,
197 ):
198 ):
198 """builds a repo with a given DAG from scratch in the current empty repo
199 """builds a repo with a given DAG from scratch in the current empty repo
199
200
200 The description of the DAG is read from stdin if not given on the
201 The description of the DAG is read from stdin if not given on the
201 command line.
202 command line.
202
203
203 Elements:
204 Elements:
204
205
205 - "+n" is a linear run of n nodes based on the current default parent
206 - "+n" is a linear run of n nodes based on the current default parent
206 - "." is a single node based on the current default parent
207 - "." is a single node based on the current default parent
207 - "$" resets the default parent to null (implied at the start);
208 - "$" resets the default parent to null (implied at the start);
208 otherwise the default parent is always the last node created
209 otherwise the default parent is always the last node created
209 - "<p" sets the default parent to the backref p
210 - "<p" sets the default parent to the backref p
210 - "*p" is a fork at parent p, which is a backref
211 - "*p" is a fork at parent p, which is a backref
211 - "*p1/p2" is a merge of parents p1 and p2, which are backrefs
212 - "*p1/p2" is a merge of parents p1 and p2, which are backrefs
212 - "/p2" is a merge of the preceding node and p2
213 - "/p2" is a merge of the preceding node and p2
213 - ":tag" defines a local tag for the preceding node
214 - ":tag" defines a local tag for the preceding node
214 - "@branch" sets the named branch for subsequent nodes
215 - "@branch" sets the named branch for subsequent nodes
215 - "#...\\n" is a comment up to the end of the line
216 - "#...\\n" is a comment up to the end of the line
216
217
217 Whitespace between the above elements is ignored.
218 Whitespace between the above elements is ignored.
218
219
219 A backref is either
220 A backref is either
220
221
221 - a number n, which references the node curr-n, where curr is the current
222 - a number n, which references the node curr-n, where curr is the current
222 node, or
223 node, or
223 - the name of a local tag you placed earlier using ":tag", or
224 - the name of a local tag you placed earlier using ":tag", or
224 - empty to denote the default parent.
225 - empty to denote the default parent.
225
226
226 All string valued-elements are either strictly alphanumeric, or must
227 All string valued-elements are either strictly alphanumeric, or must
227 be enclosed in double quotes ("..."), with "\\" as escape character.
228 be enclosed in double quotes ("..."), with "\\" as escape character.
228 """
229 """
229
230
230 if text is None:
231 if text is None:
231 ui.status(_(b"reading DAG from stdin\n"))
232 ui.status(_(b"reading DAG from stdin\n"))
232 text = ui.fin.read()
233 text = ui.fin.read()
233
234
234 cl = repo.changelog
235 cl = repo.changelog
235 if len(cl) > 0 and not from_existing:
236 if len(cl) > 0 and not from_existing:
236 raise error.Abort(_(b'repository is not empty'))
237 raise error.Abort(_(b'repository is not empty'))
237
238
238 # determine number of revs in DAG
239 # determine number of revs in DAG
239 total = 0
240 total = 0
240 for type, data in dagparser.parsedag(text):
241 for type, data in dagparser.parsedag(text):
241 if type == b'n':
242 if type == b'n':
242 total += 1
243 total += 1
243
244
244 if mergeable_file:
245 if mergeable_file:
245 linesperrev = 2
246 linesperrev = 2
246 # make a file with k lines per rev
247 # make a file with k lines per rev
247 initialmergedlines = [
248 initialmergedlines = [
248 b'%d' % i for i in pycompat.xrange(0, total * linesperrev)
249 b'%d' % i for i in pycompat.xrange(0, total * linesperrev)
249 ]
250 ]
250 initialmergedlines.append(b"")
251 initialmergedlines.append(b"")
251
252
252 tags = []
253 tags = []
253 progress = ui.makeprogress(
254 progress = ui.makeprogress(
254 _(b'building'), unit=_(b'revisions'), total=total
255 _(b'building'), unit=_(b'revisions'), total=total
255 )
256 )
256 with progress, repo.wlock(), repo.lock(), repo.transaction(b"builddag"):
257 with progress, repo.wlock(), repo.lock(), repo.transaction(b"builddag"):
257 at = -1
258 at = -1
258 atbranch = b'default'
259 atbranch = b'default'
259 nodeids = []
260 nodeids = []
260 id = 0
261 id = 0
261 progress.update(id)
262 progress.update(id)
262 for type, data in dagparser.parsedag(text):
263 for type, data in dagparser.parsedag(text):
263 if type == b'n':
264 if type == b'n':
264 ui.note((b'node %s\n' % pycompat.bytestr(data)))
265 ui.note((b'node %s\n' % pycompat.bytestr(data)))
265 id, ps = data
266 id, ps = data
266
267
267 files = []
268 files = []
268 filecontent = {}
269 filecontent = {}
269
270
270 p2 = None
271 p2 = None
271 if mergeable_file:
272 if mergeable_file:
272 fn = b"mf"
273 fn = b"mf"
273 p1 = repo[ps[0]]
274 p1 = repo[ps[0]]
274 if len(ps) > 1:
275 if len(ps) > 1:
275 p2 = repo[ps[1]]
276 p2 = repo[ps[1]]
276 pa = p1.ancestor(p2)
277 pa = p1.ancestor(p2)
277 base, local, other = [
278 base, local, other = [
278 x[fn].data() for x in (pa, p1, p2)
279 x[fn].data() for x in (pa, p1, p2)
279 ]
280 ]
280 m3 = simplemerge.Merge3Text(base, local, other)
281 m3 = simplemerge.Merge3Text(base, local, other)
281 ml = [
282 ml = [
282 l.strip()
283 l.strip()
283 for l in simplemerge.render_minimized(m3)[0]
284 for l in simplemerge.render_minimized(m3)[0]
284 ]
285 ]
285 ml.append(b"")
286 ml.append(b"")
286 elif at > 0:
287 elif at > 0:
287 ml = p1[fn].data().split(b"\n")
288 ml = p1[fn].data().split(b"\n")
288 else:
289 else:
289 ml = initialmergedlines
290 ml = initialmergedlines
290 ml[id * linesperrev] += b" r%i" % id
291 ml[id * linesperrev] += b" r%i" % id
291 mergedtext = b"\n".join(ml)
292 mergedtext = b"\n".join(ml)
292 files.append(fn)
293 files.append(fn)
293 filecontent[fn] = mergedtext
294 filecontent[fn] = mergedtext
294
295
295 if overwritten_file:
296 if overwritten_file:
296 fn = b"of"
297 fn = b"of"
297 files.append(fn)
298 files.append(fn)
298 filecontent[fn] = b"r%i\n" % id
299 filecontent[fn] = b"r%i\n" % id
299
300
300 if new_file:
301 if new_file:
301 fn = b"nf%i" % id
302 fn = b"nf%i" % id
302 files.append(fn)
303 files.append(fn)
303 filecontent[fn] = b"r%i\n" % id
304 filecontent[fn] = b"r%i\n" % id
304 if len(ps) > 1:
305 if len(ps) > 1:
305 if not p2:
306 if not p2:
306 p2 = repo[ps[1]]
307 p2 = repo[ps[1]]
307 for fn in p2:
308 for fn in p2:
308 if fn.startswith(b"nf"):
309 if fn.startswith(b"nf"):
309 files.append(fn)
310 files.append(fn)
310 filecontent[fn] = p2[fn].data()
311 filecontent[fn] = p2[fn].data()
311
312
312 def fctxfn(repo, cx, path):
313 def fctxfn(repo, cx, path):
313 if path in filecontent:
314 if path in filecontent:
314 return context.memfilectx(
315 return context.memfilectx(
315 repo, cx, path, filecontent[path]
316 repo, cx, path, filecontent[path]
316 )
317 )
317 return None
318 return None
318
319
319 if len(ps) == 0 or ps[0] < 0:
320 if len(ps) == 0 or ps[0] < 0:
320 pars = [None, None]
321 pars = [None, None]
321 elif len(ps) == 1:
322 elif len(ps) == 1:
322 pars = [nodeids[ps[0]], None]
323 pars = [nodeids[ps[0]], None]
323 else:
324 else:
324 pars = [nodeids[p] for p in ps]
325 pars = [nodeids[p] for p in ps]
325 cx = context.memctx(
326 cx = context.memctx(
326 repo,
327 repo,
327 pars,
328 pars,
328 b"r%i" % id,
329 b"r%i" % id,
329 files,
330 files,
330 fctxfn,
331 fctxfn,
331 date=(id, 0),
332 date=(id, 0),
332 user=b"debugbuilddag",
333 user=b"debugbuilddag",
333 extra={b'branch': atbranch},
334 extra={b'branch': atbranch},
334 )
335 )
335 nodeid = repo.commitctx(cx)
336 nodeid = repo.commitctx(cx)
336 nodeids.append(nodeid)
337 nodeids.append(nodeid)
337 at = id
338 at = id
338 elif type == b'l':
339 elif type == b'l':
339 id, name = data
340 id, name = data
340 ui.note((b'tag %s\n' % name))
341 ui.note((b'tag %s\n' % name))
341 tags.append(b"%s %s\n" % (hex(repo.changelog.node(id)), name))
342 tags.append(b"%s %s\n" % (hex(repo.changelog.node(id)), name))
342 elif type == b'a':
343 elif type == b'a':
343 ui.note((b'branch %s\n' % data))
344 ui.note((b'branch %s\n' % data))
344 atbranch = data
345 atbranch = data
345 progress.update(id)
346 progress.update(id)
346
347
347 if tags:
348 if tags:
348 repo.vfs.write(b"localtags", b"".join(tags))
349 repo.vfs.write(b"localtags", b"".join(tags))
349
350
350
351
351 def _debugchangegroup(ui, gen, all=None, indent=0, **opts):
352 def _debugchangegroup(ui, gen, all=None, indent=0, **opts):
352 indent_string = b' ' * indent
353 indent_string = b' ' * indent
353 if all:
354 if all:
354 ui.writenoi18n(
355 ui.writenoi18n(
355 b"%sformat: id, p1, p2, cset, delta base, len(delta)\n"
356 b"%sformat: id, p1, p2, cset, delta base, len(delta)\n"
356 % indent_string
357 % indent_string
357 )
358 )
358
359
359 def showchunks(named):
360 def showchunks(named):
360 ui.write(b"\n%s%s\n" % (indent_string, named))
361 ui.write(b"\n%s%s\n" % (indent_string, named))
361 for deltadata in gen.deltaiter():
362 for deltadata in gen.deltaiter():
362 node, p1, p2, cs, deltabase, delta, flags, sidedata = deltadata
363 node, p1, p2, cs, deltabase, delta, flags, sidedata = deltadata
363 ui.write(
364 ui.write(
364 b"%s%s %s %s %s %s %d\n"
365 b"%s%s %s %s %s %s %d\n"
365 % (
366 % (
366 indent_string,
367 indent_string,
367 hex(node),
368 hex(node),
368 hex(p1),
369 hex(p1),
369 hex(p2),
370 hex(p2),
370 hex(cs),
371 hex(cs),
371 hex(deltabase),
372 hex(deltabase),
372 len(delta),
373 len(delta),
373 )
374 )
374 )
375 )
375
376
376 gen.changelogheader()
377 gen.changelogheader()
377 showchunks(b"changelog")
378 showchunks(b"changelog")
378 gen.manifestheader()
379 gen.manifestheader()
379 showchunks(b"manifest")
380 showchunks(b"manifest")
380 for chunkdata in iter(gen.filelogheader, {}):
381 for chunkdata in iter(gen.filelogheader, {}):
381 fname = chunkdata[b'filename']
382 fname = chunkdata[b'filename']
382 showchunks(fname)
383 showchunks(fname)
383 else:
384 else:
384 if isinstance(gen, bundle2.unbundle20):
385 if isinstance(gen, bundle2.unbundle20):
385 raise error.Abort(_(b'use debugbundle2 for this file'))
386 raise error.Abort(_(b'use debugbundle2 for this file'))
386 gen.changelogheader()
387 gen.changelogheader()
387 for deltadata in gen.deltaiter():
388 for deltadata in gen.deltaiter():
388 node, p1, p2, cs, deltabase, delta, flags, sidedata = deltadata
389 node, p1, p2, cs, deltabase, delta, flags, sidedata = deltadata
389 ui.write(b"%s%s\n" % (indent_string, hex(node)))
390 ui.write(b"%s%s\n" % (indent_string, hex(node)))
390
391
391
392
392 def _debugobsmarkers(ui, part, indent=0, **opts):
393 def _debugobsmarkers(ui, part, indent=0, **opts):
393 """display version and markers contained in 'data'"""
394 """display version and markers contained in 'data'"""
394 opts = pycompat.byteskwargs(opts)
395 opts = pycompat.byteskwargs(opts)
395 data = part.read()
396 data = part.read()
396 indent_string = b' ' * indent
397 indent_string = b' ' * indent
397 try:
398 try:
398 version, markers = obsolete._readmarkers(data)
399 version, markers = obsolete._readmarkers(data)
399 except error.UnknownVersion as exc:
400 except error.UnknownVersion as exc:
400 msg = b"%sunsupported version: %s (%d bytes)\n"
401 msg = b"%sunsupported version: %s (%d bytes)\n"
401 msg %= indent_string, exc.version, len(data)
402 msg %= indent_string, exc.version, len(data)
402 ui.write(msg)
403 ui.write(msg)
403 else:
404 else:
404 msg = b"%sversion: %d (%d bytes)\n"
405 msg = b"%sversion: %d (%d bytes)\n"
405 msg %= indent_string, version, len(data)
406 msg %= indent_string, version, len(data)
406 ui.write(msg)
407 ui.write(msg)
407 fm = ui.formatter(b'debugobsolete', opts)
408 fm = ui.formatter(b'debugobsolete', opts)
408 for rawmarker in sorted(markers):
409 for rawmarker in sorted(markers):
409 m = obsutil.marker(None, rawmarker)
410 m = obsutil.marker(None, rawmarker)
410 fm.startitem()
411 fm.startitem()
411 fm.plain(indent_string)
412 fm.plain(indent_string)
412 cmdutil.showmarker(fm, m)
413 cmdutil.showmarker(fm, m)
413 fm.end()
414 fm.end()
414
415
415
416
416 def _debugphaseheads(ui, data, indent=0):
417 def _debugphaseheads(ui, data, indent=0):
417 """display version and markers contained in 'data'"""
418 """display version and markers contained in 'data'"""
418 indent_string = b' ' * indent
419 indent_string = b' ' * indent
419 headsbyphase = phases.binarydecode(data)
420 headsbyphase = phases.binarydecode(data)
420 for phase in phases.allphases:
421 for phase in phases.allphases:
421 for head in headsbyphase[phase]:
422 for head in headsbyphase[phase]:
422 ui.write(indent_string)
423 ui.write(indent_string)
423 ui.write(b'%s %s\n' % (hex(head), phases.phasenames[phase]))
424 ui.write(b'%s %s\n' % (hex(head), phases.phasenames[phase]))
424
425
425
426
426 def _quasirepr(thing):
427 def _quasirepr(thing):
427 if isinstance(thing, (dict, util.sortdict, collections.OrderedDict)):
428 if isinstance(thing, (dict, util.sortdict, collections.OrderedDict)):
428 return b'{%s}' % (
429 return b'{%s}' % (
429 b', '.join(b'%s: %s' % (k, thing[k]) for k in sorted(thing))
430 b', '.join(b'%s: %s' % (k, thing[k]) for k in sorted(thing))
430 )
431 )
431 return pycompat.bytestr(repr(thing))
432 return pycompat.bytestr(repr(thing))
432
433
433
434
434 def _debugbundle2(ui, gen, all=None, **opts):
435 def _debugbundle2(ui, gen, all=None, **opts):
435 """lists the contents of a bundle2"""
436 """lists the contents of a bundle2"""
436 if not isinstance(gen, bundle2.unbundle20):
437 if not isinstance(gen, bundle2.unbundle20):
437 raise error.Abort(_(b'not a bundle2 file'))
438 raise error.Abort(_(b'not a bundle2 file'))
438 ui.write((b'Stream params: %s\n' % _quasirepr(gen.params)))
439 ui.write((b'Stream params: %s\n' % _quasirepr(gen.params)))
439 parttypes = opts.get('part_type', [])
440 parttypes = opts.get('part_type', [])
440 for part in gen.iterparts():
441 for part in gen.iterparts():
441 if parttypes and part.type not in parttypes:
442 if parttypes and part.type not in parttypes:
442 continue
443 continue
443 msg = b'%s -- %s (mandatory: %r)\n'
444 msg = b'%s -- %s (mandatory: %r)\n'
444 ui.write((msg % (part.type, _quasirepr(part.params), part.mandatory)))
445 ui.write((msg % (part.type, _quasirepr(part.params), part.mandatory)))
445 if part.type == b'changegroup':
446 if part.type == b'changegroup':
446 version = part.params.get(b'version', b'01')
447 version = part.params.get(b'version', b'01')
447 cg = changegroup.getunbundler(version, part, b'UN')
448 cg = changegroup.getunbundler(version, part, b'UN')
448 if not ui.quiet:
449 if not ui.quiet:
449 _debugchangegroup(ui, cg, all=all, indent=4, **opts)
450 _debugchangegroup(ui, cg, all=all, indent=4, **opts)
450 if part.type == b'obsmarkers':
451 if part.type == b'obsmarkers':
451 if not ui.quiet:
452 if not ui.quiet:
452 _debugobsmarkers(ui, part, indent=4, **opts)
453 _debugobsmarkers(ui, part, indent=4, **opts)
453 if part.type == b'phase-heads':
454 if part.type == b'phase-heads':
454 if not ui.quiet:
455 if not ui.quiet:
455 _debugphaseheads(ui, part, indent=4)
456 _debugphaseheads(ui, part, indent=4)
456
457
457
458
458 @command(
459 @command(
459 b'debugbundle',
460 b'debugbundle',
460 [
461 [
461 (b'a', b'all', None, _(b'show all details')),
462 (b'a', b'all', None, _(b'show all details')),
462 (b'', b'part-type', [], _(b'show only the named part type')),
463 (b'', b'part-type', [], _(b'show only the named part type')),
463 (b'', b'spec', None, _(b'print the bundlespec of the bundle')),
464 (b'', b'spec', None, _(b'print the bundlespec of the bundle')),
464 ],
465 ],
465 _(b'FILE'),
466 _(b'FILE'),
466 norepo=True,
467 norepo=True,
467 )
468 )
468 def debugbundle(ui, bundlepath, all=None, spec=None, **opts):
469 def debugbundle(ui, bundlepath, all=None, spec=None, **opts):
469 """lists the contents of a bundle"""
470 """lists the contents of a bundle"""
470 with hg.openpath(ui, bundlepath) as f:
471 with hg.openpath(ui, bundlepath) as f:
471 if spec:
472 if spec:
472 spec = exchange.getbundlespec(ui, f)
473 spec = exchange.getbundlespec(ui, f)
473 ui.write(b'%s\n' % spec)
474 ui.write(b'%s\n' % spec)
474 return
475 return
475
476
476 gen = exchange.readbundle(ui, f, bundlepath)
477 gen = exchange.readbundle(ui, f, bundlepath)
477 if isinstance(gen, bundle2.unbundle20):
478 if isinstance(gen, bundle2.unbundle20):
478 return _debugbundle2(ui, gen, all=all, **opts)
479 return _debugbundle2(ui, gen, all=all, **opts)
479 _debugchangegroup(ui, gen, all=all, **opts)
480 _debugchangegroup(ui, gen, all=all, **opts)
480
481
481
482
482 @command(b'debugcapabilities', [], _(b'PATH'), norepo=True)
483 @command(b'debugcapabilities', [], _(b'PATH'), norepo=True)
483 def debugcapabilities(ui, path, **opts):
484 def debugcapabilities(ui, path, **opts):
484 """lists the capabilities of a remote peer"""
485 """lists the capabilities of a remote peer"""
485 opts = pycompat.byteskwargs(opts)
486 opts = pycompat.byteskwargs(opts)
486 peer = hg.peer(ui, opts, path)
487 peer = hg.peer(ui, opts, path)
487 try:
488 try:
488 caps = peer.capabilities()
489 caps = peer.capabilities()
489 ui.writenoi18n(b'Main capabilities:\n')
490 ui.writenoi18n(b'Main capabilities:\n')
490 for c in sorted(caps):
491 for c in sorted(caps):
491 ui.write(b' %s\n' % c)
492 ui.write(b' %s\n' % c)
492 b2caps = bundle2.bundle2caps(peer)
493 b2caps = bundle2.bundle2caps(peer)
493 if b2caps:
494 if b2caps:
494 ui.writenoi18n(b'Bundle2 capabilities:\n')
495 ui.writenoi18n(b'Bundle2 capabilities:\n')
495 for key, values in sorted(b2caps.items()):
496 for key, values in sorted(b2caps.items()):
496 ui.write(b' %s\n' % key)
497 ui.write(b' %s\n' % key)
497 for v in values:
498 for v in values:
498 ui.write(b' %s\n' % v)
499 ui.write(b' %s\n' % v)
499 finally:
500 finally:
500 peer.close()
501 peer.close()
501
502
502
503
503 @command(
504 @command(
504 b'debugchangedfiles',
505 b'debugchangedfiles',
505 [
506 [
506 (
507 (
507 b'',
508 b'',
508 b'compute',
509 b'compute',
509 False,
510 False,
510 b"compute information instead of reading it from storage",
511 b"compute information instead of reading it from storage",
511 ),
512 ),
512 ],
513 ],
513 b'REV',
514 b'REV',
514 )
515 )
515 def debugchangedfiles(ui, repo, rev, **opts):
516 def debugchangedfiles(ui, repo, rev, **opts):
516 """list the stored files changes for a revision"""
517 """list the stored files changes for a revision"""
517 ctx = logcmdutil.revsingle(repo, rev, None)
518 ctx = logcmdutil.revsingle(repo, rev, None)
518 files = None
519 files = None
519
520
520 if opts['compute']:
521 if opts['compute']:
521 files = metadata.compute_all_files_changes(ctx)
522 files = metadata.compute_all_files_changes(ctx)
522 else:
523 else:
523 sd = repo.changelog.sidedata(ctx.rev())
524 sd = repo.changelog.sidedata(ctx.rev())
524 files_block = sd.get(sidedata.SD_FILES)
525 files_block = sd.get(sidedata.SD_FILES)
525 if files_block is not None:
526 if files_block is not None:
526 files = metadata.decode_files_sidedata(sd)
527 files = metadata.decode_files_sidedata(sd)
527 if files is not None:
528 if files is not None:
528 for f in sorted(files.touched):
529 for f in sorted(files.touched):
529 if f in files.added:
530 if f in files.added:
530 action = b"added"
531 action = b"added"
531 elif f in files.removed:
532 elif f in files.removed:
532 action = b"removed"
533 action = b"removed"
533 elif f in files.merged:
534 elif f in files.merged:
534 action = b"merged"
535 action = b"merged"
535 elif f in files.salvaged:
536 elif f in files.salvaged:
536 action = b"salvaged"
537 action = b"salvaged"
537 else:
538 else:
538 action = b"touched"
539 action = b"touched"
539
540
540 copy_parent = b""
541 copy_parent = b""
541 copy_source = b""
542 copy_source = b""
542 if f in files.copied_from_p1:
543 if f in files.copied_from_p1:
543 copy_parent = b"p1"
544 copy_parent = b"p1"
544 copy_source = files.copied_from_p1[f]
545 copy_source = files.copied_from_p1[f]
545 elif f in files.copied_from_p2:
546 elif f in files.copied_from_p2:
546 copy_parent = b"p2"
547 copy_parent = b"p2"
547 copy_source = files.copied_from_p2[f]
548 copy_source = files.copied_from_p2[f]
548
549
549 data = (action, copy_parent, f, copy_source)
550 data = (action, copy_parent, f, copy_source)
550 template = b"%-8s %2s: %s, %s;\n"
551 template = b"%-8s %2s: %s, %s;\n"
551 ui.write(template % data)
552 ui.write(template % data)
552
553
553
554
554 @command(b'debugcheckstate', [], b'')
555 @command(b'debugcheckstate', [], b'')
555 def debugcheckstate(ui, repo):
556 def debugcheckstate(ui, repo):
556 """validate the correctness of the current dirstate"""
557 """validate the correctness of the current dirstate"""
557 parent1, parent2 = repo.dirstate.parents()
558 parent1, parent2 = repo.dirstate.parents()
558 m1 = repo[parent1].manifest()
559 m1 = repo[parent1].manifest()
559 m2 = repo[parent2].manifest()
560 m2 = repo[parent2].manifest()
560 errors = 0
561 errors = 0
561 for err in repo.dirstate.verify(m1, m2):
562 for err in repo.dirstate.verify(m1, m2):
562 ui.warn(err[0] % err[1:])
563 ui.warn(err[0] % err[1:])
563 errors += 1
564 errors += 1
564 if errors:
565 if errors:
565 errstr = _(b".hg/dirstate inconsistent with current parent's manifest")
566 errstr = _(b".hg/dirstate inconsistent with current parent's manifest")
566 raise error.Abort(errstr)
567 raise error.Abort(errstr)
567
568
568
569
569 @command(
570 @command(
570 b'debugcolor',
571 b'debugcolor',
571 [(b'', b'style', None, _(b'show all configured styles'))],
572 [(b'', b'style', None, _(b'show all configured styles'))],
572 b'hg debugcolor',
573 b'hg debugcolor',
573 )
574 )
574 def debugcolor(ui, repo, **opts):
575 def debugcolor(ui, repo, **opts):
575 """show available color, effects or style"""
576 """show available color, effects or style"""
576 ui.writenoi18n(b'color mode: %s\n' % stringutil.pprint(ui._colormode))
577 ui.writenoi18n(b'color mode: %s\n' % stringutil.pprint(ui._colormode))
577 if opts.get('style'):
578 if opts.get('style'):
578 return _debugdisplaystyle(ui)
579 return _debugdisplaystyle(ui)
579 else:
580 else:
580 return _debugdisplaycolor(ui)
581 return _debugdisplaycolor(ui)
581
582
582
583
583 def _debugdisplaycolor(ui):
584 def _debugdisplaycolor(ui):
584 ui = ui.copy()
585 ui = ui.copy()
585 ui._styles.clear()
586 ui._styles.clear()
586 for effect in color._activeeffects(ui).keys():
587 for effect in color._activeeffects(ui).keys():
587 ui._styles[effect] = effect
588 ui._styles[effect] = effect
588 if ui._terminfoparams:
589 if ui._terminfoparams:
589 for k, v in ui.configitems(b'color'):
590 for k, v in ui.configitems(b'color'):
590 if k.startswith(b'color.'):
591 if k.startswith(b'color.'):
591 ui._styles[k] = k[6:]
592 ui._styles[k] = k[6:]
592 elif k.startswith(b'terminfo.'):
593 elif k.startswith(b'terminfo.'):
593 ui._styles[k] = k[9:]
594 ui._styles[k] = k[9:]
594 ui.write(_(b'available colors:\n'))
595 ui.write(_(b'available colors:\n'))
595 # sort label with a '_' after the other to group '_background' entry.
596 # sort label with a '_' after the other to group '_background' entry.
596 items = sorted(ui._styles.items(), key=lambda i: (b'_' in i[0], i[0], i[1]))
597 items = sorted(ui._styles.items(), key=lambda i: (b'_' in i[0], i[0], i[1]))
597 for colorname, label in items:
598 for colorname, label in items:
598 ui.write(b'%s\n' % colorname, label=label)
599 ui.write(b'%s\n' % colorname, label=label)
599
600
600
601
601 def _debugdisplaystyle(ui):
602 def _debugdisplaystyle(ui):
602 ui.write(_(b'available style:\n'))
603 ui.write(_(b'available style:\n'))
603 if not ui._styles:
604 if not ui._styles:
604 return
605 return
605 width = max(len(s) for s in ui._styles)
606 width = max(len(s) for s in ui._styles)
606 for label, effects in sorted(ui._styles.items()):
607 for label, effects in sorted(ui._styles.items()):
607 ui.write(b'%s' % label, label=label)
608 ui.write(b'%s' % label, label=label)
608 if effects:
609 if effects:
609 # 50
610 # 50
610 ui.write(b': ')
611 ui.write(b': ')
611 ui.write(b' ' * (max(0, width - len(label))))
612 ui.write(b' ' * (max(0, width - len(label))))
612 ui.write(b', '.join(ui.label(e, e) for e in effects.split()))
613 ui.write(b', '.join(ui.label(e, e) for e in effects.split()))
613 ui.write(b'\n')
614 ui.write(b'\n')
614
615
615
616
616 @command(b'debugcreatestreamclonebundle', [], b'FILE')
617 @command(b'debugcreatestreamclonebundle', [], b'FILE')
617 def debugcreatestreamclonebundle(ui, repo, fname):
618 def debugcreatestreamclonebundle(ui, repo, fname):
618 """create a stream clone bundle file
619 """create a stream clone bundle file
619
620
620 Stream bundles are special bundles that are essentially archives of
621 Stream bundles are special bundles that are essentially archives of
621 revlog files. They are commonly used for cloning very quickly.
622 revlog files. They are commonly used for cloning very quickly.
622 """
623 """
623 # TODO we may want to turn this into an abort when this functionality
624 # TODO we may want to turn this into an abort when this functionality
624 # is moved into `hg bundle`.
625 # is moved into `hg bundle`.
625 if phases.hassecret(repo):
626 if phases.hassecret(repo):
626 ui.warn(
627 ui.warn(
627 _(
628 _(
628 b'(warning: stream clone bundle will contain secret '
629 b'(warning: stream clone bundle will contain secret '
629 b'revisions)\n'
630 b'revisions)\n'
630 )
631 )
631 )
632 )
632
633
633 requirements, gen = streamclone.generatebundlev1(repo)
634 requirements, gen = streamclone.generatebundlev1(repo)
634 changegroup.writechunks(ui, gen, fname)
635 changegroup.writechunks(ui, gen, fname)
635
636
636 ui.write(_(b'bundle requirements: %s\n') % b', '.join(sorted(requirements)))
637 ui.write(_(b'bundle requirements: %s\n') % b', '.join(sorted(requirements)))
637
638
638
639
639 @command(
640 @command(
640 b'debugdag',
641 b'debugdag',
641 [
642 [
642 (b't', b'tags', None, _(b'use tags as labels')),
643 (b't', b'tags', None, _(b'use tags as labels')),
643 (b'b', b'branches', None, _(b'annotate with branch names')),
644 (b'b', b'branches', None, _(b'annotate with branch names')),
644 (b'', b'dots', None, _(b'use dots for runs')),
645 (b'', b'dots', None, _(b'use dots for runs')),
645 (b's', b'spaces', None, _(b'separate elements by spaces')),
646 (b's', b'spaces', None, _(b'separate elements by spaces')),
646 ],
647 ],
647 _(b'[OPTION]... [FILE [REV]...]'),
648 _(b'[OPTION]... [FILE [REV]...]'),
648 optionalrepo=True,
649 optionalrepo=True,
649 )
650 )
650 def debugdag(ui, repo, file_=None, *revs, **opts):
651 def debugdag(ui, repo, file_=None, *revs, **opts):
651 """format the changelog or an index DAG as a concise textual description
652 """format the changelog or an index DAG as a concise textual description
652
653
653 If you pass a revlog index, the revlog's DAG is emitted. If you list
654 If you pass a revlog index, the revlog's DAG is emitted. If you list
654 revision numbers, they get labeled in the output as rN.
655 revision numbers, they get labeled in the output as rN.
655
656
656 Otherwise, the changelog DAG of the current repo is emitted.
657 Otherwise, the changelog DAG of the current repo is emitted.
657 """
658 """
    spaces = opts.get('spaces')
    dots = opts.get('dots')
    if file_:
        rlog = revlog.revlog(vfsmod.vfs(encoding.getcwd(), audit=False), file_)
        revs = {int(r) for r in revs}

        def events():
            for r in rlog:
                yield b'n', (r, list(p for p in rlog.parentrevs(r) if p != -1))
                if r in revs:
                    yield b'l', (r, b"r%i" % r)

    elif repo:
        cl = repo.changelog
        tags = opts.get('tags')
        branches = opts.get('branches')
        if tags:
            labels = {}
            for l, n in repo.tags().items():
                labels.setdefault(cl.rev(n), []).append(l)

        def events():
            b = b"default"
            for r in cl:
                if branches:
                    newb = cl.read(cl.node(r))[5][b'branch']
                    if newb != b:
                        yield b'a', newb
                        b = newb
                yield b'n', (r, list(p for p in cl.parentrevs(r) if p != -1))
                if tags:
                    ls = labels.get(r)
                    if ls:
                        for l in ls:
                            yield b'l', (r, l)

    else:
        raise error.Abort(_(b'need repo for changelog dag'))

    for line in dagparser.dagtextlines(
        events(),
        addspaces=spaces,
        wraplabels=True,
        wrapannotations=True,
        wrapnonlinear=dots,
        usedots=dots,
        maxlinewidth=70,
    ):
        ui.write(line)
        ui.write(b"\n")


@command(b'debugdata', cmdutil.debugrevlogopts, _(b'-c|-m|FILE REV'))
def debugdata(ui, repo, file_, rev=None, **opts):
    """dump the contents of a data file revision"""
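    # Illustrative usage (not part of the original source; the file path is a
    # placeholder): dump revision 0 of the changelog, or of a filelog:
    #
    #   $ hg debugdata -c 0
    #   $ hg debugdata path/to/file.txt 0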
    opts = pycompat.byteskwargs(opts)
    if opts.get(b'changelog') or opts.get(b'manifest') or opts.get(b'dir'):
        if rev is not None:
            raise error.CommandError(b'debugdata', _(b'invalid arguments'))
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'debugdata', _(b'invalid arguments'))
    r = cmdutil.openstorage(repo, b'debugdata', file_, opts)
    try:
        ui.write(r.rawdata(r.lookup(rev)))
    except KeyError:
        raise error.Abort(_(b'invalid revision identifier %s') % rev)


@command(
    b'debugdate',
    [(b'e', b'extended', None, _(b'try extended date formats'))],
    _(b'[-e] DATE [RANGE]'),
    norepo=True,
    optionalrepo=True,
)
def debugdate(ui, date, range=None, **opts):
    """parse and display a date"""
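    # Illustrative usage (not part of the original source): parse a date
    # string and print its internal (unixtime, tz offset) representation;
    # with an explicit timezone the output should resemble:
    #
    #   $ hg debugdate '2006-02-06 13:00:30 +0000'
    #   internal: 1139230830 0
    #   standard: Mon Feb 06 13:00:30 2006 +0000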
    if opts["extended"]:
        d = dateutil.parsedate(date, dateutil.extendeddateformats)
    else:
        d = dateutil.parsedate(date)
    ui.writenoi18n(b"internal: %d %d\n" % d)
    ui.writenoi18n(b"standard: %s\n" % dateutil.datestr(d))
    if range:
        m = dateutil.matchdate(range)
        ui.writenoi18n(b"match: %s\n" % m(d[0]))


@command(
    b'debugdeltachain',
    cmdutil.debugrevlogopts + cmdutil.formatteropts,
    _(b'-c|-m|FILE'),
    optionalrepo=True,
)
def debugdeltachain(ui, repo, file_=None, **opts):
    """dump information about delta chains in a revlog

    Output can be templatized. Available template keywords are:

    :``rev``: revision number
    :``chainid``: delta chain identifier (numbered by unique base)
    :``chainlen``: delta chain length to this revision
    :``prevrev``: previous revision in delta chain
    :``deltatype``: role of delta / how it was computed
    :``compsize``: compressed size of revision
    :``uncompsize``: uncompressed size of revision
    :``chainsize``: total size of compressed revisions in chain
    :``chainratio``: total chain size divided by uncompressed revision size
                     (new delta chains typically start at ratio 2.00)
    :``lindist``: linear distance from base revision in delta chain to end
                  of this revision
    :``extradist``: total size of revisions not part of this delta chain from
                    base of delta chain to end of this revision; a measurement
                    of how much extra data we need to read/seek across to read
                    the delta chain for this revision
    :``extraratio``: extradist divided by chainsize; another representation of
                     how much unrelated data is needed to load this delta chain

    If the repository is configured to use sparse reads, additional keywords
    are available:

    :``readsize``: total size of data read from the disk for a revision
                   (sum of the sizes of all the blocks)
    :``largestblock``: size of the largest block of data read from the disk
    :``readdensity``: density of useful bytes in the data read from the disk
    :``srchunks``: in how many data hunks the whole revision would be read

    Sparse reads can be enabled with experimental.sparse-read = True
    """
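    # Illustrative usage (not part of the original source): the template
    # keywords documented above can drive -T, e.g. for the manifest revlog:
    #
    #   $ hg debugdeltachain -m -T '{rev} {chainid} {chainlen} {deltatype}\n'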
    opts = pycompat.byteskwargs(opts)
    r = cmdutil.openrevlog(repo, b'debugdeltachain', file_, opts)
    index = r.index
    start = r.start
    length = r.length
    generaldelta = r._generaldelta
    withsparseread = getattr(r, '_withsparseread', False)

    def revinfo(rev):
        e = index[rev]
        compsize = e[1]
        uncompsize = e[2]
        chainsize = 0

        if generaldelta:
            if e[3] == e[5]:
                deltatype = b'p1'
            elif e[3] == e[6]:
                deltatype = b'p2'
            elif e[3] == rev - 1:
                deltatype = b'prev'
            elif e[3] == rev:
                deltatype = b'base'
            else:
                deltatype = b'other'
        else:
            if e[3] == rev:
                deltatype = b'base'
            else:
                deltatype = b'prev'

        chain = r._deltachain(rev)[0]
        for iterrev in chain:
            e = index[iterrev]
            chainsize += e[1]

        return compsize, uncompsize, deltatype, chain, chainsize

    fm = ui.formatter(b'debugdeltachain', opts)

    fm.plain(
        b'    rev  chain# chainlen     prev   delta       '
        b'size    rawsize  chainsize     ratio   lindist extradist '
        b'extraratio'
    )
    if withsparseread:
        fm.plain(b'  readsize largestblk rddensity srchunks')
    fm.plain(b'\n')

    chainbases = {}
    for rev in r:
        comp, uncomp, deltatype, chain, chainsize = revinfo(rev)
        chainbase = chain[0]
        chainid = chainbases.setdefault(chainbase, len(chainbases) + 1)
        basestart = start(chainbase)
        revstart = start(rev)
        lineardist = revstart + comp - basestart
        extradist = lineardist - chainsize
        try:
            prevrev = chain[-2]
        except IndexError:
            prevrev = -1

        if uncomp != 0:
            chainratio = float(chainsize) / float(uncomp)
        else:
            chainratio = chainsize

        if chainsize != 0:
            extraratio = float(extradist) / float(chainsize)
        else:
            extraratio = extradist

        fm.startitem()
        fm.write(
            b'rev chainid chainlen prevrev deltatype compsize '
            b'uncompsize chainsize chainratio lindist extradist '
            b'extraratio',
            b'%7d %7d %8d %8d %7s %10d %10d %10d %9.5f %9d %9d %10.5f',
            rev,
            chainid,
            len(chain),
            prevrev,
            deltatype,
            comp,
            uncomp,
            chainsize,
            chainratio,
            lineardist,
            extradist,
            extraratio,
            rev=rev,
            chainid=chainid,
            chainlen=len(chain),
            prevrev=prevrev,
            deltatype=deltatype,
            compsize=comp,
            uncompsize=uncomp,
            chainsize=chainsize,
            chainratio=chainratio,
            lindist=lineardist,
            extradist=extradist,
            extraratio=extraratio,
        )
        if withsparseread:
            readsize = 0
            largestblock = 0
            srchunks = 0

            for revschunk in deltautil.slicechunk(r, chain):
                srchunks += 1
                blkend = start(revschunk[-1]) + length(revschunk[-1])
                blksize = blkend - start(revschunk[0])

                readsize += blksize
                if largestblock < blksize:
                    largestblock = blksize

            if readsize:
                readdensity = float(chainsize) / float(readsize)
            else:
                readdensity = 1

            fm.write(
                b'readsize largestblock readdensity srchunks',
                b' %10d %10d %9.5f %8d',
                readsize,
                largestblock,
                readdensity,
                srchunks,
                readsize=readsize,
                largestblock=largestblock,
                readdensity=readdensity,
                srchunks=srchunks,
            )

        fm.plain(b'\n')

    fm.end()

@command(
    b'debugdirstate|debugstate',
    [
        (
            b'',
            b'nodates',
            None,
            _(b'do not display the saved mtime (DEPRECATED)'),
        ),
        (b'', b'dates', True, _(b'display the saved mtime')),
        (b'', b'datesort', None, _(b'sort by saved mtime')),
        (
            b'',
            b'docket',
            False,
            _(b'display the docket (metadata file) instead'),
        ),
        (
            b'',
            b'all',
            False,
            _(b'display dirstate-v2 tree nodes that would not exist in v1'),
        ),
    ],
    _(b'[OPTION]...'),
)
def debugstate(ui, repo, **opts):
    """show the contents of the current dirstate"""

    if opts.get("docket"):
        if not repo.dirstate._use_dirstate_v2:
            raise error.Abort(_(b'dirstate v1 does not have a docket'))

        docket = repo.dirstate._map.docket
        (
            start_offset,
            root_nodes,
            nodes_with_entry,
            nodes_with_copy,
            unused_bytes,
            _unused,
            ignore_pattern,
        ) = dirstateutils.v2.TREE_METADATA.unpack(docket.tree_metadata)

        ui.write(_(b"size of dirstate data: %d\n") % docket.data_size)
        ui.write(_(b"data file uuid: %s\n") % docket.uuid)
        ui.write(_(b"start offset of root nodes: %d\n") % start_offset)
        ui.write(_(b"number of root nodes: %d\n") % root_nodes)
        ui.write(_(b"nodes with entries: %d\n") % nodes_with_entry)
        ui.write(_(b"nodes with copies: %d\n") % nodes_with_copy)
        ui.write(_(b"number of unused bytes: %d\n") % unused_bytes)
        ui.write(
            _(b"ignore pattern hash: %s\n") % binascii.hexlify(ignore_pattern)
        )
        return

    nodates = not opts['dates']
    if opts.get('nodates') is not None:
        nodates = True
    datesort = opts.get('datesort')

    if datesort:

        def keyfunc(entry):
            filename, _state, _mode, _size, mtime = entry
            return (mtime, filename)

    else:
        keyfunc = None  # sort by filename
    entries = list(repo.dirstate._map.debug_iter(all=opts['all']))
    entries.sort(key=keyfunc)
    for entry in entries:
        filename, state, mode, size, mtime = entry
        if mtime == -1:
            timestr = b'unset               '
        elif nodates:
            timestr = b'set                 '
        else:
            timestr = time.strftime("%Y-%m-%d %H:%M:%S ", time.localtime(mtime))
            timestr = encoding.strtolocal(timestr)
        if mode & 0o20000:
            mode = b'lnk'
        else:
            mode = b'%3o' % (mode & 0o777 & ~util.umask)
        ui.write(b"%c %s %10d %s%s\n" % (state, mode, size, timestr, filename))
    for f in repo.dirstate.copies():
        ui.write(_(b"copy: %s -> %s\n") % (repo.dirstate.copied(f), f))


@command(
    b'debugdiscovery',
    [
        (b'', b'old', None, _(b'use old-style discovery')),
        (
            b'',
            b'nonheads',
            None,
            _(b'use old-style discovery with non-heads included'),
        ),
        (b'', b'rev', [], b'restrict discovery to this set of revs'),
        (b'', b'seed', b'12323', b'specify the random seed used for discovery'),
        (
            b'',
            b'local-as-revs',
            b"",
            b'treat local as having these revisions only',
        ),
        (
            b'',
            b'remote-as-revs',
            b"",
            b'use local as remote, with only these revisions',
        ),
    ]
    + cmdutil.remoteopts
    + cmdutil.formatteropts,
    _(b'[--rev REV] [OTHER]'),
)
def debugdiscovery(ui, repo, remoteurl=b"default", **opts):
    """runs the changeset discovery protocol in isolation

    The local peer can be "replaced" by a subset of the local repository by
    using the `--local-as-revs` flag. In the same way, the usual `remote` peer
    can be "replaced" by a subset of the local repository using the
    `--remote-as-revs` flag. This is useful to efficiently debug pathological
    discovery situations.

    The following developer-oriented config options are relevant for people
    playing with this command:

    * devel.discovery.exchange-heads=True

      If False, the discovery will not start with remote head fetching and
      local head querying.

    * devel.discovery.grow-sample=True

      If False, the sample size used in set discovery will not be increased
      through the process.

    * devel.discovery.grow-sample.dynamic=True

      When discovery.grow-sample.dynamic is True, the default, the sample size
      is adapted to the shape of the undecided set (it is set to the max of:
      <target-size>, len(roots(undecided)), len(heads(undecided))).

    * devel.discovery.grow-sample.rate=1.05

      the rate at which the sample grows

    * devel.discovery.randomize=True

      If False, random sampling during discovery is deterministic. It is meant
      for integration tests.

    * devel.discovery.sample-size=200

      Control the size of the discovery sample

    * devel.discovery.sample-size.initial=100

      Control the size of the sample used for the initial query
    """
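    # Illustrative usage (not part of the original source; the URL is a
    # placeholder): run discovery against a peer, restricted to one revision:
    #
    #   $ hg debugdiscovery --rev tip https://example.com/repo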
    opts = pycompat.byteskwargs(opts)
    unfi = repo.unfiltered()

    # setup potential extra filtering
    local_revs = opts[b"local_as_revs"]
    remote_revs = opts[b"remote_as_revs"]

    # make sure tests are repeatable
    random.seed(int(opts[b'seed']))

    if not remote_revs:

        remoteurl, branches = urlutil.get_unique_pull_path(
            b'debugdiscovery', repo, ui, remoteurl
        )
        remote = hg.peer(repo, opts, remoteurl)
        ui.status(_(b'comparing with %s\n') % urlutil.hidepassword(remoteurl))
    else:
        branches = (None, [])
        remote_filtered_revs = logcmdutil.revrange(
            unfi, [b"not (::(%s))" % remote_revs]
        )
        remote_filtered_revs = frozenset(remote_filtered_revs)

        def remote_func(x):
            return remote_filtered_revs

        repoview.filtertable[b'debug-discovery-remote-filter'] = remote_func

        remote = repo.peer()
        remote._repo = remote._repo.filtered(b'debug-discovery-remote-filter')

    if local_revs:
        local_filtered_revs = logcmdutil.revrange(
            unfi, [b"not (::(%s))" % local_revs]
        )
        local_filtered_revs = frozenset(local_filtered_revs)

        def local_func(x):
            return local_filtered_revs

        repoview.filtertable[b'debug-discovery-local-filter'] = local_func
        repo = repo.filtered(b'debug-discovery-local-filter')

    data = {}
    if opts.get(b'old'):

        def doit(pushedrevs, remoteheads, remote=remote):
            if not util.safehasattr(remote, b'branches'):
                # enable in-client legacy support
                remote = localrepo.locallegacypeer(remote.local())
            common, _in, hds = treediscovery.findcommonincoming(
                repo, remote, force=True, audit=data
            )
            common = set(common)
            if not opts.get(b'nonheads'):
                ui.writenoi18n(
                    b"unpruned common: %s\n"
                    % b" ".join(sorted(short(n) for n in common))
                )

                clnode = repo.changelog.node
                common = repo.revs(b'heads(::%ln)', common)
                common = {clnode(r) for r in common}
            return common, hds

    else:

        def doit(pushedrevs, remoteheads, remote=remote):
            nodes = None
            if pushedrevs:
                revs = logcmdutil.revrange(repo, pushedrevs)
                nodes = [repo[r].node() for r in revs]
            common, any, hds = setdiscovery.findcommonheads(
                ui, repo, remote, ancestorsof=nodes, audit=data
            )
            return common, hds

    remoterevs, _checkout = hg.addbranchrevs(repo, remote, branches, revs=None)
    localrevs = opts[b'rev']

    fm = ui.formatter(b'debugdiscovery', opts)
    if fm.strict_format:

        @contextlib.contextmanager
        def may_capture_output():
            ui.pushbuffer()
            yield
            data[b'output'] = ui.popbuffer()

    else:
        may_capture_output = util.nullcontextmanager
    with may_capture_output():
        with util.timedcm('debug-discovery') as t:
            common, hds = doit(localrevs, remoterevs)

    # compute all statistics
    heads_common = set(common)
    heads_remote = set(hds)
    heads_local = set(repo.heads())
    # note: there cannot be a local or remote head that is in common and not
    # itself a head of common.
    heads_common_local = heads_common & heads_local
    heads_common_remote = heads_common & heads_remote
    heads_common_both = heads_common & heads_remote & heads_local

    all = repo.revs(b'all()')
    common = repo.revs(b'::%ln', common)
    roots_common = repo.revs(b'roots(::%ld)', common)
    missing = repo.revs(b'not ::%ld', common)
    heads_missing = repo.revs(b'heads(%ld)', missing)
    roots_missing = repo.revs(b'roots(%ld)', missing)
    assert len(common) + len(missing) == len(all)

    initial_undecided = repo.revs(
        b'not (::%ln or %ln::)', heads_common_remote, heads_common_local
    )
    heads_initial_undecided = repo.revs(b'heads(%ld)', initial_undecided)
    roots_initial_undecided = repo.revs(b'roots(%ld)', initial_undecided)
    common_initial_undecided = initial_undecided & common
    missing_initial_undecided = initial_undecided & missing

    data[b'elapsed'] = t.elapsed
    data[b'nb-common-heads'] = len(heads_common)
    data[b'nb-common-heads-local'] = len(heads_common_local)
    data[b'nb-common-heads-remote'] = len(heads_common_remote)
    data[b'nb-common-heads-both'] = len(heads_common_both)
    data[b'nb-common-roots'] = len(roots_common)
    data[b'nb-head-local'] = len(heads_local)
    data[b'nb-head-local-missing'] = len(heads_local) - len(heads_common_local)
    data[b'nb-head-remote'] = len(heads_remote)
    data[b'nb-head-remote-unknown'] = len(heads_remote) - len(
        heads_common_remote
    )
    data[b'nb-revs'] = len(all)
    data[b'nb-revs-common'] = len(common)
    data[b'nb-revs-missing'] = len(missing)
    data[b'nb-missing-heads'] = len(heads_missing)
    data[b'nb-missing-roots'] = len(roots_missing)
    data[b'nb-ini_und'] = len(initial_undecided)
    data[b'nb-ini_und-heads'] = len(heads_initial_undecided)
    data[b'nb-ini_und-roots'] = len(roots_initial_undecided)
    data[b'nb-ini_und-common'] = len(common_initial_undecided)
    data[b'nb-ini_und-missing'] = len(missing_initial_undecided)

    fm.startitem()
    fm.data(**pycompat.strkwargs(data))
    # display discovery summary
    fm.plain(b"elapsed time:  %(elapsed)f seconds\n" % data)
    fm.plain(b"round-trips:           %(total-roundtrips)9d\n" % data)
    fm.plain(b"queries:               %(total-queries)9d\n" % data)
    fm.plain(b"heads summary:\n")
    fm.plain(b"  total common heads:  %(nb-common-heads)9d\n" % data)
    fm.plain(b"    also local heads:  %(nb-common-heads-local)9d\n" % data)
    fm.plain(b"    also remote heads: %(nb-common-heads-remote)9d\n" % data)
    fm.plain(b"    both:              %(nb-common-heads-both)9d\n" % data)
    fm.plain(b"  local heads:         %(nb-head-local)9d\n" % data)
    fm.plain(b"    common:            %(nb-common-heads-local)9d\n" % data)
    fm.plain(b"    missing:           %(nb-head-local-missing)9d\n" % data)
    fm.plain(b"  remote heads:        %(nb-head-remote)9d\n" % data)
    fm.plain(b"    common:            %(nb-common-heads-remote)9d\n" % data)
    fm.plain(b"    unknown:           %(nb-head-remote-unknown)9d\n" % data)
    fm.plain(b"local changesets:      %(nb-revs)9d\n" % data)
    fm.plain(b"  common:              %(nb-revs-common)9d\n" % data)
    fm.plain(b"    heads:             %(nb-common-heads)9d\n" % data)
    fm.plain(b"    roots:             %(nb-common-roots)9d\n" % data)
    fm.plain(b"  missing:             %(nb-revs-missing)9d\n" % data)
    fm.plain(b"    heads:             %(nb-missing-heads)9d\n" % data)
    fm.plain(b"    roots:             %(nb-missing-roots)9d\n" % data)
    fm.plain(b"  first undecided set: %(nb-ini_und)9d\n" % data)
    fm.plain(b"    heads:             %(nb-ini_und-heads)9d\n" % data)
    fm.plain(b"    roots:             %(nb-ini_und-roots)9d\n" % data)
    fm.plain(b"    common:            %(nb-ini_und-common)9d\n" % data)
    fm.plain(b"    missing:           %(nb-ini_und-missing)9d\n" % data)

    if ui.verbose:
        fm.plain(
            b"common heads: %s\n"
            % b" ".join(sorted(short(n) for n in heads_common))
        )
    fm.end()


_chunksize = 4 << 10

@command(
    b'debugdownload',
    [
        (b'o', b'output', b'', _(b'path')),
    ],
    optionalrepo=True,
)
def debugdownload(ui, repo, url, output=None, **opts):
    """download a resource using Mercurial logic and config"""
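    # Illustrative usage (not part of the original source; the URL and file
    # name are placeholders): fetch a resource through Mercurial's own
    # proxy/auth configuration and save it locally:
    #
    #   $ hg debugdownload https://example.com/bundle.hg -o local-copy.hg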
    fh = urlmod.open(ui, url, output)

    dest = ui
    if output:
        dest = open(output, b"wb", _chunksize)
    try:
        data = fh.read(_chunksize)
        while data:
            dest.write(data)
            data = fh.read(_chunksize)
    finally:
        if output:
            dest.close()


@command(b'debugextensions', cmdutil.formatteropts, [], optionalrepo=True)
def debugextensions(ui, repo, **opts):
    '''show information about active extensions'''
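    # Illustrative usage (not part of the original source): list active
    # extensions with their source locations, or as machine-readable JSON:
    #
    #   $ hg debugextensions -v
    #   $ hg debugextensions -T json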
    opts = pycompat.byteskwargs(opts)
    exts = extensions.extensions(ui)
    hgver = util.version()
    fm = ui.formatter(b'debugextensions', opts)
    for extname, extmod in sorted(exts, key=operator.itemgetter(0)):
        isinternal = extensions.ismoduleinternal(extmod)
        extsource = None

        if util.safehasattr(extmod, '__file__'):
            extsource = pycompat.fsencode(extmod.__file__)
        elif getattr(sys, 'oxidized', False):
            extsource = pycompat.sysexecutable
        if isinternal:
            exttestedwith = []  # never expose magic string to users
        else:
            exttestedwith = getattr(extmod, 'testedwith', b'').split()
        extbuglink = getattr(extmod, 'buglink', None)

        fm.startitem()

        if ui.quiet or ui.verbose:
            fm.write(b'name', b'%s\n', extname)
        else:
            fm.write(b'name', b'%s', extname)
            if isinternal or hgver in exttestedwith:
                fm.plain(b'\n')
            elif not exttestedwith:
                fm.plain(_(b' (untested!)\n'))
            else:
                lasttestedversion = exttestedwith[-1]
                fm.plain(b' (%s!)\n' % lasttestedversion)

        fm.condwrite(
            ui.verbose and extsource,
            b'source',
            _(b'  location: %s\n'),
            extsource or b"",
        )

        if ui.verbose:
            fm.plain(_(b'  bundled: %s\n') % [b'no', b'yes'][isinternal])
        fm.data(bundled=isinternal)

        fm.condwrite(
            ui.verbose and exttestedwith,
            b'testedwith',
            _(b'  tested with: %s\n'),
            fm.formatlist(exttestedwith, name=b'ver'),
        )

        fm.condwrite(
            ui.verbose and extbuglink,
            b'buglink',
            _(b'  bug reporting: %s\n'),
            extbuglink or b"",
        )

    fm.end()

@command(
    b'debugfileset',
    [
        (
            b'r',
            b'rev',
            b'',
            _(b'apply the filespec on this revision'),
            _(b'REV'),
        ),
        (
            b'',
            b'all-files',
            False,
            _(b'test files from all revisions and working directory'),
        ),
        (
            b's',
            b'show-matcher',
            None,
            _(b'print internal representation of matcher'),
        ),
        (
            b'p',
            b'show-stage',
            [],
            _(b'print parsed tree at the given stage'),
            _(b'NAME'),
        ),
    ],
    _(b'[-r REV] [--all-files] [OPTION]... FILESPEC'),
)
def debugfileset(ui, repo, expr, **opts):
    '''parse and apply a fileset specification'''
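    # Illustrative usage (not part of the original source): evaluate a
    # fileset expression and print every parsing stage of it:
    #
    #   $ hg debugfileset -p all 'added() and size(">1k")'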
    from . import fileset

    fileset.symbols  # force import of fileset so we have predicates to optimize
    opts = pycompat.byteskwargs(opts)
    ctx = logcmdutil.revsingle(repo, opts.get(b'rev'), None)

    stages = [
        (b'parsed', pycompat.identity),
        (b'analyzed', filesetlang.analyze),
        (b'optimized', filesetlang.optimize),
    ]
    stagenames = {n for n, f in stages}

    showalways = set()
    if ui.verbose and not opts[b'show_stage']:
        # show parsed tree by --verbose (deprecated)
        showalways.add(b'parsed')
    if opts[b'show_stage'] == [b'all']:
        showalways.update(stagenames)
    else:
        for n in opts[b'show_stage']:
            if n not in stagenames:
                raise error.Abort(_(b'invalid stage name: %s') % n)
        showalways.update(opts[b'show_stage'])

    tree = filesetlang.parse(expr)
    for n, f in stages:
        tree = f(tree)
        if n in showalways:
            if opts[b'show_stage'] or n != b'parsed':
                ui.write(b"* %s:\n" % n)
            ui.write(filesetlang.prettyformat(tree), b"\n")

    files = set()
    if opts[b'all_files']:
        for r in repo:
            c = repo[r]
            files.update(c.files())
            files.update(c.substate)
    if opts[b'all_files'] or ctx.rev() is None:
        wctx = repo[None]
        files.update(
            repo.dirstate.walk(
                scmutil.matchall(repo),
                subrepos=list(wctx.substate),
                unknown=True,
                ignored=True,
            )
        )
        files.update(wctx.substate)
    else:
        files.update(ctx.files())
        files.update(ctx.substate)

    m = ctx.matchfileset(repo.getcwd(), expr)
    if opts[b'show_matcher'] or (opts[b'show_matcher'] is None and ui.verbose):
        ui.writenoi18n(b'* matcher:\n', stringutil.prettyrepr(m), b'\n')
    for f in sorted(files):
        if not m(f):
            continue
        ui.write(b"%s\n" % f)

@command(
    b"debug-repair-issue6528",
    [
        (
            b'',
            b'to-report',
            b'',
            _(b'build a report of affected revisions to this file'),
            _(b'FILE'),
        ),
        (
            b'',
            b'from-report',
            b'',
            _(b'repair revisions listed in this report file'),
            _(b'FILE'),
        ),
        (
            b'',
            b'paranoid',
            False,
            _(b'check that both detection methods do the same thing'),
        ),
    ]
    + cmdutil.dryrunopts,
)
def debug_repair_issue6528(ui, repo, **opts):
    """find affected revisions and repair them. See issue6528 for more details.

    The `--to-report` and `--from-report` flags allow you to cache and reuse
    the computation of affected revisions for a given repository across
    clones. The report format is line-based (with empty lines ignored):

    ```
    <ascii-hex of the affected revision>,... <unencoded filelog index filename>
    ```

    There can be multiple broken revisions per filelog; they are separated by
    a comma with no spaces. The only space is between the revision(s) and the
    filename.

    Note that this does *not* mean that this repairs future affected
    revisions; that needs a separate fix at the exchange level that was
    introduced in Mercurial 5.9.1.

    There is a `--paranoid` flag to test that the fast implementation is
    correct by checking it against the slow implementation. Since this matter
    is quite urgent and testing every edge-case is probably quite costly, we
    use this method to test on large repositories as a fuzzing method of
    sorts.
    """
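    # Illustrative report line (not part of the original source; the hash and
    # filename below are made up): one affected revision in one filelog:
    #
    #   2a80419dfc31d7dfaf1b6cb9691662dae4cfbf23 data/somefile.txt.i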
    cmdutil.check_incompatible_arguments(
        opts, 'to_report', ['from_report', 'dry_run']
    )
    dry_run = opts.get('dry_run')
    to_report = opts.get('to_report')
    from_report = opts.get('from_report')
    paranoid = opts.get('paranoid')
    # TODO maybe add filelog pattern and revision pattern parameters to help
    # narrow down the search for users that know what they're looking for?

    if requirements.REVLOGV1_REQUIREMENT not in repo.requirements:
        msg = b"can only repair revlogv1 repositories, v2 is not affected"
        raise error.Abort(_(msg))

    rewrite.repair_issue6528(
        ui,
        repo,
        dry_run=dry_run,
        to_report=to_report,
        from_report=from_report,
        paranoid=paranoid,
    )


@command(b'debugformat', [] + cmdutil.formatteropts)
def debugformat(ui, repo, **opts):
    """display format information about the current repository

    Use --verbose to get extra information about the current config value and
    the Mercurial default."""
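    # Illustrative usage (not part of the original source): compare the
    # repository's format variants against the config and built-in defaults:
    #
    #   $ hg debugformat -v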
    opts = pycompat.byteskwargs(opts)
    maxvariantlength = max(len(fv.name) for fv in upgrade.allformatvariant)
    maxvariantlength = max(len(b'format-variant'), maxvariantlength)

    def makeformatname(name):
        return b'%s:' + (b' ' * (maxvariantlength - len(name)))

    fm = ui.formatter(b'debugformat', opts)
    if fm.isplain():

        def formatvalue(value):
            if util.safehasattr(value, b'startswith'):
                return value
            if value:
                return b'yes'
            else:
                return b'no'

    else:
        formatvalue = pycompat.identity

    fm.plain(b'format-variant')
    fm.plain(b' ' * (maxvariantlength - len(b'format-variant')))
    fm.plain(b' repo')
    if ui.verbose:
        fm.plain(b' config default')
    fm.plain(b'\n')
    for fv in upgrade.allformatvariant:
        fm.startitem()
        repovalue = fv.fromrepo(repo)
        configvalue = fv.fromconfig(repo)

        if repovalue != configvalue:
            namelabel = b'formatvariant.name.mismatchconfig'
            repolabel = b'formatvariant.repo.mismatchconfig'
        elif repovalue != fv.default:
            namelabel = b'formatvariant.name.mismatchdefault'
            repolabel = b'formatvariant.repo.mismatchdefault'
        else:
            namelabel = b'formatvariant.name.uptodate'
            repolabel = b'formatvariant.repo.uptodate'

        fm.write(b'name', makeformatname(fv.name), fv.name, label=namelabel)
        fm.write(b'repo', b' %3s', formatvalue(repovalue), label=repolabel)
        if fv.default != configvalue:
            configlabel = b'formatvariant.config.special'
        else:
            configlabel = b'formatvariant.config.default'
        fm.condwrite(
            ui.verbose,
            b'config',
            b' %6s',
            formatvalue(configvalue),
            label=configlabel,
        )
        fm.condwrite(
            ui.verbose,
            b'default',
            b' %7s',
            formatvalue(fv.default),
            label=b'formatvariant.default',
        )
        fm.plain(b'\n')
    fm.end()


@command(b'debugfsinfo', [], _(b'[PATH]'), norepo=True)
def debugfsinfo(ui, path=b"."):
    """show information detected about current filesystem"""
1593 ui.writenoi18n(b'path: %s\n' % path)
1611 ui.writenoi18n(b'path: %s\n' % path)
1594 ui.writenoi18n(
1612 ui.writenoi18n(
1595 b'mounted on: %s\n' % (util.getfsmountpoint(path) or b'(unknown)')
1613 b'mounted on: %s\n' % (util.getfsmountpoint(path) or b'(unknown)')
1596 )
1614 )
1597 ui.writenoi18n(b'exec: %s\n' % (util.checkexec(path) and b'yes' or b'no'))
1615 ui.writenoi18n(b'exec: %s\n' % (util.checkexec(path) and b'yes' or b'no'))
1598 ui.writenoi18n(b'fstype: %s\n' % (util.getfstype(path) or b'(unknown)'))
1616 ui.writenoi18n(b'fstype: %s\n' % (util.getfstype(path) or b'(unknown)'))
1599 ui.writenoi18n(
1617 ui.writenoi18n(
1600 b'symlink: %s\n' % (util.checklink(path) and b'yes' or b'no')
1618 b'symlink: %s\n' % (util.checklink(path) and b'yes' or b'no')
1601 )
1619 )
1602 ui.writenoi18n(
1620 ui.writenoi18n(
1603 b'hardlink: %s\n' % (util.checknlink(path) and b'yes' or b'no')
1621 b'hardlink: %s\n' % (util.checknlink(path) and b'yes' or b'no')
1604 )
1622 )
1605 casesensitive = b'(unknown)'
1623 casesensitive = b'(unknown)'
1606 try:
1624 try:
1607 with pycompat.namedtempfile(prefix=b'.debugfsinfo', dir=path) as f:
1625 with pycompat.namedtempfile(prefix=b'.debugfsinfo', dir=path) as f:
1608 casesensitive = util.fscasesensitive(f.name) and b'yes' or b'no'
1626 casesensitive = util.fscasesensitive(f.name) and b'yes' or b'no'
1609 except OSError:
1627 except OSError:
1610 pass
1628 pass
1611 ui.writenoi18n(b'case-sensitive: %s\n' % casesensitive)
1629 ui.writenoi18n(b'case-sensitive: %s\n' % casesensitive)
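
# Illustrative `hg debugfsinfo` output for the checks above (values are
# hypothetical and depend on the host filesystem):
#
#   path: .
#   mounted on: /
#   exec: yes
#   fstype: ext4
#   symlink: yes
#   hardlink: yes
#   case-sensitive: yes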


@command(
    b'debuggetbundle',
    [
        (b'H', b'head', [], _(b'id of head node'), _(b'ID')),
        (b'C', b'common', [], _(b'id of common node'), _(b'ID')),
        (
            b't',
            b'type',
            b'bzip2',
            _(b'bundle compression type to use'),
            _(b'TYPE'),
        ),
    ],
    _(b'REPO FILE [-H|-C ID]...'),
    norepo=True,
)
def debuggetbundle(ui, repopath, bundlepath, head=None, common=None, **opts):
    """retrieves a bundle from a repo

    Every ID must be a full-length hex node id string. Saves the bundle to the
    given file.
    """
    opts = pycompat.byteskwargs(opts)
    repo = hg.peer(ui, opts, repopath)
    if not repo.capable(b'getbundle'):
        raise error.Abort(b"getbundle() not supported by target repository")
    args = {}
    if common:
        args['common'] = [bin(s) for s in common]
    if head:
        args['heads'] = [bin(s) for s in head]
    # TODO: get desired bundlecaps from command line.
    args['bundlecaps'] = None
    bundle = repo.getbundle(b'debug', **args)

    bundletype = opts.get(b'type', b'bzip2').lower()
    btypes = {
        b'none': b'HG10UN',
        b'bzip2': b'HG10BZ',
        b'gzip': b'HG10GZ',
        b'bundle2': b'HG20',
    }
    bundletype = btypes.get(bundletype)
    if bundletype not in bundle2.bundletypes:
        raise error.Abort(_(b'unknown bundle type specified with --type'))
    bundle2.writebundle(ui, bundle, bundlepath, bundletype)
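
# Sketch of a typical invocation (URL and node id are hypothetical
# placeholders); the -t value must name one of the btypes keys above,
# anything else aborts with "unknown bundle type specified with --type":
#
#   hg debuggetbundle http://example.org/repo bundle.hg \
#       -H <40-hex-head-node> -t gzip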


@command(b'debugignore', [], b'[FILE]')
def debugignore(ui, repo, *files, **opts):
    """display the combined ignore pattern and information about ignored files

    With no argument display the combined ignore pattern.

    Given space-separated file names, shows if each given file is ignored
    and, if so, shows the ignore rule (file and line number) that matched it.
    """
    ignore = repo.dirstate._ignore
    if not files:
        # Show all the patterns
        ui.write(b"%s\n" % pycompat.byterepr(ignore))
    else:
        m = scmutil.match(repo[None], pats=files)
        uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=True)
        for f in m.files():
            nf = util.normpath(f)
            ignored = None
            ignoredata = None
            if nf != b'.':
                if ignore(nf):
                    ignored = nf
                    ignoredata = repo.dirstate._ignorefileandline(nf)
                else:
                    for p in pathutil.finddirs(nf):
                        if ignore(p):
                            ignored = p
                            ignoredata = repo.dirstate._ignorefileandline(p)
                            break
            if ignored:
                if ignored == nf:
                    ui.write(_(b"%s is ignored\n") % uipathfn(f))
                else:
                    ui.write(
                        _(
                            b"%s is ignored because of "
                            b"containing directory %s\n"
                        )
                        % (uipathfn(f), ignored)
                    )
                ignorefile, lineno, line = ignoredata
                ui.write(
                    _(b"(ignore rule in %s, line %d: '%s')\n")
                    % (ignorefile, lineno, line)
                )
            else:
                ui.write(_(b"%s is not ignored\n") % uipathfn(f))
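
# Illustrative session (the file name and the .hgignore rule are
# hypothetical), following the format strings above:
#
#   $ hg debugignore build/output.o
#   build/output.o is ignored
#   (ignore rule in .hgignore, line 2: '*.o')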


@command(
    b'debugindex',
    cmdutil.debugrevlogopts + cmdutil.formatteropts,
    _(b'-c|-m|FILE'),
)
def debugindex(ui, repo, file_=None, **opts):
    """dump index data for a storage primitive"""
    opts = pycompat.byteskwargs(opts)
    store = cmdutil.openstorage(repo, b'debugindex', file_, opts)

    if ui.debugflag:
        shortfn = hex
    else:
        shortfn = short

    idlen = 12
    for i in store:
        idlen = len(shortfn(store.node(i)))
        break

    fm = ui.formatter(b'debugindex', opts)
    fm.plain(
        b'   rev linkrev %s %s p2\n'
        % (b'nodeid'.ljust(idlen), b'p1'.ljust(idlen))
    )

    for rev in store:
        node = store.node(rev)
        parents = store.parents(node)

        fm.startitem()
        fm.write(b'rev', b'%6d ', rev)
        fm.write(b'linkrev', b'%7d ', store.linkrev(rev))
        fm.write(b'node', b'%s ', shortfn(node))
        fm.write(b'p1', b'%s ', shortfn(parents[0]))
        fm.write(b'p2', b'%s', shortfn(parents[1]))
        fm.plain(b'\n')

    fm.end()
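
# Illustrative rendering of the column layout above for a two-revision store
# (node hashes are hypothetical):
#
#      rev linkrev nodeid       p1           p2
#        0       0 1f0dee641bb7 000000000000 000000000000
#        1       1 301d76bdc3ae 1f0dee641bb7 000000000000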


@command(
    b'debugindexdot',
    cmdutil.debugrevlogopts,
    _(b'-c|-m|FILE'),
    optionalrepo=True,
)
def debugindexdot(ui, repo, file_=None, **opts):
    """dump an index DAG as a graphviz dot file"""
    opts = pycompat.byteskwargs(opts)
    r = cmdutil.openstorage(repo, b'debugindexdot', file_, opts)
    ui.writenoi18n(b"digraph G {\n")
    for i in r:
        node = r.node(i)
        pp = r.parents(node)
        ui.write(b"\t%d -> %d\n" % (r.rev(pp[0]), i))
        if pp[1] != repo.nullid:
            ui.write(b"\t%d -> %d\n" % (r.rev(pp[1]), i))
    ui.write(b"}\n")
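
# The emitted text is plain Graphviz; an illustrative (hypothetical) result
# for a three-revision linear history, where each edge runs from a parent
# revision to its child and the null parent shows up as revision -1:
#
#   digraph G {
#       -1 -> 0
#       0 -> 1
#       1 -> 2
#   }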


@command(b'debugindexstats', [])
def debugindexstats(ui, repo):
    """show stats related to the changelog index"""
    repo.changelog.shortest(repo.nullid, 1)
    index = repo.changelog.index
    if not util.safehasattr(index, b'stats'):
        raise error.Abort(_(b'debugindexstats only works with native code'))
    for k, v in sorted(index.stats().items()):
        ui.write(b'%s: %d\n' % (k, v))


@command(b'debuginstall', [] + cmdutil.formatteropts, b'', norepo=True)
def debuginstall(ui, **opts):
    """test Mercurial installation

    Returns 0 on success.
    """
    opts = pycompat.byteskwargs(opts)

    problems = 0

    fm = ui.formatter(b'debuginstall', opts)
    fm.startitem()

    # encoding might be unknown or wrong. don't translate these messages.
    fm.write(b'encoding', b"checking encoding (%s)...\n", encoding.encoding)
    err = None
    try:
        codecs.lookup(pycompat.sysstr(encoding.encoding))
    except LookupError as inst:
        err = stringutil.forcebytestr(inst)
        problems += 1
    fm.condwrite(
        err,
        b'encodingerror',
        b" %s\n (check that your locale is properly set)\n",
        err,
    )

    # Python
    pythonlib = None
    if util.safehasattr(os, '__file__'):
        pythonlib = os.path.dirname(pycompat.fsencode(os.__file__))
    elif getattr(sys, 'oxidized', False):
        pythonlib = pycompat.sysexecutable

    fm.write(
        b'pythonexe',
        _(b"checking Python executable (%s)\n"),
        pycompat.sysexecutable or _(b"unknown"),
    )
    fm.write(
        b'pythonimplementation',
        _(b"checking Python implementation (%s)\n"),
        pycompat.sysbytes(platform.python_implementation()),
    )
    fm.write(
        b'pythonver',
        _(b"checking Python version (%s)\n"),
        (b"%d.%d.%d" % sys.version_info[:3]),
    )
    fm.write(
        b'pythonlib',
        _(b"checking Python lib (%s)...\n"),
        pythonlib or _(b"unknown"),
    )

    try:
        from . import rustext  # pytype: disable=import-error

        rustext.__doc__  # trigger lazy import
    except ImportError:
        rustext = None

    security = set(sslutil.supportedprotocols)
    if sslutil.hassni:
        security.add(b'sni')

    fm.write(
        b'pythonsecurity',
        _(b"checking Python security support (%s)\n"),
        fm.formatlist(sorted(security), name=b'protocol', fmt=b'%s', sep=b','),
    )

    # These are warnings, not errors. So don't increment problem count. This
    # may change in the future.
    if b'tls1.2' not in security:
        fm.plain(
            _(
                b'  TLS 1.2 not supported by Python install; '
                b'network connections lack modern security\n'
            )
        )
    if b'sni' not in security:
        fm.plain(
            _(
                b'  SNI not supported by Python install; may have '
                b'connectivity issues with some servers\n'
            )
        )

    fm.plain(
        _(
            b"checking Rust extensions (%s)\n"
            % (b'missing' if rustext is None else b'installed')
        ),
    )

    # TODO print CA cert info

    # hg version
    hgver = util.version()
    fm.write(
        b'hgver', _(b"checking Mercurial version (%s)\n"), hgver.split(b'+')[0]
    )
    fm.write(
        b'hgverextra',
        _(b"checking Mercurial custom build (%s)\n"),
        b'+'.join(hgver.split(b'+')[1:]),
    )

    # compiled modules
    hgmodules = None
    if util.safehasattr(sys.modules[__name__], '__file__'):
        hgmodules = os.path.dirname(pycompat.fsencode(__file__))
    elif getattr(sys, 'oxidized', False):
        hgmodules = pycompat.sysexecutable

    fm.write(
        b'hgmodulepolicy', _(b"checking module policy (%s)\n"), policy.policy
    )
    fm.write(
        b'hgmodules',
        _(b"checking installed modules (%s)...\n"),
        hgmodules or _(b"unknown"),
    )

    rustandc = policy.policy in (b'rust+c', b'rust+c-allow')
    rustext = rustandc  # for now, that's the only case
    cext = policy.policy in (b'c', b'allow') or rustandc
    nopure = cext or rustext
    if nopure:
        err = None
        try:
            if cext:
                from .cext import (  # pytype: disable=import-error
                    base85,
                    bdiff,
                    mpatch,
                    osutil,
                )

                # quiet pyflakes
                dir(bdiff), dir(mpatch), dir(base85), dir(osutil)
            if rustext:
                from .rustext import (  # pytype: disable=import-error
                    ancestor,
                    dirstate,
                )

                dir(ancestor), dir(dirstate)  # quiet pyflakes
        except Exception as inst:
            err = stringutil.forcebytestr(inst)
            problems += 1
        fm.condwrite(err, b'extensionserror', b" %s\n", err)

    compengines = util.compengines._engines.values()
    fm.write(
        b'compengines',
        _(b'checking registered compression engines (%s)\n'),
        fm.formatlist(
            sorted(e.name() for e in compengines),
            name=b'compengine',
            fmt=b'%s',
            sep=b', ',
        ),
    )
    fm.write(
        b'compenginesavail',
        _(b'checking available compression engines (%s)\n'),
        fm.formatlist(
            sorted(e.name() for e in compengines if e.available()),
            name=b'compengine',
            fmt=b'%s',
            sep=b', ',
        ),
    )
    wirecompengines = compression.compengines.supportedwireengines(
        compression.SERVERROLE
    )
    fm.write(
        b'compenginesserver',
        _(
            b'checking available compression engines '
            b'for wire protocol (%s)\n'
        ),
        fm.formatlist(
            [e.name() for e in wirecompengines if e.wireprotosupport()],
            name=b'compengine',
            fmt=b'%s',
            sep=b', ',
        ),
    )
    re2 = b'missing'
    if util._re2:
        re2 = b'available'
    fm.plain(_(b'checking "re2" regexp engine (%s)\n') % re2)
    fm.data(re2=bool(util._re2))

    # templates
    p = templater.templatedir()
    fm.write(b'templatedirs', b'checking templates (%s)...\n', p or b'')
    fm.condwrite(not p, b'', _(b" no template directories found\n"))
    if p:
        (m, fp) = templater.try_open_template(b"map-cmdline.default")
        if m:
            # template found, check if it is working
            err = None
            try:
                templater.templater.frommapfile(m)
            except Exception as inst:
                err = stringutil.forcebytestr(inst)
                p = None
            fm.condwrite(err, b'defaulttemplateerror', b" %s\n", err)
        else:
            p = None
        fm.condwrite(
            p, b'defaulttemplate', _(b"checking default template (%s)\n"), m
        )
        fm.condwrite(
            not m,
            b'defaulttemplatenotfound',
            _(b" template '%s' not found\n"),
            b"default",
        )
    if not p:
        problems += 1
    fm.condwrite(
        not p, b'', _(b" (templates seem to have been installed incorrectly)\n")
    )

    # editor
    editor = ui.geteditor()
    editor = util.expandpath(editor)
    editorbin = procutil.shellsplit(editor)[0]
    fm.write(b'editor', _(b"checking commit editor... (%s)\n"), editorbin)
    cmdpath = procutil.findexe(editorbin)
    fm.condwrite(
        not cmdpath and editor == b'vi',
        b'vinotfound',
        _(
            b" No commit editor set and can't find %s in PATH\n"
            b" (specify a commit editor in your configuration"
            b" file)\n"
        ),
        not cmdpath and editor == b'vi' and editorbin,
    )
    fm.condwrite(
        not cmdpath and editor != b'vi',
        b'editornotfound',
        _(
            b" Can't find editor '%s' in PATH\n"
            b" (specify a commit editor in your configuration"
            b" file)\n"
        ),
        not cmdpath and editorbin,
    )
    if not cmdpath and editor != b'vi':
        problems += 1

    # check username
    username = None
    err = None
    try:
        username = ui.username()
    except error.Abort as e:
        err = e.message
        problems += 1

    fm.condwrite(
        username, b'username', _(b"checking username (%s)\n"), username
    )
    fm.condwrite(
        err,
        b'usernameerror',
        _(
            b"checking username...\n %s\n"
            b" (specify a username in your configuration file)\n"
        ),
        err,
    )

    for name, mod in extensions.extensions():
        handler = getattr(mod, 'debuginstall', None)
        if handler is not None:
            problems += handler(ui, fm)

    fm.condwrite(not problems, b'', _(b"no problems detected\n"))
    if not problems:
        fm.data(problems=problems)
    fm.condwrite(
        problems,
        b'problems',
        _(b"%d problems detected, please check your install!\n"),
        problems,
    )
    fm.end()

    return problems
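
# Like other formatter-based commands, the checks above can also be emitted
# as structured data via the -T/--template option that cmdutil.formatteropts
# provides, e.g. (hypothetical invocation): hg debuginstall -T json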


@command(b'debugknown', [], _(b'REPO ID...'), norepo=True)
def debugknown(ui, repopath, *ids, **opts):
    """test whether node ids are known to a repo

    Every ID must be a full-length hex node id string. Returns a list of 0s
    and 1s indicating unknown/known.
    """
    opts = pycompat.byteskwargs(opts)
    repo = hg.peer(ui, opts, repopath)
    if not repo.capable(b'known'):
        raise error.Abort(b"known() not supported by target repository")
    flags = repo.known([bin(s) for s in ids])
    ui.write(b"%s\n" % (b"".join([f and b"1" or b"0" for f in flags])))
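
# Illustrative invocation (repo path and node ids are hypothetical
# placeholders); one digit is printed per ID, in input order:
#
#   $ hg debugknown ../other-repo <known-node-hex> <unknown-node-hex>
#   10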


@command(b'debuglabelcomplete', [], _(b'LABEL...'))
def debuglabelcomplete(ui, repo, *args):
    '''backwards compatibility with old bash completion scripts (DEPRECATED)'''
    debugnamecomplete(ui, repo, *args)


@command(
    b'debuglocks',
    [
        (b'L', b'force-free-lock', None, _(b'free the store lock (DANGEROUS)')),
        (
            b'W',
            b'force-free-wlock',
            None,
            _(b'free the working state lock (DANGEROUS)'),
        ),
        (b's', b'set-lock', None, _(b'set the store lock until stopped')),
        (
            b'S',
            b'set-wlock',
            None,
            _(b'set the working state lock until stopped'),
        ),
    ],
    _(b'[OPTION]...'),
)
def debuglocks(ui, repo, **opts):
    """show or modify state of locks

    By default, this command will show which locks are held. This
    includes the user and process holding the lock, the amount of time
    the lock has been held, and the machine name where the process is
    running if it's not local.

    Locks protect the integrity of Mercurial's data, so they should be
    treated with care. System crashes or other interruptions may cause
    locks to not be properly released, though Mercurial will usually
    detect and remove such stale locks automatically.

    However, detecting stale locks may not always be possible (for
    instance, on a shared filesystem). Removing locks may also be
    blocked by filesystem permissions.

    Setting a lock will prevent other commands from changing the data.
    The command will wait until an interruption (SIGINT, SIGTERM, ...) occurs.
    The set locks are removed when the command exits.

    Returns 0 if no locks are held.

    """

    if opts.get('force_free_lock'):
        repo.svfs.tryunlink(b'lock')
    if opts.get('force_free_wlock'):
        repo.vfs.tryunlink(b'wlock')
    if opts.get('force_free_lock') or opts.get('force_free_wlock'):
        return 0

    locks = []
    try:
        if opts.get('set_wlock'):
            try:
                locks.append(repo.wlock(False))
            except error.LockHeld:
                raise error.Abort(_(b'wlock is already held'))
        if opts.get('set_lock'):
            try:
                locks.append(repo.lock(False))
            except error.LockHeld:
                raise error.Abort(_(b'lock is already held'))
        if len(locks):
            ui.promptchoice(_(b"ready to release the lock (y)? $$ &Yes"))
            return 0
    finally:
        release(*locks)

    now = time.time()
    held = 0

    def report(vfs, name, method):
        # this causes stale locks to get reaped for more accurate reporting
        try:
            l = method(False)
        except error.LockHeld:
            l = None

        if l:
            l.release()
        else:
            try:
                st = vfs.lstat(name)
                age = now - st[stat.ST_MTIME]
                user = util.username(st.st_uid)
                locker = vfs.readlock(name)
                if b":" in locker:
                    host, pid = locker.split(b':')
                    if host == socket.gethostname():
                        locker = b'user %s, process %s' % (user or b'None', pid)
                    else:
                        locker = b'user %s, process %s, host %s' % (
                            user or b'None',
                            pid,
                            host,
                        )
                ui.writenoi18n(b"%-6s %s (%ds)\n" % (name + b":", locker, age))
                return 1
            except OSError as e:
                if e.errno != errno.ENOENT:
                    raise

        ui.writenoi18n(b"%-6s free\n" % (name + b":"))
        return 0

    held += report(repo.svfs, b"lock", repo.lock)
    held += report(repo.vfs, b"wlock", repo.wlock)

    return held
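
# Illustrative default output (user, pid and age are hypothetical); a held
# lock reports its owner and age, a free one reports "free":
#
#   lock:  user alice, process 1234 (12s)
#   wlock: free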


@command(
    b'debugmanifestfulltextcache',
    [
        (b'', b'clear', False, _(b'clear the cache')),
        (
            b'a',
            b'add',
            [],
            _(b'add the given manifest nodes to the cache'),
            _(b'NODE'),
        ),
    ],
    b'',
)
def debugmanifestfulltextcache(ui, repo, add=(), **opts):
    """show, clear or amend the contents of the manifest fulltext cache"""

    def getcache():
        r = repo.manifestlog.getstorage(b'')
        try:
            return r._fulltextcache
        except AttributeError:
            msg = _(
                b"Current revlog implementation doesn't appear to have a "
                b"manifest fulltext cache\n"
            )
            raise error.Abort(msg)

    if opts.get('clear'):
        with repo.wlock():
            cache = getcache()
            cache.clear(clear_persisted_data=True)
            return

    if add:
        with repo.wlock():
            m = repo.manifestlog
            store = m.getstorage(b'')
            for n in add:
                try:
                    manifest = m[store.lookup(n)]
                except error.LookupError as e:
                    raise error.Abort(
                        bytes(e), hint=b"Check your manifest node id"
                    )
                manifest.read()  # stores revision in cache too
            return

    cache = getcache()
    if not len(cache):
        ui.write(_(b'cache empty\n'))
    else:
        ui.write(
            _(
                b'cache contains %d manifest entries, in order of most to '
                b'least recent:\n'
            )
            % (len(cache),)
        )
        totalsize = 0
        for nodeid in cache:
            # Use cache.peek to not update the LRU order
            data = cache.peek(nodeid)
            size = len(data)
            totalsize += size + 24  # 20 bytes nodeid, 4 bytes size
            ui.write(
                _(b'id: %s, size %s\n') % (hex(nodeid), util.bytecount(size))
            )
        ondisk = cache._opener.stat(b'manifestfulltextcache').st_size
        ui.write(
            _(b'total cache data size %s, on-disk %s\n')
            % (util.bytecount(totalsize), util.bytecount(ondisk))
        )
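
# Typical invocations of the command above (the node id is a hypothetical
# placeholder):
#
#   hg debugmanifestfulltextcache              # show cache contents
#   hg debugmanifestfulltextcache --clear      # drop the persisted data
#   hg debugmanifestfulltextcache -a <manifest-node-hex>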


@command(b'debugmergestate', [] + cmdutil.templateopts, b'')
def debugmergestate(ui, repo, *args, **opts):
    """print merge state

    Use --verbose to print out information about whether v1 or v2 merge state
    was chosen."""

    if ui.verbose:
        ms = mergestatemod.mergestate(repo)

        # sort so that reasonable information is on top
        v1records = ms._readrecordsv1()
        v2records = ms._readrecordsv2()

        if not v1records and not v2records:
            pass
        elif not v2records:
            ui.writenoi18n(b'no version 2 merge state\n')
        elif ms._v1v2match(v1records, v2records):
            ui.writenoi18n(b'v1 and v2 states match: using v2\n')
        else:
            ui.writenoi18n(b'v1 and v2 states mismatch: using v1\n')

    opts = pycompat.byteskwargs(opts)
    if not opts[b'template']:
        opts[b'template'] = (
            b'{if(commits, "", "no merge state found\n")}'
            b'{commits % "{name}{if(label, " ({label})")}: {node}\n"}'
            b'{files % "file: {path} (state \\"{state}\\")\n'
            b'{if(local_path, "'
            b'  local path: {local_path} (hash {local_key}, flags \\"{local_flags}\\")\n'
            b'  ancestor path: {ancestor_path} (node {ancestor_node})\n'
            b'  other path: {other_path} (node {other_node})\n'
            b'")}'
            b'{if(rename_side, "'
            b'  rename side: {rename_side}\n'
            b'  renamed path: {renamed_path}\n'
            b'")}'
            b'{extras % "  extra: {key} = {value}\n"}'
            b'"}'
            b'{extras % "extra: {file} ({key} = {value})\n"}'
        )

    ms = mergestatemod.mergestate.read(repo)

    fm = ui.formatter(b'debugmergestate', opts)
    fm.startitem()

    fm_commits = fm.nested(b'commits')
    if ms.active():
        for name, node, label_index in (
            (b'local', ms.local, 0),
            (b'other', ms.other, 1),
        ):
            fm_commits.startitem()
            fm_commits.data(name=name)
            fm_commits.data(node=hex(node))
            if ms._labels and len(ms._labels) > label_index:
                fm_commits.data(label=ms._labels[label_index])
    fm_commits.end()

    fm_files = fm.nested(b'files')
    if ms.active():
        for f in ms:
            fm_files.startitem()
            fm_files.data(path=f)
            state = ms._state[f]
            fm_files.data(state=state[0])
            if state[0] in (
                mergestatemod.MERGE_RECORD_UNRESOLVED,
                mergestatemod.MERGE_RECORD_RESOLVED,
            ):
                fm_files.data(local_key=state[1])
                fm_files.data(local_path=state[2])
                fm_files.data(ancestor_path=state[3])
                fm_files.data(ancestor_node=state[4])
                fm_files.data(other_path=state[5])
                fm_files.data(other_node=state[6])
                fm_files.data(local_flags=state[7])
            elif state[0] in (
                mergestatemod.MERGE_RECORD_UNRESOLVED_PATH,
                mergestatemod.MERGE_RECORD_RESOLVED_PATH,
            ):
                fm_files.data(renamed_path=state[1])
                fm_files.data(rename_side=state[2])
            fm_extras = fm_files.nested(b'extras')
            for k, v in sorted(ms.extras(f).items()):
                fm_extras.startitem()
                fm_extras.data(key=k)
                fm_extras.data(value=v)
            fm_extras.end()

    fm_files.end()

    fm_extras = fm.nested(b'extras')
    for f, d in sorted(ms.allextras().items()):
        if f in ms:
            # If file is in mergestate, we have already processed its extras
            continue
        for k, v in d.items():
            fm_extras.startitem()
            fm_extras.data(file=f)
            fm_extras.data(key=k)
            fm_extras.data(value=v)
    fm_extras.end()

    fm.end()


@command(b'debugnamecomplete', [], _(b'NAME...'))
def debugnamecomplete(ui, repo, *args):
    '''complete "names" - tags, open branch names, bookmark names'''

    names = set()
    # since we previously only listed open branches, we will handle that
    # specially (after this for loop)
    for name, ns in repo.names.items():
        if name != b'branches':
            names.update(ns.listnames(repo))
    names.update(
        tag
        for (tag, heads, tip, closed) in repo.branchmap().iterbranches()
        if not closed
    )
    completions = set()
    if not args:
        args = [b'']
    for a in args:
        completions.update(n for n in names if n.startswith(a))
    ui.write(b'\n'.join(sorted(completions)))
    ui.write(b'\n')


@command(
    b'debugnodemap',
    [
        (
            b'',
            b'dump-new',
            False,
            _(b'write a (new) persistent binary nodemap on stdout'),
        ),
        (b'', b'dump-disk', False, _(b'dump on-disk data on stdout')),
        (
            b'',
            b'check',
            False,
            _(b'check that the data on disk are correct'),
        ),
        (
            b'',
            b'metadata',
            False,
            _(b'display the on-disk metadata for the nodemap'),
        ),
    ],
)
def debugnodemap(ui, repo, **opts):
    """write and inspect the on-disk nodemap"""
    if opts['dump_new']:
        unfi = repo.unfiltered()
        cl = unfi.changelog
        if util.safehasattr(cl.index, "nodemap_data_all"):
            data = cl.index.nodemap_data_all()
        else:
            data = nodemap.persistent_data(cl.index)
        ui.write(data)
    elif opts['dump_disk']:
        unfi = repo.unfiltered()
        cl = unfi.changelog
        nm_data = nodemap.persisted_data(cl)
        if nm_data is not None:
            docket, data = nm_data
            ui.write(data[:])
    elif opts['check']:
        unfi = repo.unfiltered()
        cl = unfi.changelog
        nm_data = nodemap.persisted_data(cl)
        if nm_data is not None:
            docket, data = nm_data
            return nodemap.check_data(ui, cl.index, data)
    elif opts['metadata']:
        unfi = repo.unfiltered()
        cl = unfi.changelog
        nm_data = nodemap.persisted_data(cl)
        if nm_data is not None:
            docket, data = nm_data
            ui.write((b"uid: %s\n") % docket.uid)
            ui.write((b"tip-rev: %d\n") % docket.tip_rev)
            ui.write((b"tip-node: %s\n") % hex(docket.tip_node))
            ui.write((b"data-length: %d\n") % docket.data_length)
            ui.write((b"data-unused: %d\n") % docket.data_unused)
            unused_perc = docket.data_unused * 100.0 / docket.data_length
            ui.write((b"data-unused: %2.3f%%\n") % unused_perc)
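
# Illustrative `hg debugnodemap --metadata` output (all values are
# hypothetical placeholders):
#
#   uid: <hex-uid>
#   tip-rev: 5004
#   tip-node: <40-hex-node>
#   data-length: 121088
#   data-unused: 0
#   data-unused: 0.000%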
2487
2505
2488
2506
2489 @command(
2507 @command(
2490 b'debugobsolete',
2508 b'debugobsolete',
2491 [
2509 [
2492 (b'', b'flags', 0, _(b'markers flag')),
2510 (b'', b'flags', 0, _(b'markers flag')),
2493 (
2511 (
2494 b'',
2512 b'',
2495 b'record-parents',
2513 b'record-parents',
2496 False,
2514 False,
2497 _(b'record parent information for the precursor'),
2515 _(b'record parent information for the precursor'),
2498 ),
2516 ),
2499 (b'r', b'rev', [], _(b'display markers relevant to REV')),
2517 (b'r', b'rev', [], _(b'display markers relevant to REV')),
2500 (
2518 (
2501 b'',
2519 b'',
2502 b'exclusive',
2520 b'exclusive',
2503 False,
2521 False,
2504 _(b'restrict display to markers only relevant to REV'),
2522 _(b'restrict display to markers only relevant to REV'),
2505 ),
2523 ),
2506 (b'', b'index', False, _(b'display index of the marker')),
2524 (b'', b'index', False, _(b'display index of the marker')),
2507 (b'', b'delete', [], _(b'delete markers specified by indices')),
2525 (b'', b'delete', [], _(b'delete markers specified by indices')),
2508 ]
2526 ]
2509 + cmdutil.commitopts2
2527 + cmdutil.commitopts2
2510 + cmdutil.formatteropts,
2528 + cmdutil.formatteropts,
2511 _(b'[OBSOLETED [REPLACEMENT ...]]'),
2529 _(b'[OBSOLETED [REPLACEMENT ...]]'),
2512 )
2530 )
2513 def debugobsolete(ui, repo, precursor=None, *successors, **opts):
2531 def debugobsolete(ui, repo, precursor=None, *successors, **opts):
2514 """create arbitrary obsolete marker
2532 """create arbitrary obsolete marker
2515
2533
2516 With no arguments, displays the list of obsolescence markers."""
2534 With no arguments, displays the list of obsolescence markers."""
2517
2535
2518 opts = pycompat.byteskwargs(opts)
2536 opts = pycompat.byteskwargs(opts)
2519
2537
2520 def parsenodeid(s):
2538 def parsenodeid(s):
2521 try:
2539 try:
2522 # We do not use revsingle/revrange functions here to accept
2540 # We do not use revsingle/revrange functions here to accept
2523 # arbitrary node identifiers, possibly not present in the
2541 # arbitrary node identifiers, possibly not present in the
2524 # local repository.
2542 # local repository.
2525 n = bin(s)
2543 n = bin(s)
2526 if len(n) != repo.nodeconstants.nodelen:
2544 if len(n) != repo.nodeconstants.nodelen:
2527 raise TypeError()
2545 raise TypeError()
2528 return n
2546 return n
2529 except TypeError:
2547 except TypeError:
2530 raise error.InputError(
2548 raise error.InputError(
2531 b'changeset references must be full hexadecimal '
2549 b'changeset references must be full hexadecimal '
2532 b'node identifiers'
2550 b'node identifiers'
2533 )
2551 )
2534
2552
2535 if opts.get(b'delete'):
2553 if opts.get(b'delete'):
2536 indices = []
2554 indices = []
2537 for v in opts.get(b'delete'):
2555 for v in opts.get(b'delete'):
2538 try:
2556 try:
2539 indices.append(int(v))
2557 indices.append(int(v))
2540 except ValueError:
2558 except ValueError:
2541 raise error.InputError(
2559 raise error.InputError(
2542 _(b'invalid index value: %r') % v,
2560 _(b'invalid index value: %r') % v,
2543 hint=_(b'use integers for indices'),
2561 hint=_(b'use integers for indices'),
2544 )
2562 )
2545
2563
2546 if repo.currenttransaction():
2564 if repo.currenttransaction():
2547 raise error.Abort(
2565 raise error.Abort(
2548 _(b'cannot delete obsmarkers in the middle of transaction.')
2566 _(b'cannot delete obsmarkers in the middle of transaction.')
2549 )
2567 )
2550
2568
2551 with repo.lock():
2569 with repo.lock():
2552 n = repair.deleteobsmarkers(repo.obsstore, indices)
2570 n = repair.deleteobsmarkers(repo.obsstore, indices)
2553 ui.write(_(b'deleted %i obsolescence markers\n') % n)
2571 ui.write(_(b'deleted %i obsolescence markers\n') % n)
2554
2572
2555 return
2573 return
2556
2574
2557 if precursor is not None:
2575 if precursor is not None:
2558 if opts[b'rev']:
2576 if opts[b'rev']:
2559 raise error.InputError(
2577 raise error.InputError(
2560 b'cannot select revision when creating marker'
2578 b'cannot select revision when creating marker'
2561 )
2579 )
2562 metadata = {}
2580 metadata = {}
2563 metadata[b'user'] = encoding.fromlocal(opts[b'user'] or ui.username())
2581 metadata[b'user'] = encoding.fromlocal(opts[b'user'] or ui.username())
2564 succs = tuple(parsenodeid(succ) for succ in successors)
2582 succs = tuple(parsenodeid(succ) for succ in successors)
2565 l = repo.lock()
2583 l = repo.lock()
2566 try:
2584 try:
2567 tr = repo.transaction(b'debugobsolete')
2585 tr = repo.transaction(b'debugobsolete')
2568 try:
2586 try:
2569 date = opts.get(b'date')
2587 date = opts.get(b'date')
2570 if date:
2588 if date:
2571 date = dateutil.parsedate(date)
2589 date = dateutil.parsedate(date)
2572 else:
2590 else:
2573 date = None
2591 date = None
2574 prec = parsenodeid(precursor)
2592 prec = parsenodeid(precursor)
2575 parents = None
2593 parents = None
2576 if opts[b'record_parents']:
2594 if opts[b'record_parents']:
2577 if prec not in repo.unfiltered():
2595 if prec not in repo.unfiltered():
2578 raise error.Abort(
2596 raise error.Abort(
2579 b'cannot used --record-parents on '
2597 b'cannot used --record-parents on '
                            b'unknown changesets'
                        )
                    parents = repo.unfiltered()[prec].parents()
                    parents = tuple(p.node() for p in parents)
                repo.obsstore.create(
                    tr,
                    prec,
                    succs,
                    opts[b'flags'],
                    parents=parents,
                    date=date,
                    metadata=metadata,
                    ui=ui,
                )
                tr.close()
            except ValueError as exc:
                raise error.Abort(
                    _(b'bad obsmarker input: %s') % stringutil.forcebytestr(exc)
                )
            finally:
                tr.release()
        finally:
            l.release()
    else:
        if opts[b'rev']:
            revs = logcmdutil.revrange(repo, opts[b'rev'])
            nodes = [repo[r].node() for r in revs]
            markers = list(
                obsutil.getmarkers(
                    repo, nodes=nodes, exclusive=opts[b'exclusive']
                )
            )
            markers.sort(key=lambda x: x._data)
        else:
            markers = obsutil.getmarkers(repo)

        markerstoiter = markers
        isrelevant = lambda m: True
        if opts.get(b'rev') and opts.get(b'index'):
            markerstoiter = obsutil.getmarkers(repo)
            markerset = set(markers)
            isrelevant = lambda m: m in markerset

        fm = ui.formatter(b'debugobsolete', opts)
        for i, m in enumerate(markerstoiter):
            if not isrelevant(m):
                # marker can be irrelevant when we're iterating over a set
                # of markers (markerstoiter) which is bigger than the set
                # of markers we want to display (markers)
                # this can happen if both --index and --rev options are
                # provided and thus we need to iterate over all of the markers
                # to get the correct indices, but only display the ones that
                # are relevant to --rev value
                continue
            fm.startitem()
            ind = i if opts.get(b'index') else None
            cmdutil.showmarker(fm, m, index=ind)
        fm.end()
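# Editor's illustrative sketch (not part of the original source; hashes and
# metadata are placeholders): creating a marker by hand, then listing markers
# with stable indices:
#
#   $ hg debugobsolete `hg log -r 'desc(old)' -T '{node}'` \
#         `hg log -r 'desc(new)' -T '{node}'`
#   $ hg debugobsolete --index
#   0 <precursor-node> <successor-node> 0 (...) {'user': '...'}
#
# The index shown by --index is the marker's position in the full obsstore,
# which is why combining --rev with --index still walks every marker above.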


@command(
    b'debugp1copies',
    [(b'r', b'rev', b'', _(b'revision to debug'), _(b'REV'))],
    _(b'[-r REV]'),
)
def debugp1copies(ui, repo, **opts):
    """dump copy information compared to p1"""

    opts = pycompat.byteskwargs(opts)
    ctx = scmutil.revsingle(repo, opts.get(b'rev'), default=None)
    for dst, src in ctx.p1copies().items():
        ui.write(b'%s -> %s\n' % (src, dst))


@command(
    b'debugp2copies',
    [(b'r', b'rev', b'', _(b'revision to debug'), _(b'REV'))],
    _(b'[-r REV]'),
)
def debugp2copies(ui, repo, **opts):
2660 """dump copy information compared to p2"""
2678 """dump copy information compared to p2"""
2661
2679
2662 opts = pycompat.byteskwargs(opts)
2680 opts = pycompat.byteskwargs(opts)
2663 ctx = scmutil.revsingle(repo, opts.get(b'rev'), default=None)
2681 ctx = scmutil.revsingle(repo, opts.get(b'rev'), default=None)
2664 for dst, src in ctx.p2copies().items():
2682 for dst, src in ctx.p2copies().items():
2665 ui.write(b'%s -> %s\n' % (src, dst))
2683 ui.write(b'%s -> %s\n' % (src, dst))
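# Editor's illustrative note (not in the original source): after
#   $ hg cp a b && hg commit -m 'copy a to b'
# the copy is recorded against the first parent, so
#   $ hg debugp1copies -r .
#   a -> b
# while debugp2copies only reports copies recorded against a merge's
# second parent.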


@command(
    b'debugpathcomplete',
    [
        (b'f', b'full', None, _(b'complete an entire path')),
        (b'n', b'normal', None, _(b'show only normal files')),
        (b'a', b'added', None, _(b'show only added files')),
        (b'r', b'removed', None, _(b'show only removed files')),
    ],
    _(b'FILESPEC...'),
)
def debugpathcomplete(ui, repo, *specs, **opts):
    """complete part or all of a tracked path

    This command supports shells that offer path name completion. It
    currently completes only files already known to the dirstate.

    Completion extends only to the next path segment unless
    --full is specified, in which case entire paths are used."""

    def complete(path, acceptable):
        dirstate = repo.dirstate
        spec = os.path.normpath(os.path.join(encoding.getcwd(), path))
        rootdir = repo.root + pycompat.ossep
        if spec != repo.root and not spec.startswith(rootdir):
            return [], []
        if os.path.isdir(spec):
            spec += b'/'
        spec = spec[len(rootdir) :]
        fixpaths = pycompat.ossep != b'/'
        if fixpaths:
            spec = spec.replace(pycompat.ossep, b'/')
        speclen = len(spec)
        fullpaths = opts['full']
        files, dirs = set(), set()
        adddir, addfile = dirs.add, files.add
        for f, st in dirstate.items():
            if f.startswith(spec) and st.state in acceptable:
                if fixpaths:
                    f = f.replace(b'/', pycompat.ossep)
                if fullpaths:
                    addfile(f)
                    continue
                s = f.find(pycompat.ossep, speclen)
                if s >= 0:
                    adddir(f[:s])
                else:
                    addfile(f)
        return files, dirs

    acceptable = b''
    if opts['normal']:
        acceptable += b'nm'
    if opts['added']:
        acceptable += b'a'
    if opts['removed']:
        acceptable += b'r'
    cwd = repo.getcwd()
    if not specs:
        specs = [b'.']

    files, dirs = set(), set()
    for spec in specs:
        f, d = complete(spec, acceptable or b'nmar')
        files.update(f)
        dirs.update(d)
    files.update(dirs)
    ui.write(b'\n'.join(repo.pathto(p, cwd) for p in sorted(files)))
    ui.write(b'\n')
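# Editor's illustrative sketch (file names hypothetical): a shell completion
# hook would typically call, e.g.,
#   $ hg debugpathcomplete sr
#   src
#   $ hg debugpathcomplete --full sr
#   src/main.py
#   src/util.py
# The -n/-a/-r options map onto the dirstate states accepted above
# ('nm', 'a', 'r'); with no option, every tracked state is accepted.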


@command(
    b'debugpathcopies',
    cmdutil.walkopts,
    b'hg debugpathcopies REV1 REV2 [FILE]',
    inferrepo=True,
)
def debugpathcopies(ui, repo, rev1, rev2, *pats, **opts):
    """show copies between two revisions"""
    ctx1 = scmutil.revsingle(repo, rev1)
    ctx2 = scmutil.revsingle(repo, rev2)
    m = scmutil.match(ctx1, pats, opts)
    for dst, src in sorted(copies.pathcopies(ctx1, ctx2, m).items()):
        ui.write(b'%s -> %s\n' % (src, dst))


@command(b'debugpeer', [], _(b'PATH'), norepo=True)
def debugpeer(ui, path):
    """establish a connection to a peer repository"""
    # Always enable peer request logging; it is only displayed with
    # --debug.
    overrides = {
        (b'devel', b'debug.peer-request'): True,
    }

    with ui.configoverride(overrides):
        peer = hg.peer(ui, {}, path)

        try:
            local = peer.local() is not None
            canpush = peer.canpush()

            ui.write(_(b'url: %s\n') % peer.url())
            ui.write(_(b'local: %s\n') % (_(b'yes') if local else _(b'no')))
            ui.write(
                _(b'pushable: %s\n') % (_(b'yes') if canpush else _(b'no'))
            )
        finally:
            peer.close()


@command(
    b'debugpickmergetool',
    [
        (b'r', b'rev', b'', _(b'check for files in this revision'), _(b'REV')),
        (b'', b'changedelete', None, _(b'emulate merging change and delete')),
    ]
    + cmdutil.walkopts
    + cmdutil.mergetoolopts,
    _(b'[PATTERN]...'),
    inferrepo=True,
)
def debugpickmergetool(ui, repo, *pats, **opts):
2790 """examine which merge tool is chosen for specified file
2808 """examine which merge tool is chosen for specified file
2791
2809
2792 As described in :hg:`help merge-tools`, Mercurial examines
2810 As described in :hg:`help merge-tools`, Mercurial examines
2793 configurations below in this order to decide which merge tool is
2811 configurations below in this order to decide which merge tool is
2794 chosen for specified file.
2812 chosen for specified file.
2795
2813
2796 1. ``--tool`` option
2814 1. ``--tool`` option
2797 2. ``HGMERGE`` environment variable
2815 2. ``HGMERGE`` environment variable
2798 3. configurations in ``merge-patterns`` section
2816 3. configurations in ``merge-patterns`` section
2799 4. configuration of ``ui.merge``
2817 4. configuration of ``ui.merge``
2800 5. configurations in ``merge-tools`` section
2818 5. configurations in ``merge-tools`` section
2801 6. ``hgmerge`` tool (for historical reason only)
2819 6. ``hgmerge`` tool (for historical reason only)
2802 7. default tool for fallback (``:merge`` or ``:prompt``)
2820 7. default tool for fallback (``:merge`` or ``:prompt``)
2803
2821
2804 This command writes out examination result in the style below::
2822 This command writes out examination result in the style below::
2805
2823
2806 FILE = MERGETOOL
2824 FILE = MERGETOOL
2807
2825
2808 By default, all files known in the first parent context of the
2826 By default, all files known in the first parent context of the
2809 working directory are examined. Use file patterns and/or -I/-X
2827 working directory are examined. Use file patterns and/or -I/-X
2810 options to limit target files. -r/--rev is also useful to examine
2828 options to limit target files. -r/--rev is also useful to examine
2811 files in another context without actual updating to it.
2829 files in another context without actual updating to it.
2812
2830
2813 With --debug, this command shows warning messages while matching
2831 With --debug, this command shows warning messages while matching
2814 against ``merge-patterns`` and so on, too. It is recommended to
2832 against ``merge-patterns`` and so on, too. It is recommended to
2815 use this option with explicit file patterns and/or -I/-X options,
2833 use this option with explicit file patterns and/or -I/-X options,
2816 because this option increases amount of output per file according
2834 because this option increases amount of output per file according
2817 to configurations in hgrc.
2835 to configurations in hgrc.
2818
2836
2819 With -v/--verbose, this command shows configurations below at
2837 With -v/--verbose, this command shows configurations below at
2820 first (only if specified).
2838 first (only if specified).
2821
2839
2822 - ``--tool`` option
2840 - ``--tool`` option
2823 - ``HGMERGE`` environment variable
2841 - ``HGMERGE`` environment variable
2824 - configuration of ``ui.merge``
2842 - configuration of ``ui.merge``
2825
2843
2826 If merge tool is chosen before matching against
2844 If merge tool is chosen before matching against
2827 ``merge-patterns``, this command can't show any helpful
2845 ``merge-patterns``, this command can't show any helpful
2828 information, even with --debug. In such case, information above is
2846 information, even with --debug. In such case, information above is
2829 useful to know why a merge tool is chosen.
2847 useful to know why a merge tool is chosen.
2830 """
2848 """
    opts = pycompat.byteskwargs(opts)
    overrides = {}
    if opts[b'tool']:
        overrides[(b'ui', b'forcemerge')] = opts[b'tool']
        ui.notenoi18n(b'with --tool %r\n' % (pycompat.bytestr(opts[b'tool'])))

    with ui.configoverride(overrides, b'debugmergepatterns'):
        hgmerge = encoding.environ.get(b"HGMERGE")
        if hgmerge is not None:
            ui.notenoi18n(b'with HGMERGE=%r\n' % (pycompat.bytestr(hgmerge)))
        uimerge = ui.config(b"ui", b"merge")
        if uimerge:
            ui.notenoi18n(b'with ui.merge=%r\n' % (pycompat.bytestr(uimerge)))

        ctx = scmutil.revsingle(repo, opts.get(b'rev'))
        m = scmutil.match(ctx, pats, opts)
        changedelete = opts[b'changedelete']
        for path in ctx.walk(m):
            fctx = ctx[path]
            with ui.silent(
                error=True
            ) if not ui.debugflag else util.nullcontextmanager():
                tool, toolpath = filemerge._picktool(
                    repo,
                    ui,
                    path,
                    fctx.isbinary(),
                    b'l' in fctx.flags(),
                    changedelete,
                )
            ui.write(b'%s = %s\n' % (path, tool))
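# Editor's illustrative session (file name hypothetical):
#   $ hg debugpickmergetool --tool :merge3 file.txt
#   file.txt = :merge3
# With -v and no --tool, the effective HGMERGE/ui.merge settings are
# echoed first, matching the docstring above.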


@command(b'debugpushkey', [], _(b'REPO NAMESPACE [KEY OLD NEW]'), norepo=True)
def debugpushkey(ui, repopath, namespace, *keyinfo, **opts):
    """access the pushkey key/value protocol

    With two args, list the keys in the given namespace.

    With five args, set a key to new if it currently is set to old.
    Reports success or failure.
    """

    target = hg.peer(ui, {}, repopath)
    try:
        if keyinfo:
            key, old, new = keyinfo
            with target.commandexecutor() as e:
                r = e.callcommand(
                    b'pushkey',
                    {
                        b'namespace': namespace,
                        b'key': key,
                        b'old': old,
                        b'new': new,
                    },
                ).result()

            ui.status(pycompat.bytestr(r) + b'\n')
            return not r
        else:
            for k, v in sorted(target.listkeys(namespace).items()):
                ui.write(
                    b"%s\t%s\n"
                    % (stringutil.escapestr(k), stringutil.escapestr(v))
                )
    finally:
        target.close()
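# Editor's illustrative usage (hashes are placeholders): listing the
# bookmarks namespace, then moving one via pushkey. OLD must match the
# current value; the command prints the boolean result:
#   $ hg debugpushkey /path/to/repo bookmarks
#   main<TAB>1234abcd...            (keys and values are tab-separated)
#   $ hg debugpushkey /path/to/repo bookmarks main 1234abcd... 5678ef01...
#   True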


@command(b'debugpvec', [], _(b'A B'))
def debugpvec(ui, repo, a, b=None):
    ca = scmutil.revsingle(repo, a)
    cb = scmutil.revsingle(repo, b)
    pa = pvec.ctxpvec(ca)
    pb = pvec.ctxpvec(cb)
    if pa == pb:
        rel = b"="
    elif pa > pb:
        rel = b">"
    elif pa < pb:
        rel = b"<"
    elif pa | pb:
        rel = b"|"
    ui.write(_(b"a: %s\n") % pa)
    ui.write(_(b"b: %s\n") % pb)
    ui.write(_(b"depth(a): %d depth(b): %d\n") % (pa._depth, pb._depth))
    ui.write(
        _(b"delta: %d hdist: %d distance: %d relation: %s\n")
        % (
            abs(pa._depth - pb._depth),
            pvec._hamming(pa._vec, pb._vec),
            pa.distance(pb),
            rel,
        )
    )


@command(
    b'debugrebuilddirstate|debugrebuildstate',
    [
        (b'r', b'rev', b'', _(b'revision to rebuild to'), _(b'REV')),
        (
            b'',
            b'minimal',
            None,
            _(
                b'only rebuild files that are inconsistent with '
                b'the working copy parent'
            ),
        ),
    ],
    _(b'[-r REV]'),
)
def debugrebuilddirstate(ui, repo, rev, **opts):
2946 """rebuild the dirstate as it would look like for the given revision
2964 """rebuild the dirstate as it would look like for the given revision
2947
2965
2948 If no revision is specified the first current parent will be used.
2966 If no revision is specified the first current parent will be used.
2949
2967
2950 The dirstate will be set to the files of the given revision.
2968 The dirstate will be set to the files of the given revision.
2951 The actual working directory content or existing dirstate
2969 The actual working directory content or existing dirstate
2952 information such as adds or removes is not considered.
2970 information such as adds or removes is not considered.
2953
2971
2954 ``minimal`` will only rebuild the dirstate status for files that claim to be
2972 ``minimal`` will only rebuild the dirstate status for files that claim to be
2955 tracked but are not in the parent manifest, or that exist in the parent
2973 tracked but are not in the parent manifest, or that exist in the parent
2956 manifest but are not in the dirstate. It will not change adds, removes, or
2974 manifest but are not in the dirstate. It will not change adds, removes, or
2957 modified files that are in the working copy parent.
2975 modified files that are in the working copy parent.
2958
2976
2959 One use of this command is to make the next :hg:`status` invocation
2977 One use of this command is to make the next :hg:`status` invocation
2960 check the actual file content.
2978 check the actual file content.
2961 """
2979 """
    ctx = scmutil.revsingle(repo, rev)
    with repo.wlock():
        dirstate = repo.dirstate
        changedfiles = None
        # See command doc for what minimal does.
        if opts.get('minimal'):
            manifestfiles = set(ctx.manifest().keys())
            dirstatefiles = set(dirstate)
            manifestonly = manifestfiles - dirstatefiles
            dsonly = dirstatefiles - manifestfiles
            dsnotadded = {f for f in dsonly if not dirstate.get_entry(f).added}
            changedfiles = manifestonly | dsnotadded

        dirstate.rebuild(ctx.node(), ctx.manifest(), changedfiles)
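# Editor's note: a typical use is forcing the next status to re-check
# file content, e.g.
#   $ hg debugrebuilddirstate
#   $ hg status        # now re-examines actual file content
# With --minimal, only entries that disagree with the parent manifest
# are rewritten, preserving recorded adds and removes.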


@command(
    b'debugrebuildfncache',
    [
        (
            b'',
            b'only-data',
            False,
            _(b'only look for wrong .d files (much faster)'),
        )
    ],
    b'',
)
def debugrebuildfncache(ui, repo, **opts):
    """rebuild the fncache file"""
    opts = pycompat.byteskwargs(opts)
    repair.rebuildfncache(ui, repo, opts.get(b"only_data"))


@command(
    b'debugrename',
    [(b'r', b'rev', b'', _(b'revision to debug'), _(b'REV'))],
    _(b'[-r REV] [FILE]...'),
)
def debugrename(ui, repo, *pats, **opts):
    """dump rename information"""

    opts = pycompat.byteskwargs(opts)
    ctx = scmutil.revsingle(repo, opts.get(b'rev'))
    m = scmutil.match(ctx, pats, opts)
    for abs in ctx.walk(m):
        fctx = ctx[abs]
        o = fctx.filelog().renamed(fctx.filenode())
        rel = repo.pathto(abs)
        if o:
            ui.write(_(b"%s renamed from %s:%s\n") % (rel, o[0], hex(o[1])))
        else:
            ui.write(_(b"%s not renamed\n") % rel)


@command(b'debugrequires|debugrequirements', [], b'')
def debugrequirements(ui, repo):
    """print the current repo requirements"""
    for r in sorted(repo.requirements):
        ui.write(b"%s\n" % r)
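# Editor's illustrative output (entries vary by repository format; these
# are common requirement names in modern repositories):
#   $ hg debugrequires
#   dotencode
#   fncache
#   generaldelta
#   revlogv1
#   store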


@command(
    b'debugrevlog',
    cmdutil.debugrevlogopts + [(b'd', b'dump', False, _(b'dump index data'))],
    _(b'-c|-m|FILE'),
    optionalrepo=True,
)
def debugrevlog(ui, repo, file_=None, **opts):
    """show data and statistics about a revlog"""
    opts = pycompat.byteskwargs(opts)
    r = cmdutil.openrevlog(repo, b'debugrevlog', file_, opts)

    if opts.get(b"dump"):
        numrevs = len(r)
        ui.write(
            (
                b"# rev p1rev p2rev start   end deltastart base   p1   p2"
                b" rawsize totalsize compression heads chainlen\n"
            )
        )
        ts = 0
        heads = set()

        for rev in pycompat.xrange(numrevs):
            dbase = r.deltaparent(rev)
            if dbase == -1:
                dbase = rev
            cbase = r.chainbase(rev)
            clen = r.chainlen(rev)
            p1, p2 = r.parentrevs(rev)
            rs = r.rawsize(rev)
            ts = ts + rs
            heads -= set(r.parentrevs(rev))
            heads.add(rev)
            try:
                compression = ts / r.end(rev)
            except ZeroDivisionError:
                compression = 0
            ui.write(
                b"%5d %5d %5d %5d %5d %10d %4d %4d %4d %7d %9d "
                b"%11d %5d %8d\n"
                % (
                    rev,
                    p1,
                    p2,
                    r.start(rev),
                    r.end(rev),
                    r.start(dbase),
                    r.start(cbase),
                    r.start(p1),
                    r.start(p2),
                    rs,
                    ts,
                    compression,
                    len(heads),
                    clen,
                )
            )
        return 0

    format = r._format_version
    v = r._format_flags
    flags = []
    gdelta = False
    if v & revlog.FLAG_INLINE_DATA:
        flags.append(b'inline')
    if v & revlog.FLAG_GENERALDELTA:
        gdelta = True
        flags.append(b'generaldelta')
    if not flags:
        flags = [b'(none)']

    ### tracks merge vs single parent
    nummerges = 0

    ### tracks the ways the "delta" is built
    # nodelta
    numempty = 0
    numemptytext = 0
    numemptydelta = 0
    # full file content
    numfull = 0
    # intermediate snapshot against a prior snapshot
    numsemi = 0
    # snapshot count per depth
    numsnapdepth = collections.defaultdict(lambda: 0)
    # delta against previous revision
    numprev = 0
    # delta against first or second parent (not prev)
    nump1 = 0
    nump2 = 0
    # delta against neither prev nor parents
    numother = 0
    # delta against prev that are also first or second parent
    # (details of `numprev`)
    nump1prev = 0
    nump2prev = 0

    # data about the delta chain of each rev
    chainlengths = []
    chainbases = []
    chainspans = []

    # data about each revision
    datasize = [None, 0, 0]
    fullsize = [None, 0, 0]
    semisize = [None, 0, 0]
    # snapshot count per depth
    snapsizedepth = collections.defaultdict(lambda: [None, 0, 0])
    deltasize = [None, 0, 0]
    chunktypecounts = {}
    chunktypesizes = {}

    def addsize(size, l):
        if l[0] is None or size < l[0]:
            l[0] = size
        if size > l[1]:
            l[1] = size
        l[2] += size
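    # Editor's note: each *size list above is a [min, max, total]
    # accumulator; addsize() folds one sample in, and the totals in
    # slot 2 are later divided in place to become per-bucket averages.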

    numrevs = len(r)
    for rev in pycompat.xrange(numrevs):
        p1, p2 = r.parentrevs(rev)
        delta = r.deltaparent(rev)
        if format > 0:
            addsize(r.rawsize(rev), datasize)
        if p2 != nullrev:
            nummerges += 1
        size = r.length(rev)
        if delta == nullrev:
            chainlengths.append(0)
            chainbases.append(r.start(rev))
            chainspans.append(size)
            if size == 0:
                numempty += 1
                numemptytext += 1
            else:
                numfull += 1
                numsnapdepth[0] += 1
                addsize(size, fullsize)
                addsize(size, snapsizedepth[0])
        else:
            chainlengths.append(chainlengths[delta] + 1)
            baseaddr = chainbases[delta]
            revaddr = r.start(rev)
            chainbases.append(baseaddr)
            chainspans.append((revaddr - baseaddr) + size)
            if size == 0:
                numempty += 1
                numemptydelta += 1
            elif r.issnapshot(rev):
                addsize(size, semisize)
                numsemi += 1
                depth = r.snapshotdepth(rev)
                numsnapdepth[depth] += 1
                addsize(size, snapsizedepth[depth])
            else:
                addsize(size, deltasize)
                if delta == rev - 1:
                    numprev += 1
                    if delta == p1:
                        nump1prev += 1
                    elif delta == p2:
                        nump2prev += 1
                elif delta == p1:
                    nump1 += 1
                elif delta == p2:
                    nump2 += 1
                elif delta != nullrev:
                    numother += 1

        # Obtain data on the raw chunks in the revlog.
        if util.safehasattr(r, b'_getsegmentforrevs'):
            segment = r._getsegmentforrevs(rev, rev)[1]
        else:
            segment = r._revlog._getsegmentforrevs(rev, rev)[1]
        if segment:
            chunktype = bytes(segment[0:1])
        else:
            chunktype = b'empty'

        if chunktype not in chunktypecounts:
            chunktypecounts[chunktype] = 0
            chunktypesizes[chunktype] = 0

        chunktypecounts[chunktype] += 1
        chunktypesizes[chunktype] += size

    # Adjust size min value for empty cases
    for size in (datasize, fullsize, semisize, deltasize):
        if size[0] is None:
            size[0] = 0

    numdeltas = numrevs - numfull - numempty - numsemi
    numoprev = numprev - nump1prev - nump2prev
    totalrawsize = datasize[2]
    datasize[2] /= numrevs
    fulltotal = fullsize[2]
    if numfull == 0:
        fullsize[2] = 0
    else:
        fullsize[2] /= numfull
    semitotal = semisize[2]
    snaptotal = {}
    if numsemi > 0:
        semisize[2] /= numsemi
    for depth in snapsizedepth:
        snaptotal[depth] = snapsizedepth[depth][2]
        snapsizedepth[depth][2] /= numsnapdepth[depth]

    deltatotal = deltasize[2]
    if numdeltas > 0:
        deltasize[2] /= numdeltas
    totalsize = fulltotal + semitotal + deltatotal
    avgchainlen = sum(chainlengths) / numrevs
    maxchainlen = max(chainlengths)
    maxchainspan = max(chainspans)
    compratio = 1
    if totalsize:
        compratio = totalrawsize / totalsize

    basedfmtstr = b'%%%dd\n'
    basepcfmtstr = b'%%%dd %s(%%5.2f%%%%)\n'

    def dfmtstr(max):
        return basedfmtstr % len(str(max))

    def pcfmtstr(max, padding=0):
        return basepcfmtstr % (len(str(max)), b' ' * padding)

    def pcfmt(value, total):
        if total:
            return (value, 100 * float(value) / total)
        else:
            return value, 100.0

    ui.writenoi18n(b'format : %d\n' % format)
    ui.writenoi18n(b'flags  : %s\n' % b', '.join(flags))

    ui.write(b'\n')
    fmt = pcfmtstr(totalsize)
    fmt2 = dfmtstr(totalsize)
    ui.writenoi18n(b'revisions     : ' + fmt2 % numrevs)
    ui.writenoi18n(b'    merges    : ' + fmt % pcfmt(nummerges, numrevs))
    ui.writenoi18n(
        b'    normal    : ' + fmt % pcfmt(numrevs - nummerges, numrevs)
    )
    ui.writenoi18n(b'revisions     : ' + fmt2 % numrevs)
    ui.writenoi18n(b'    empty     : ' + fmt % pcfmt(numempty, numrevs))
    ui.writenoi18n(
        b'                   text  : '
        + fmt % pcfmt(numemptytext, numemptytext + numemptydelta)
    )
    ui.writenoi18n(
        b'                   delta : '
        + fmt % pcfmt(numemptydelta, numemptytext + numemptydelta)
    )
    ui.writenoi18n(
        b'    snapshot  : ' + fmt % pcfmt(numfull + numsemi, numrevs)
    )
    for depth in sorted(numsnapdepth):
        ui.write(
            (b'      lvl-%-3d : ' % depth)
            + fmt % pcfmt(numsnapdepth[depth], numrevs)
        )
    ui.writenoi18n(b'    deltas    : ' + fmt % pcfmt(numdeltas, numrevs))
    ui.writenoi18n(b'revision size : ' + fmt2 % totalsize)
    ui.writenoi18n(
        b'    snapshot  : ' + fmt % pcfmt(fulltotal + semitotal, totalsize)
    )
    for depth in sorted(numsnapdepth):
        ui.write(
            (b'      lvl-%-3d : ' % depth)
            + fmt % pcfmt(snaptotal[depth], totalsize)
        )
    ui.writenoi18n(b'    deltas    : ' + fmt % pcfmt(deltatotal, totalsize))

    def fmtchunktype(chunktype):
        if chunktype == b'empty':
            return b'       %s     : ' % chunktype
        elif chunktype in pycompat.bytestr(string.ascii_letters):
            return b'    0x%s (%s)  : ' % (hex(chunktype), chunktype)
        else:
            return b'    0x%s      : ' % hex(chunktype)

    ui.write(b'\n')
    ui.writenoi18n(b'chunks        : ' + fmt2 % numrevs)
    for chunktype in sorted(chunktypecounts):
        ui.write(fmtchunktype(chunktype))
        ui.write(fmt % pcfmt(chunktypecounts[chunktype], numrevs))
    ui.writenoi18n(b'chunks size   : ' + fmt2 % totalsize)
    for chunktype in sorted(chunktypecounts):
        ui.write(fmtchunktype(chunktype))
        ui.write(fmt % pcfmt(chunktypesizes[chunktype], totalsize))

    ui.write(b'\n')
    fmt = dfmtstr(max(avgchainlen, maxchainlen, maxchainspan, compratio))
    ui.writenoi18n(b'avg chain length  : ' + fmt % avgchainlen)
    ui.writenoi18n(b'max chain length  : ' + fmt % maxchainlen)
    ui.writenoi18n(b'max chain reach   : ' + fmt % maxchainspan)
    ui.writenoi18n(b'compression ratio : ' + fmt % compratio)

    if format > 0:
        ui.write(b'\n')
        ui.writenoi18n(
            b'uncompressed data size (min/max/avg) : %d / %d / %d\n'
            % tuple(datasize)
        )
        ui.writenoi18n(
            b'full revision size (min/max/avg)     : %d / %d / %d\n'
            % tuple(fullsize)
        )
        ui.writenoi18n(
            b'inter-snapshot size (min/max/avg)    : %d / %d / %d\n'
            % tuple(semisize)
        )
        for depth in sorted(snapsizedepth):
            if depth == 0:
                continue
            ui.writenoi18n(
                b'    level-%-3d (min/max/avg)          : %d / %d / %d\n'
                % ((depth,) + tuple(snapsizedepth[depth]))
            )
        ui.writenoi18n(
            b'delta size (min/max/avg)              : %d / %d / %d\n'
            % tuple(deltasize)
        )

    if numdeltas > 0:
        ui.write(b'\n')
        fmt = pcfmtstr(numdeltas)
        fmt2 = pcfmtstr(numdeltas, 4)
        ui.writenoi18n(
            b'deltas against prev  : ' + fmt % pcfmt(numprev, numdeltas)
        )
        if numprev > 0:
            ui.writenoi18n(
                b'    where prev = p1  : ' + fmt2 % pcfmt(nump1prev, numprev)
            )
            ui.writenoi18n(
                b'    where prev = p2  : ' + fmt2 % pcfmt(nump2prev, numprev)
            )
            ui.writenoi18n(
                b'    other            : ' + fmt2 % pcfmt(numoprev, numprev)
            )
        if gdelta:
            ui.writenoi18n(
                b'deltas against p1    : ' + fmt % pcfmt(nump1, numdeltas)
            )
            ui.writenoi18n(
                b'deltas against p2    : ' + fmt % pcfmt(nump2, numdeltas)
            )
            ui.writenoi18n(
                b'deltas against other : ' + fmt % pcfmt(numother, numdeltas)
            )
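# Editor's illustrative output shape (numbers invented):
#   $ hg debugrevlog -m
#   format : 1
#   flags  : inline, generaldelta
#
#   revisions     :      120
#       merges    :        8 ( 6.67%)
#       normal    :      112 (93.33%)
#   ...
#   avg chain length  :        3
#   compression ratio :        5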


@command(
    b'debugrevlogindex',
    cmdutil.debugrevlogopts
    + [(b'f', b'format', 0, _(b'revlog format'), _(b'FORMAT'))],
    _(b'[-f FORMAT] -c|-m|FILE'),
    optionalrepo=True,
)
def debugrevlogindex(ui, repo, file_=None, **opts):
    """dump the contents of a revlog index"""
    opts = pycompat.byteskwargs(opts)
    r = cmdutil.openrevlog(repo, b'debugrevlogindex', file_, opts)
    format = opts.get(b'format', 0)
    if format not in (0, 1):
        raise error.Abort(_(b"unknown format %d") % format)

    if ui.debugflag:
        shortfn = hex
    else:
        shortfn = short

    # There might not be anything in r, so have a sane default
    idlen = 12
    for i in r:
        idlen = len(shortfn(r.node(i)))
        break

    if format == 0:
        if ui.verbose:
            ui.writenoi18n(
                b"   rev    offset  length linkrev %s %s p2\n"
                % (b"nodeid".ljust(idlen), b"p1".ljust(idlen))
            )
        else:
            ui.writenoi18n(
                b"   rev linkrev %s %s p2\n"
                % (b"nodeid".ljust(idlen), b"p1".ljust(idlen))
            )
    elif format == 1:
        if ui.verbose:
            ui.writenoi18n(
                (
                    b"   rev flag   offset   length     size   link     p1"
                    b"     p2 %s\n"
                )
                % b"nodeid".rjust(idlen)
            )
        else:
            ui.writenoi18n(
                b"   rev flag     size   link     p1     p2 %s\n"
                % b"nodeid".rjust(idlen)
            )

    for i in r:
        node = r.node(i)
        if format == 0:
            try:
                pp = r.parents(node)
            except Exception:
                pp = [repo.nullid, repo.nullid]
            if ui.verbose:
                ui.write(
                    b"% 6d % 9d % 7d % 7d %s %s %s\n"
                    % (
                        i,
                        r.start(i),
                        r.length(i),
                        r.linkrev(i),
                        shortfn(node),
                        shortfn(pp[0]),
                        shortfn(pp[1]),
                    )
                )
            else:
                ui.write(
                    b"% 6d % 7d %s %s %s\n"
                    % (
                        i,
                        r.linkrev(i),
                        shortfn(node),
                        shortfn(pp[0]),
                        shortfn(pp[1]),
                    )
                )
        elif format == 1:
            pr = r.parentrevs(i)
            if ui.verbose:
                ui.write(
                    b"% 6d %04x % 8d % 8d % 8d % 6d % 6d % 6d %s\n"
                    % (
                        i,
                        r.flags(i),
                        r.start(i),
                        r.length(i),
                        r.rawsize(i),
                        r.linkrev(i),
                        pr[0],
                        pr[1],
                        shortfn(node),
                    )
                )
            else:
                ui.write(
                    b"% 6d %04x % 8d % 6d % 6d % 6d %s\n"
                    % (
                        i,
                        r.flags(i),
                        r.rawsize(i),
                        r.linkrev(i),
                        pr[0],
                        pr[1],
                        shortfn(node),
                    )
                )
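# Editor's illustrative output (format 0, non-verbose; hashes shortened
# and invented):
#   $ hg debugrevlogindex -c
#      rev linkrev nodeid       p1           p2
#        0       0 1234abcd5678 000000000000 000000000000
#        1       1 9876fedc4321 1234abcd5678 000000000000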


@command(
    b'debugrevspec',
    [
        (
            b'',
            b'optimize',
            None,
            _(b'print parsed tree after optimizing (DEPRECATED)'),
        ),
        (
            b'',
            b'show-revs',
            True,
            _(b'print list of result revisions (default)'),
        ),
        (
            b's',
            b'show-set',
            None,
            _(b'print internal representation of result set'),
        ),
        (
            b'p',
            b'show-stage',
            [],
            _(b'print parsed tree at the given stage'),
            _(b'NAME'),
        ),
        (b'', b'no-optimized', False, _(b'evaluate tree without optimization')),
        (b'', b'verify-optimized', False, _(b'verify optimized result')),
    ],
    b'REVSPEC',
)
def debugrevspec(ui, repo, expr, **opts):
3529 """parse and apply a revision specification
3547 """parse and apply a revision specification
3530
3548
3531 Use -p/--show-stage option to print the parsed tree at the given stages.
3549 Use -p/--show-stage option to print the parsed tree at the given stages.
3532 Use -p all to print tree at every stage.
3550 Use -p all to print tree at every stage.
3533
3551
3534 Use --no-show-revs option with -s or -p to print only the set
3552 Use --no-show-revs option with -s or -p to print only the set
3535 representation or the parsed tree respectively.
3553 representation or the parsed tree respectively.
3536
3554
3537 Use --verify-optimized to compare the optimized result with the unoptimized
3555 Use --verify-optimized to compare the optimized result with the unoptimized
3538 one. Returns 1 if the optimized result differs.
3556 one. Returns 1 if the optimized result differs.
3539 """
3557 """
3540 opts = pycompat.byteskwargs(opts)
3558 opts = pycompat.byteskwargs(opts)
3541 aliases = ui.configitems(b'revsetalias')
3559 aliases = ui.configitems(b'revsetalias')
3542 stages = [
3560 stages = [
3543 (b'parsed', lambda tree: tree),
3561 (b'parsed', lambda tree: tree),
3544 (
3562 (
3545 b'expanded',
3563 b'expanded',
3546 lambda tree: revsetlang.expandaliases(tree, aliases, ui.warn),
3564 lambda tree: revsetlang.expandaliases(tree, aliases, ui.warn),
3547 ),
3565 ),
3548 (b'concatenated', revsetlang.foldconcat),
3566 (b'concatenated', revsetlang.foldconcat),
3549 (b'analyzed', revsetlang.analyze),
3567 (b'analyzed', revsetlang.analyze),
3550 (b'optimized', revsetlang.optimize),
3568 (b'optimized', revsetlang.optimize),
3551 ]
3569 ]
3552 if opts[b'no_optimized']:
3570 if opts[b'no_optimized']:
3553 stages = stages[:-1]
3571 stages = stages[:-1]
3554 if opts[b'verify_optimized'] and opts[b'no_optimized']:
3572 if opts[b'verify_optimized'] and opts[b'no_optimized']:
3555 raise error.Abort(
3573 raise error.Abort(
3556 _(b'cannot use --verify-optimized with --no-optimized')
3574 _(b'cannot use --verify-optimized with --no-optimized')
3557 )
3575 )
3558 stagenames = {n for n, f in stages}
3576 stagenames = {n for n, f in stages}
3559
3577
3560 showalways = set()
3578 showalways = set()
3561 showchanged = set()
3579 showchanged = set()
3562 if ui.verbose and not opts[b'show_stage']:
3580 if ui.verbose and not opts[b'show_stage']:
3563 # show parsed tree by --verbose (deprecated)
3581 # show parsed tree by --verbose (deprecated)
3564 showalways.add(b'parsed')
3582 showalways.add(b'parsed')
3565 showchanged.update([b'expanded', b'concatenated'])
3583 showchanged.update([b'expanded', b'concatenated'])
3566 if opts[b'optimize']:
3584 if opts[b'optimize']:
3567 showalways.add(b'optimized')
3585 showalways.add(b'optimized')
3568 if opts[b'show_stage'] and opts[b'optimize']:
3586 if opts[b'show_stage'] and opts[b'optimize']:
3569 raise error.Abort(_(b'cannot use --optimize with --show-stage'))
3587 raise error.Abort(_(b'cannot use --optimize with --show-stage'))
3570 if opts[b'show_stage'] == [b'all']:
3588 if opts[b'show_stage'] == [b'all']:
3571 showalways.update(stagenames)
3589 showalways.update(stagenames)
3572 else:
3590 else:
3573 for n in opts[b'show_stage']:
3591 for n in opts[b'show_stage']:
3574 if n not in stagenames:
3592 if n not in stagenames:
3575 raise error.Abort(_(b'invalid stage name: %s') % n)
3593 raise error.Abort(_(b'invalid stage name: %s') % n)
3576 showalways.update(opts[b'show_stage'])
3594 showalways.update(opts[b'show_stage'])
3577
3595
3578 treebystage = {}
3596 treebystage = {}
3579 printedtree = None
3597 printedtree = None
3580 tree = revsetlang.parse(expr, lookup=revset.lookupfn(repo))
3598 tree = revsetlang.parse(expr, lookup=revset.lookupfn(repo))
3581 for n, f in stages:
3599 for n, f in stages:
3582 treebystage[n] = tree = f(tree)
3600 treebystage[n] = tree = f(tree)
3583 if n in showalways or (n in showchanged and tree != printedtree):
3601 if n in showalways or (n in showchanged and tree != printedtree):
3584 if opts[b'show_stage'] or n != b'parsed':
3602 if opts[b'show_stage'] or n != b'parsed':
3585 ui.write(b"* %s:\n" % n)
3603 ui.write(b"* %s:\n" % n)
3586 ui.write(revsetlang.prettyformat(tree), b"\n")
3604 ui.write(revsetlang.prettyformat(tree), b"\n")
3587 printedtree = tree
3605 printedtree = tree
3588
3606
3589 if opts[b'verify_optimized']:
3607 if opts[b'verify_optimized']:
3590 arevs = revset.makematcher(treebystage[b'analyzed'])(repo)
3608 arevs = revset.makematcher(treebystage[b'analyzed'])(repo)
3591 brevs = revset.makematcher(treebystage[b'optimized'])(repo)
3609 brevs = revset.makematcher(treebystage[b'optimized'])(repo)
3592 if opts[b'show_set'] or (opts[b'show_set'] is None and ui.verbose):
3610 if opts[b'show_set'] or (opts[b'show_set'] is None and ui.verbose):
3593 ui.writenoi18n(
3611 ui.writenoi18n(
3594 b"* analyzed set:\n", stringutil.prettyrepr(arevs), b"\n"
3612 b"* analyzed set:\n", stringutil.prettyrepr(arevs), b"\n"
3595 )
3613 )
3596 ui.writenoi18n(
3614 ui.writenoi18n(
3597 b"* optimized set:\n", stringutil.prettyrepr(brevs), b"\n"
3615 b"* optimized set:\n", stringutil.prettyrepr(brevs), b"\n"
3598 )
3616 )
3599 arevs = list(arevs)
3617 arevs = list(arevs)
3600 brevs = list(brevs)
3618 brevs = list(brevs)
3601 if arevs == brevs:
3619 if arevs == brevs:
3602 return 0
3620 return 0
3603 ui.writenoi18n(b'--- analyzed\n', label=b'diff.file_a')
3621 ui.writenoi18n(b'--- analyzed\n', label=b'diff.file_a')
3604 ui.writenoi18n(b'+++ optimized\n', label=b'diff.file_b')
3622 ui.writenoi18n(b'+++ optimized\n', label=b'diff.file_b')
3605 sm = difflib.SequenceMatcher(None, arevs, brevs)
3623 sm = difflib.SequenceMatcher(None, arevs, brevs)
3606 for tag, alo, ahi, blo, bhi in sm.get_opcodes():
3624 for tag, alo, ahi, blo, bhi in sm.get_opcodes():
3607 if tag in ('delete', 'replace'):
3625 if tag in ('delete', 'replace'):
3608 for c in arevs[alo:ahi]:
3626 for c in arevs[alo:ahi]:
3609 ui.write(b'-%d\n' % c, label=b'diff.deleted')
3627 ui.write(b'-%d\n' % c, label=b'diff.deleted')
3610 if tag in ('insert', 'replace'):
3628 if tag in ('insert', 'replace'):
3611 for c in brevs[blo:bhi]:
3629 for c in brevs[blo:bhi]:
3612 ui.write(b'+%d\n' % c, label=b'diff.inserted')
3630 ui.write(b'+%d\n' % c, label=b'diff.inserted')
3613 if tag == 'equal':
3631 if tag == 'equal':
3614 for c in arevs[alo:ahi]:
3632 for c in arevs[alo:ahi]:
3615 ui.write(b' %d\n' % c)
3633 ui.write(b' %d\n' % c)
3616 return 1
3634 return 1
3617
3635
3618 func = revset.makematcher(tree)
3636 func = revset.makematcher(tree)
3619 revs = func(repo)
3637 revs = func(repo)
3620 if opts[b'show_set'] or (opts[b'show_set'] is None and ui.verbose):
3638 if opts[b'show_set'] or (opts[b'show_set'] is None and ui.verbose):
3621 ui.writenoi18n(b"* set:\n", stringutil.prettyrepr(revs), b"\n")
3639 ui.writenoi18n(b"* set:\n", stringutil.prettyrepr(revs), b"\n")
3622 if not opts[b'show_revs']:
3640 if not opts[b'show_revs']:
3623 return
3641 return
3624 for c in revs:
3642 for c in revs:
3625 ui.write(b"%d\n" % c)
3643 ui.write(b"%d\n" % c)
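
# Hedged sketch of the stage pipeline above: each stage consumes the tree
# produced by the previous one. 'expr' is a bytes revset such as
# b"heads(default)"; 'aliases' matches ui.configitems(b'revsetalias').
from mercurial import revsetlang

def _run_revset_stages(expr, aliases=(), warn=None):
    tree = revsetlang.parse(expr)
    tree = revsetlang.expandaliases(tree, aliases, warn)
    tree = revsetlang.foldconcat(tree)
    tree = revsetlang.analyze(tree)
    tree = revsetlang.optimize(tree)
    return revsetlang.prettyformat(tree)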
3626
3644
3627
3645
3628 @command(
3646 @command(
3629 b'debugserve',
3647 b'debugserve',
3630 [
3648 [
3631 (
3649 (
3632 b'',
3650 b'',
3633 b'sshstdio',
3651 b'sshstdio',
3634 False,
3652 False,
3635 _(b'run an SSH server bound to process handles'),
3653 _(b'run an SSH server bound to process handles'),
3636 ),
3654 ),
3637 (b'', b'logiofd', b'', _(b'file descriptor to log server I/O to')),
3655 (b'', b'logiofd', b'', _(b'file descriptor to log server I/O to')),
3638 (b'', b'logiofile', b'', _(b'file to log server I/O to')),
3656 (b'', b'logiofile', b'', _(b'file to log server I/O to')),
3639 ],
3657 ],
3640 b'',
3658 b'',
3641 )
3659 )
3642 def debugserve(ui, repo, **opts):
3660 def debugserve(ui, repo, **opts):
3643 """run a server with advanced settings
3661 """run a server with advanced settings
3644
3662
3645 This command is similar to :hg:`serve`. It exists partially as a
3663 This command is similar to :hg:`serve`. It exists partially as a
3646 workaround for the fact that ``hg serve --stdio`` must have specific
3664 workaround for the fact that ``hg serve --stdio`` must have specific
3647 arguments for security reasons.
3665 arguments for security reasons.
3648 """
3666 """
3649 opts = pycompat.byteskwargs(opts)
3667 opts = pycompat.byteskwargs(opts)
3650
3668
3651 if not opts[b'sshstdio']:
3669 if not opts[b'sshstdio']:
3652 raise error.Abort(_(b'only --sshstdio is currently supported'))
3670 raise error.Abort(_(b'only --sshstdio is currently supported'))
3653
3671
3654 logfh = None
3672 logfh = None
3655
3673
3656 if opts[b'logiofd'] and opts[b'logiofile']:
3674 if opts[b'logiofd'] and opts[b'logiofile']:
3657 raise error.Abort(_(b'cannot use both --logiofd and --logiofile'))
3675 raise error.Abort(_(b'cannot use both --logiofd and --logiofile'))
3658
3676
3659 if opts[b'logiofd']:
3677 if opts[b'logiofd']:
3660 # Ideally we would be line buffered. But line buffering in binary
3678 # Ideally we would be line buffered. But line buffering in binary
3661 # mode isn't supported and emits a warning in Python 3.8+. Disabling
3679 # mode isn't supported and emits a warning in Python 3.8+. Disabling
3662 # buffering could have performance impacts. But since this isn't
3680 # buffering could have performance impacts. But since this isn't
3663 # performance critical code, it should be fine.
3681 # performance critical code, it should be fine.
3664 try:
3682 try:
3665 logfh = os.fdopen(int(opts[b'logiofd']), 'ab', 0)
3683 logfh = os.fdopen(int(opts[b'logiofd']), 'ab', 0)
3666 except OSError as e:
3684 except OSError as e:
3667 if e.errno != errno.ESPIPE:
3685 if e.errno != errno.ESPIPE:
3668 raise
3686 raise
3669 # can't seek a pipe, so `ab` mode fails on py3
3687 # can't seek a pipe, so `ab` mode fails on py3
3670 logfh = os.fdopen(int(opts[b'logiofd']), 'wb', 0)
3688 logfh = os.fdopen(int(opts[b'logiofd']), 'wb', 0)
3671 elif opts[b'logiofile']:
3689 elif opts[b'logiofile']:
3672 logfh = open(opts[b'logiofile'], b'ab', 0)
3690 logfh = open(opts[b'logiofile'], b'ab', 0)
3673
3691
3674 s = wireprotoserver.sshserver(ui, repo, logfh=logfh)
3692 s = wireprotoserver.sshserver(ui, repo, logfh=logfh)
3675 s.serve_forever()
3693 s.serve_forever()
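
# A minimal restatement of the --logiofd fallback above: appending needs a
# seekable file, so opening a pipe descriptor in 'ab' mode raises ESPIPE,
# and we retry in plain write mode. Buffering 0 keeps log writes timely.
import errno
import os

def _open_log_handle(fd):
    try:
        return os.fdopen(fd, 'ab', 0)
    except OSError as e:
        if e.errno != errno.ESPIPE:
            raise
        return os.fdopen(fd, 'wb', 0)  # pipes cannot seek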
3676
3694
3677
3695
3678 @command(b'debugsetparents', [], _(b'REV1 [REV2]'))
3696 @command(b'debugsetparents', [], _(b'REV1 [REV2]'))
3679 def debugsetparents(ui, repo, rev1, rev2=None):
3697 def debugsetparents(ui, repo, rev1, rev2=None):
3680 """manually set the parents of the current working directory (DANGEROUS)
3698 """manually set the parents of the current working directory (DANGEROUS)
3681
3699
3682 This command is not what you are looking for and should not be used. Using
3700 This command is not what you are looking for and should not be used. Using
3683 this command will most certainly result in slight corruption of the file
3701 this command will most certainly result in slight corruption of the file
3684 level histories within your repository. DO NOT USE THIS COMMAND.
3702 level histories within your repository. DO NOT USE THIS COMMAND.
3685
3703
3686 The command updates the p1 and p2 fields in the dirstate without touching
3704 The command updates the p1 and p2 fields in the dirstate without touching
3687 anything else. This is useful for writing repository conversion tools, but
3705 anything else. This is useful for writing repository conversion tools, but
3688 should be used with extreme care. For example, neither the working
3706 should be used with extreme care. For example, neither the working
3689 directory nor the dirstate is updated, so file status may be incorrect
3707 directory nor the dirstate is updated, so file status may be incorrect
3690 after running this command. Only use it if you are one of the few people who
3708 after running this command. Only use it if you are one of the few people who
3691 deeply understand both conversion tools and file level histories. If you are
3709 deeply understand both conversion tools and file level histories. If you are
3692 reading this help, you are not one of those people (most of them sailed west
3710 reading this help, you are not one of those people (most of them sailed west
3693 from Mithlond anyway).
3711 from Mithlond anyway).
3694
3712
3695 So one last time DO NOT USE THIS COMMAND.
3713 So one last time DO NOT USE THIS COMMAND.
3696
3714
3697 Returns 0 on success.
3715 Returns 0 on success.
3698 """
3716 """
3699
3717
3700 node1 = scmutil.revsingle(repo, rev1).node()
3718 node1 = scmutil.revsingle(repo, rev1).node()
3701 node2 = scmutil.revsingle(repo, rev2, b'null').node()
3719 node2 = scmutil.revsingle(repo, rev2, b'null').node()
3702
3720
3703 with repo.wlock():
3721 with repo.wlock():
3704 repo.setparents(node1, node2)
3722 repo.setparents(node1, node2)
3705
3723
3706
3724
3707 @command(b'debugsidedata', cmdutil.debugrevlogopts, _(b'-c|-m|FILE REV'))
3725 @command(b'debugsidedata', cmdutil.debugrevlogopts, _(b'-c|-m|FILE REV'))
3708 def debugsidedata(ui, repo, file_, rev=None, **opts):
3726 def debugsidedata(ui, repo, file_, rev=None, **opts):
3709 """dump the side data for a cl/manifest/file revision
3727 """dump the side data for a cl/manifest/file revision
3710
3728
3711 Use --verbose to dump the sidedata content."""
3729 Use --verbose to dump the sidedata content."""
3712 opts = pycompat.byteskwargs(opts)
3730 opts = pycompat.byteskwargs(opts)
3713 if opts.get(b'changelog') or opts.get(b'manifest') or opts.get(b'dir'):
3731 if opts.get(b'changelog') or opts.get(b'manifest') or opts.get(b'dir'):
3714 if rev is not None:
3732 if rev is not None:
3715 raise error.CommandError(b'debugdata', _(b'invalid arguments'))
3733 raise error.CommandError(b'debugdata', _(b'invalid arguments'))
3716 file_, rev = None, file_
3734 file_, rev = None, file_
3717 elif rev is None:
3735 elif rev is None:
3718 raise error.CommandError(b'debugdata', _(b'invalid arguments'))
3736 raise error.CommandError(b'debugdata', _(b'invalid arguments'))
3719 r = cmdutil.openstorage(repo, b'debugdata', file_, opts)
3737 r = cmdutil.openstorage(repo, b'debugdata', file_, opts)
3720 r = getattr(r, '_revlog', r)
3738 r = getattr(r, '_revlog', r)
3721 try:
3739 try:
3722 sidedata = r.sidedata(r.lookup(rev))
3740 sidedata = r.sidedata(r.lookup(rev))
3723 except KeyError:
3741 except KeyError:
3724 raise error.Abort(_(b'invalid revision identifier %s') % rev)
3742 raise error.Abort(_(b'invalid revision identifier %s') % rev)
3725 if sidedata:
3743 if sidedata:
3726 sidedata = list(sidedata.items())
3744 sidedata = list(sidedata.items())
3727 sidedata.sort()
3745 sidedata.sort()
3728 ui.writenoi18n(b'%d sidedata entries\n' % len(sidedata))
3746 ui.writenoi18n(b'%d sidedata entries\n' % len(sidedata))
3729 for key, value in sidedata:
3747 for key, value in sidedata:
3730 ui.writenoi18n(b' entry-%04o size %d\n' % (key, len(value)))
3748 ui.writenoi18n(b' entry-%04o size %d\n' % (key, len(value)))
3731 if ui.verbose:
3749 if ui.verbose:
3732 ui.writenoi18n(b' %s\n' % stringutil.pprint(value))
3750 ui.writenoi18n(b' %s\n' % stringutil.pprint(value))
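
# Hedged sketch of the lookup above: fetch the side data mapping for one
# revision of a revlog. 'rl' is an assumed revlog instance (for example
# repo.changelog); keys are small integers, values are raw byte strings.
def _dump_sidedata(rl, rev):
    sidedata = rl.sidedata(rl.lookup(rev))
    for key, value in sorted(sidedata.items()):
        print('entry %d: %d bytes' % (key, len(value)))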
3733
3751
3734
3752
3735 @command(b'debugssl', [], b'[SOURCE]', optionalrepo=True)
3753 @command(b'debugssl', [], b'[SOURCE]', optionalrepo=True)
3736 def debugssl(ui, repo, source=None, **opts):
3754 def debugssl(ui, repo, source=None, **opts):
3737 """test a secure connection to a server
3755 """test a secure connection to a server
3738
3756
3739 This builds the certificate chain for the server on Windows, installing the
3757 This builds the certificate chain for the server on Windows, installing the
3740 missing intermediates and trusted root via Windows Update if necessary. It
3758 missing intermediates and trusted root via Windows Update if necessary. It
3741 does nothing on other platforms.
3759 does nothing on other platforms.
3742
3760
3743 If SOURCE is omitted, the 'default' path will be used. If a URL is given,
3761 If SOURCE is omitted, the 'default' path will be used. If a URL is given,
3744 that server is used. See :hg:`help urls` for more information.
3762 that server is used. See :hg:`help urls` for more information.
3745
3763
3746 If the update succeeds, retry the original operation. Otherwise, the cause
3764 If the update succeeds, retry the original operation. Otherwise, the cause
3747 of the SSL error is likely another issue.
3765 of the SSL error is likely another issue.
3748 """
3766 """
3749 if not pycompat.iswindows:
3767 if not pycompat.iswindows:
3750 raise error.Abort(
3768 raise error.Abort(
3751 _(b'certificate chain building is only possible on Windows')
3769 _(b'certificate chain building is only possible on Windows')
3752 )
3770 )
3753
3771
3754 if not source:
3772 if not source:
3755 if not repo:
3773 if not repo:
3756 raise error.Abort(
3774 raise error.Abort(
3757 _(
3775 _(
3758 b"there is no Mercurial repository here, and no "
3776 b"there is no Mercurial repository here, and no "
3759 b"server specified"
3777 b"server specified"
3760 )
3778 )
3761 )
3779 )
3762 source = b"default"
3780 source = b"default"
3763
3781
3764 source, branches = urlutil.get_unique_pull_path(
3782 source, branches = urlutil.get_unique_pull_path(
3765 b'debugssl', repo, ui, source
3783 b'debugssl', repo, ui, source
3766 )
3784 )
3767 url = urlutil.url(source)
3785 url = urlutil.url(source)
3768
3786
3769 defaultport = {b'https': 443, b'ssh': 22}
3787 defaultport = {b'https': 443, b'ssh': 22}
3770 if url.scheme in defaultport:
3788 if url.scheme in defaultport:
3771 try:
3789 try:
3772 addr = (url.host, int(url.port or defaultport[url.scheme]))
3790 addr = (url.host, int(url.port or defaultport[url.scheme]))
3773 except ValueError:
3791 except ValueError:
3774 raise error.Abort(_(b"malformed port number in URL"))
3792 raise error.Abort(_(b"malformed port number in URL"))
3775 else:
3793 else:
3776 raise error.Abort(_(b"only https and ssh connections are supported"))
3794 raise error.Abort(_(b"only https and ssh connections are supported"))
3777
3795
3778 from . import win32
3796 from . import win32
3779
3797
3780 s = ssl.wrap_socket(
3798 s = ssl.wrap_socket(
3781 socket.socket(),
3799 socket.socket(),
3782 ssl_version=ssl.PROTOCOL_TLS,
3800 ssl_version=ssl.PROTOCOL_TLS,
3783 cert_reqs=ssl.CERT_NONE,
3801 cert_reqs=ssl.CERT_NONE,
3784 ca_certs=None,
3802 ca_certs=None,
3785 )
3803 )
3786
3804
3787 try:
3805 try:
3788 s.connect(addr)
3806 s.connect(addr)
3789 cert = s.getpeercert(True)
3807 cert = s.getpeercert(True)
3790
3808
3791 ui.status(_(b'checking the certificate chain for %s\n') % url.host)
3809 ui.status(_(b'checking the certificate chain for %s\n') % url.host)
3792
3810
3793 complete = win32.checkcertificatechain(cert, build=False)
3811 complete = win32.checkcertificatechain(cert, build=False)
3794
3812
3795 if not complete:
3813 if not complete:
3796 ui.status(_(b'certificate chain is incomplete, updating... '))
3814 ui.status(_(b'certificate chain is incomplete, updating... '))
3797
3815
3798 if not win32.checkcertificatechain(cert):
3816 if not win32.checkcertificatechain(cert):
3799 ui.status(_(b'failed.\n'))
3817 ui.status(_(b'failed.\n'))
3800 else:
3818 else:
3801 ui.status(_(b'done.\n'))
3819 ui.status(_(b'done.\n'))
3802 else:
3820 else:
3803 ui.status(_(b'full certificate chain is available\n'))
3821 ui.status(_(b'full certificate chain is available\n'))
3804 finally:
3822 finally:
3805 s.close()
3823 s.close()
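
# Hedged sketch of the probe above using the modern SSLContext API
# (ssl.wrap_socket, used in the code above, is deprecated and removed in
# Python 3.12). Verification is disabled because we only want the raw
# certificate chain, as in debugssl; host and port are assumptions.
import socket
import ssl

def _fetch_peer_cert(host, port=443):
    ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
    ctx.check_hostname = False
    ctx.verify_mode = ssl.CERT_NONE
    with socket.create_connection((host, port)) as sock:
        with ctx.wrap_socket(sock, server_hostname=host) as ssock:
            return ssock.getpeercert(binary_form=True)  # DER-encoded bytes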
3806
3824
3807
3825
3808 @command(
3826 @command(
3809 b"debugbackupbundle",
3827 b"debugbackupbundle",
3810 [
3828 [
3811 (
3829 (
3812 b"",
3830 b"",
3813 b"recover",
3831 b"recover",
3814 b"",
3832 b"",
3815 b"brings the specified changeset back into the repository",
3833 b"brings the specified changeset back into the repository",
3816 )
3834 )
3817 ]
3835 ]
3818 + cmdutil.logopts,
3836 + cmdutil.logopts,
3819 _(b"hg debugbackupbundle [--recover HASH]"),
3837 _(b"hg debugbackupbundle [--recover HASH]"),
3820 )
3838 )
3821 def debugbackupbundle(ui, repo, *pats, **opts):
3839 def debugbackupbundle(ui, repo, *pats, **opts):
3822 """lists the changesets available in backup bundles
3840 """lists the changesets available in backup bundles
3823
3841
3824 Without any arguments, this command prints a list of the changesets in each
3842 Without any arguments, this command prints a list of the changesets in each
3825 backup bundle.
3843 backup bundle.
3826
3844
3827 --recover takes a changeset hash and unbundles the first bundle that
3845 --recover takes a changeset hash and unbundles the first bundle that
3828 contains that hash, which puts that changeset back in your repository.
3846 contains that hash, which puts that changeset back in your repository.
3829
3847
3830 --verbose will print the entire commit message and the bundle path for that
3848 --verbose will print the entire commit message and the bundle path for that
3831 backup.
3849 backup.
3832 """
3850 """
3833 backups = list(
3851 backups = list(
3834 filter(
3852 filter(
3835 os.path.isfile, glob.glob(repo.vfs.join(b"strip-backup") + b"/*.hg")
3853 os.path.isfile, glob.glob(repo.vfs.join(b"strip-backup") + b"/*.hg")
3836 )
3854 )
3837 )
3855 )
3838 backups.sort(key=lambda x: os.path.getmtime(x), reverse=True)
3856 backups.sort(key=lambda x: os.path.getmtime(x), reverse=True)
3839
3857
3840 opts = pycompat.byteskwargs(opts)
3858 opts = pycompat.byteskwargs(opts)
3841 opts[b"bundle"] = b""
3859 opts[b"bundle"] = b""
3842 opts[b"force"] = None
3860 opts[b"force"] = None
3843 limit = logcmdutil.getlimit(opts)
3861 limit = logcmdutil.getlimit(opts)
3844
3862
3845 def display(other, chlist, displayer):
3863 def display(other, chlist, displayer):
3846 if opts.get(b"newest_first"):
3864 if opts.get(b"newest_first"):
3847 chlist.reverse()
3865 chlist.reverse()
3848 count = 0
3866 count = 0
3849 for n in chlist:
3867 for n in chlist:
3850 if limit is not None and count >= limit:
3868 if limit is not None and count >= limit:
3851 break
3869 break
3852 parents = [
3870 parents = [
3853 True for p in other.changelog.parents(n) if p != repo.nullid
3871 True for p in other.changelog.parents(n) if p != repo.nullid
3854 ]
3872 ]
3855 if opts.get(b"no_merges") and len(parents) == 2:
3873 if opts.get(b"no_merges") and len(parents) == 2:
3856 continue
3874 continue
3857 count += 1
3875 count += 1
3858 displayer.show(other[n])
3876 displayer.show(other[n])
3859
3877
3860 recovernode = opts.get(b"recover")
3878 recovernode = opts.get(b"recover")
3861 if recovernode:
3879 if recovernode:
3862 if scmutil.isrevsymbol(repo, recovernode):
3880 if scmutil.isrevsymbol(repo, recovernode):
3863 ui.warn(_(b"%s already exists in the repo\n") % recovernode)
3881 ui.warn(_(b"%s already exists in the repo\n") % recovernode)
3864 return
3882 return
3865 elif backups:
3883 elif backups:
3866 msg = _(
3884 msg = _(
3867 b"Recover changesets using: hg debugbackupbundle --recover "
3885 b"Recover changesets using: hg debugbackupbundle --recover "
3868 b"<changeset hash>\n\nAvailable backup changesets:"
3886 b"<changeset hash>\n\nAvailable backup changesets:"
3869 )
3887 )
3870 ui.status(msg, label=b"status.removed")
3888 ui.status(msg, label=b"status.removed")
3871 else:
3889 else:
3872 ui.status(_(b"no backup changesets found\n"))
3890 ui.status(_(b"no backup changesets found\n"))
3873 return
3891 return
3874
3892
3875 for backup in backups:
3893 for backup in backups:
3876 # Much of this is copied from the hg incoming logic
3894 # Much of this is copied from the hg incoming logic
3877 source = os.path.relpath(backup, encoding.getcwd())
3895 source = os.path.relpath(backup, encoding.getcwd())
3878 source, branches = urlutil.get_unique_pull_path(
3896 source, branches = urlutil.get_unique_pull_path(
3879 b'debugbackupbundle',
3897 b'debugbackupbundle',
3880 repo,
3898 repo,
3881 ui,
3899 ui,
3882 source,
3900 source,
3883 default_branches=opts.get(b'branch'),
3901 default_branches=opts.get(b'branch'),
3884 )
3902 )
3885 try:
3903 try:
3886 other = hg.peer(repo, opts, source)
3904 other = hg.peer(repo, opts, source)
3887 except error.LookupError as ex:
3905 except error.LookupError as ex:
3888 msg = _(b"\nwarning: unable to open bundle %s") % source
3906 msg = _(b"\nwarning: unable to open bundle %s") % source
3889 hint = _(b"\n(missing parent rev %s)\n") % short(ex.name)
3907 hint = _(b"\n(missing parent rev %s)\n") % short(ex.name)
3890 ui.warn(msg, hint=hint)
3908 ui.warn(msg, hint=hint)
3891 continue
3909 continue
3892 revs, checkout = hg.addbranchrevs(
3910 revs, checkout = hg.addbranchrevs(
3893 repo, other, branches, opts.get(b"rev")
3911 repo, other, branches, opts.get(b"rev")
3894 )
3912 )
3895
3913
3896 if revs:
3914 if revs:
3897 revs = [other.lookup(rev) for rev in revs]
3915 revs = [other.lookup(rev) for rev in revs]
3898
3916
3899 with ui.silent():
3917 with ui.silent():
3900 try:
3918 try:
3901 other, chlist, cleanupfn = bundlerepo.getremotechanges(
3919 other, chlist, cleanupfn = bundlerepo.getremotechanges(
3902 ui, repo, other, revs, opts[b"bundle"], opts[b"force"]
3920 ui, repo, other, revs, opts[b"bundle"], opts[b"force"]
3903 )
3921 )
3904 except error.LookupError:
3922 except error.LookupError:
3905 continue
3923 continue
3906
3924
3907 try:
3925 try:
3908 if not chlist:
3926 if not chlist:
3909 continue
3927 continue
3910 if recovernode:
3928 if recovernode:
3911 with repo.lock(), repo.transaction(b"unbundle") as tr:
3929 with repo.lock(), repo.transaction(b"unbundle") as tr:
3912 if scmutil.isrevsymbol(other, recovernode):
3930 if scmutil.isrevsymbol(other, recovernode):
3913 ui.status(_(b"Unbundling %s\n") % (recovernode))
3931 ui.status(_(b"Unbundling %s\n") % (recovernode))
3914 f = hg.openpath(ui, source)
3932 f = hg.openpath(ui, source)
3915 gen = exchange.readbundle(ui, f, source)
3933 gen = exchange.readbundle(ui, f, source)
3916 if isinstance(gen, bundle2.unbundle20):
3934 if isinstance(gen, bundle2.unbundle20):
3917 bundle2.applybundle(
3935 bundle2.applybundle(
3918 repo,
3936 repo,
3919 gen,
3937 gen,
3920 tr,
3938 tr,
3921 source=b"unbundle",
3939 source=b"unbundle",
3922 url=b"bundle:" + source,
3940 url=b"bundle:" + source,
3923 )
3941 )
3924 else:
3942 else:
3925 gen.apply(repo, b"unbundle", b"bundle:" + source)
3943 gen.apply(repo, b"unbundle", b"bundle:" + source)
3926 break
3944 break
3927 else:
3945 else:
3928 backupdate = encoding.strtolocal(
3946 backupdate = encoding.strtolocal(
3929 time.strftime(
3947 time.strftime(
3930 "%a %H:%M, %Y-%m-%d",
3948 "%a %H:%M, %Y-%m-%d",
3931 time.localtime(os.path.getmtime(source)),
3949 time.localtime(os.path.getmtime(source)),
3932 )
3950 )
3933 )
3951 )
3934 ui.status(b"\n%s\n" % (backupdate.ljust(50)))
3952 ui.status(b"\n%s\n" % (backupdate.ljust(50)))
3935 if ui.verbose:
3953 if ui.verbose:
3936 ui.status(b"%s%s\n" % (b"bundle:".ljust(13), source))
3954 ui.status(b"%s%s\n" % (b"bundle:".ljust(13), source))
3937 else:
3955 else:
3938 opts[
3956 opts[
3939 b"template"
3957 b"template"
3940 ] = b"{label('status.modified', node|short)} {desc|firstline}\n"
3958 ] = b"{label('status.modified', node|short)} {desc|firstline}\n"
3941 displayer = logcmdutil.changesetdisplayer(
3959 displayer = logcmdutil.changesetdisplayer(
3942 ui, other, opts, False
3960 ui, other, opts, False
3943 )
3961 )
3944 display(other, chlist, displayer)
3962 display(other, chlist, displayer)
3945 displayer.close()
3963 displayer.close()
3946 finally:
3964 finally:
3947 cleanupfn()
3965 cleanupfn()
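
# Hedged restatement of the backup discovery above, without a repo object:
# strip backups live under .hg/strip-backup/*.hg and are listed newest
# first. 'repopath' is an assumed filesystem path to a repository root.
import glob
import os

def _list_strip_backups(repopath):
    pattern = os.path.join(repopath, '.hg', 'strip-backup', '*.hg')
    backups = [p for p in glob.glob(pattern) if os.path.isfile(p)]
    backups.sort(key=os.path.getmtime, reverse=True)
    return backups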
3948
3966
3949
3967
3950 @command(
3968 @command(
3951 b'debugsub',
3969 b'debugsub',
3952 [(b'r', b'rev', b'', _(b'revision to check'), _(b'REV'))],
3970 [(b'r', b'rev', b'', _(b'revision to check'), _(b'REV'))],
3953 _(b'[-r REV] [REV]'),
3971 _(b'[-r REV] [REV]'),
3954 )
3972 )
3955 def debugsub(ui, repo, rev=None):
3973 def debugsub(ui, repo, rev=None):
3956 ctx = scmutil.revsingle(repo, rev, None)
3974 ctx = scmutil.revsingle(repo, rev, None)
3957 for k, v in sorted(ctx.substate.items()):
3975 for k, v in sorted(ctx.substate.items()):
3958 ui.writenoi18n(b'path %s\n' % k)
3976 ui.writenoi18n(b'path %s\n' % k)
3959 ui.writenoi18n(b' source %s\n' % v[0])
3977 ui.writenoi18n(b' source %s\n' % v[0])
3960 ui.writenoi18n(b' revision %s\n' % v[1])
3978 ui.writenoi18n(b' revision %s\n' % v[1])
3961
3979
3962
3980
3963 @command(b'debugshell', optionalrepo=True)
3981 @command(b'debugshell', optionalrepo=True)
3964 def debugshell(ui, repo):
3982 def debugshell(ui, repo):
3965 """run an interactive Python interpreter
3983 """run an interactive Python interpreter
3966
3984
3967 The local namespace is provided with a reference to the ui and
3985 The local namespace is provided with a reference to the ui and
3968 the repo instance (if available).
3986 the repo instance (if available).
3969 """
3987 """
3970 import code
3988 import code
3971
3989
3972 imported_objects = {
3990 imported_objects = {
3973 'ui': ui,
3991 'ui': ui,
3974 'repo': repo,
3992 'repo': repo,
3975 }
3993 }
3976
3994
3977 code.interact(local=imported_objects)
3995 code.interact(local=imported_objects)
3978
3996
3979
3997
3980 @command(
3998 @command(
3981 b'debugsuccessorssets',
3999 b'debugsuccessorssets',
3982 [(b'', b'closest', False, _(b'return closest successors sets only'))],
4000 [(b'', b'closest', False, _(b'return closest successors sets only'))],
3983 _(b'[REV]'),
4001 _(b'[REV]'),
3984 )
4002 )
3985 def debugsuccessorssets(ui, repo, *revs, **opts):
4003 def debugsuccessorssets(ui, repo, *revs, **opts):
3986 """show set of successors for revision
4004 """show set of successors for revision
3987
4005
3988 A successors set of changeset A is a consistent group of revisions that
4006 A successors set of changeset A is a consistent group of revisions that
3989 succeed A. It contains only non-obsolete changesets unless the --closest
4007 succeed A. It contains only non-obsolete changesets unless the --closest
3990 option is given.
4008 option is given.
3991
4009
3992 In most cases a changeset A has a single successors set containing a single
4010 In most cases a changeset A has a single successors set containing a single
3993 successor (changeset A replaced by A').
4011 successor (changeset A replaced by A').
3994
4012
3995 A changeset that is made obsolete with no successors is called "pruned".
4013 A changeset that is made obsolete with no successors is called "pruned".
3996 Such changesets have no successors sets at all.
4014 Such changesets have no successors sets at all.
3997
4015
3998 A changeset that has been "split" will have a successors set containing
4016 A changeset that has been "split" will have a successors set containing
3999 more than one successor.
4017 more than one successor.
4000
4018
4001 A changeset that has been rewritten in multiple different ways is called
4019 A changeset that has been rewritten in multiple different ways is called
4002 "divergent". Such changesets have multiple successor sets (each of which
4020 "divergent". Such changesets have multiple successor sets (each of which
4003 may also be split, i.e. have multiple successors).
4021 may also be split, i.e. have multiple successors).
4004
4022
4005 Results are displayed as follows::
4023 Results are displayed as follows::
4006
4024
4007 <rev1>
4025 <rev1>
4008 <successors-1A>
4026 <successors-1A>
4009 <rev2>
4027 <rev2>
4010 <successors-2A>
4028 <successors-2A>
4011 <successors-2B1> <successors-2B2> <successors-2B3>
4029 <successors-2B1> <successors-2B2> <successors-2B3>
4012
4030
4013 Here rev2 has two possible (i.e. divergent) successors sets. The first
4031 Here rev2 has two possible (i.e. divergent) successors sets. The first
4014 holds one element, whereas the second holds three (i.e. the changeset has
4032 holds one element, whereas the second holds three (i.e. the changeset has
4015 been split).
4033 been split).
4016 """
4034 """
4017 # passed to successorssets caching computation from one call to another
4035 # passed to successorssets caching computation from one call to another
4018 cache = {}
4036 cache = {}
4019 ctx2str = bytes
4037 ctx2str = bytes
4020 node2str = short
4038 node2str = short
4021 for rev in logcmdutil.revrange(repo, revs):
4039 for rev in logcmdutil.revrange(repo, revs):
4022 ctx = repo[rev]
4040 ctx = repo[rev]
4023 ui.write(b'%s\n' % ctx2str(ctx))
4041 ui.write(b'%s\n' % ctx2str(ctx))
4024 for succsset in obsutil.successorssets(
4042 for succsset in obsutil.successorssets(
4025 repo, ctx.node(), closest=opts['closest'], cache=cache
4043 repo, ctx.node(), closest=opts['closest'], cache=cache
4026 ):
4044 ):
4027 if succsset:
4045 if succsset:
4028 ui.write(b' ')
4046 ui.write(b' ')
4029 ui.write(node2str(succsset[0]))
4047 ui.write(node2str(succsset[0]))
4030 for node in succsset[1:]:
4048 for node in succsset[1:]:
4031 ui.write(b' ')
4049 ui.write(b' ')
4032 ui.write(node2str(node))
4050 ui.write(node2str(node))
4033 ui.write(b'\n')
4051 ui.write(b'\n')
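
# Hedged sketch of the loop above: compute and print the successors sets
# of a single revision. 'repo' is an assumed loaded repository object; the
# cache dict amortizes the obsolescence computation across several calls.
from mercurial import obsutil
from mercurial.node import short

def _print_successors_sets(repo, rev, closest=False):
    cache = {}
    ctx = repo[rev]
    for succsset in obsutil.successorssets(
        repo, ctx.node(), closest=closest, cache=cache
    ):
        print(b' '.join(short(n) for n in succsset).decode('ascii'))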
4034
4052
4035
4053
4036 @command(b'debugtagscache', [])
4054 @command(b'debugtagscache', [])
4037 def debugtagscache(ui, repo):
4055 def debugtagscache(ui, repo):
4038 """display the contents of .hg/cache/hgtagsfnodes1"""
4056 """display the contents of .hg/cache/hgtagsfnodes1"""
4039 cache = tagsmod.hgtagsfnodescache(repo.unfiltered())
4057 cache = tagsmod.hgtagsfnodescache(repo.unfiltered())
4040 flog = repo.file(b'.hgtags')
4058 flog = repo.file(b'.hgtags')
4041 for r in repo:
4059 for r in repo:
4042 node = repo[r].node()
4060 node = repo[r].node()
4043 tagsnode = cache.getfnode(node, computemissing=False)
4061 tagsnode = cache.getfnode(node, computemissing=False)
4044 if tagsnode:
4062 if tagsnode:
4045 tagsnodedisplay = hex(tagsnode)
4063 tagsnodedisplay = hex(tagsnode)
4046 if not flog.hasnode(tagsnode):
4064 if not flog.hasnode(tagsnode):
4047 tagsnodedisplay += b' (unknown node)'
4065 tagsnodedisplay += b' (unknown node)'
4048 elif tagsnode is None:
4066 elif tagsnode is None:
4049 tagsnodedisplay = b'missing'
4067 tagsnodedisplay = b'missing'
4050 else:
4068 else:
4051 tagsnodedisplay = b'invalid'
4069 tagsnodedisplay = b'invalid'
4052
4070
4053 ui.write(b'%d %s %s\n' % (r, hex(node), tagsnodedisplay))
4071 ui.write(b'%d %s %s\n' % (r, hex(node), tagsnodedisplay))
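
# Hedged sketch of the cache probe above: ask the .hgtags filenode cache
# about one changeset without computing missing entries. 'repo' is an
# assumed loaded repository object.
from mercurial import tags as tagsmod
from mercurial.node import hex

def _tags_fnode_for(repo, rev):
    cache = tagsmod.hgtagsfnodescache(repo.unfiltered())
    fnode = cache.getfnode(repo[rev].node(), computemissing=False)
    return hex(fnode) if fnode is not None else None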
4054
4072
4055
4073
4056 @command(
4074 @command(
4057 b'debugtemplate',
4075 b'debugtemplate',
4058 [
4076 [
4059 (b'r', b'rev', [], _(b'apply template on changesets'), _(b'REV')),
4077 (b'r', b'rev', [], _(b'apply template on changesets'), _(b'REV')),
4060 (b'D', b'define', [], _(b'define template keyword'), _(b'KEY=VALUE')),
4078 (b'D', b'define', [], _(b'define template keyword'), _(b'KEY=VALUE')),
4061 ],
4079 ],
4062 _(b'[-r REV]... [-D KEY=VALUE]... TEMPLATE'),
4080 _(b'[-r REV]... [-D KEY=VALUE]... TEMPLATE'),
4063 optionalrepo=True,
4081 optionalrepo=True,
4064 )
4082 )
4065 def debugtemplate(ui, repo, tmpl, **opts):
4083 def debugtemplate(ui, repo, tmpl, **opts):
4066 """parse and apply a template
4084 """parse and apply a template
4067
4085
4068 If -r/--rev is given, the template is processed as a log template and
4086 If -r/--rev is given, the template is processed as a log template and
4069 applied to the given changesets. Otherwise, it is processed as a generic
4087 applied to the given changesets. Otherwise, it is processed as a generic
4070 template.
4088 template.
4071
4089
4072 Use --verbose to print the parsed tree.
4090 Use --verbose to print the parsed tree.
4073 """
4091 """
4074 revs = None
4092 revs = None
4075 if opts['rev']:
4093 if opts['rev']:
4076 if repo is None:
4094 if repo is None:
4077 raise error.RepoError(
4095 raise error.RepoError(
4078 _(b'there is no Mercurial repository here (.hg not found)')
4096 _(b'there is no Mercurial repository here (.hg not found)')
4079 )
4097 )
4080 revs = logcmdutil.revrange(repo, opts['rev'])
4098 revs = logcmdutil.revrange(repo, opts['rev'])
4081
4099
4082 props = {}
4100 props = {}
4083 for d in opts['define']:
4101 for d in opts['define']:
4084 try:
4102 try:
4085 k, v = (e.strip() for e in d.split(b'=', 1))
4103 k, v = (e.strip() for e in d.split(b'=', 1))
4086 if not k or k == b'ui':
4104 if not k or k == b'ui':
4087 raise ValueError
4105 raise ValueError
4088 props[k] = v
4106 props[k] = v
4089 except ValueError:
4107 except ValueError:
4090 raise error.Abort(_(b'malformed keyword definition: %s') % d)
4108 raise error.Abort(_(b'malformed keyword definition: %s') % d)
4091
4109
4092 if ui.verbose:
4110 if ui.verbose:
4093 aliases = ui.configitems(b'templatealias')
4111 aliases = ui.configitems(b'templatealias')
4094 tree = templater.parse(tmpl)
4112 tree = templater.parse(tmpl)
4095 ui.note(templater.prettyformat(tree), b'\n')
4113 ui.note(templater.prettyformat(tree), b'\n')
4096 newtree = templater.expandaliases(tree, aliases)
4114 newtree = templater.expandaliases(tree, aliases)
4097 if newtree != tree:
4115 if newtree != tree:
4098 ui.notenoi18n(
4116 ui.notenoi18n(
4099 b"* expanded:\n", templater.prettyformat(newtree), b'\n'
4117 b"* expanded:\n", templater.prettyformat(newtree), b'\n'
4100 )
4118 )
4101
4119
4102 if revs is None:
4120 if revs is None:
4103 tres = formatter.templateresources(ui, repo)
4121 tres = formatter.templateresources(ui, repo)
4104 t = formatter.maketemplater(ui, tmpl, resources=tres)
4122 t = formatter.maketemplater(ui, tmpl, resources=tres)
4105 if ui.verbose:
4123 if ui.verbose:
4106 kwds, funcs = t.symbolsuseddefault()
4124 kwds, funcs = t.symbolsuseddefault()
4107 ui.writenoi18n(b"* keywords: %s\n" % b', '.join(sorted(kwds)))
4125 ui.writenoi18n(b"* keywords: %s\n" % b', '.join(sorted(kwds)))
4108 ui.writenoi18n(b"* functions: %s\n" % b', '.join(sorted(funcs)))
4126 ui.writenoi18n(b"* functions: %s\n" % b', '.join(sorted(funcs)))
4109 ui.write(t.renderdefault(props))
4127 ui.write(t.renderdefault(props))
4110 else:
4128 else:
4111 displayer = logcmdutil.maketemplater(ui, repo, tmpl)
4129 displayer = logcmdutil.maketemplater(ui, repo, tmpl)
4112 if ui.verbose:
4130 if ui.verbose:
4113 kwds, funcs = displayer.t.symbolsuseddefault()
4131 kwds, funcs = displayer.t.symbolsuseddefault()
4114 ui.writenoi18n(b"* keywords: %s\n" % b', '.join(sorted(kwds)))
4132 ui.writenoi18n(b"* keywords: %s\n" % b', '.join(sorted(kwds)))
4115 ui.writenoi18n(b"* functions: %s\n" % b', '.join(sorted(funcs)))
4133 ui.writenoi18n(b"* functions: %s\n" % b', '.join(sorted(funcs)))
4116 for r in revs:
4134 for r in revs:
4117 displayer.show(repo[r], **pycompat.strkwargs(props))
4135 displayer.show(repo[r], **pycompat.strkwargs(props))
4118 displayer.close()
4136 displayer.close()
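
# Hedged sketch of the --verbose path above: parse a template, show its
# tree, then expand aliases and show the tree again if it changed. The
# alias list is an assumption; templates are bytes in Mercurial's API.
from mercurial import templater

def _show_template_tree(tmpl=b"{rev}:{node|short}\n", aliases=()):
    tree = templater.parse(tmpl)
    print(templater.prettyformat(tree).decode('utf-8'))
    newtree = templater.expandaliases(tree, aliases)
    if newtree != tree:
        print(templater.prettyformat(newtree).decode('utf-8'))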
4119
4137
4120
4138
4121 @command(
4139 @command(
4122 b'debuguigetpass',
4140 b'debuguigetpass',
4123 [
4141 [
4124 (b'p', b'prompt', b'', _(b'prompt text'), _(b'TEXT')),
4142 (b'p', b'prompt', b'', _(b'prompt text'), _(b'TEXT')),
4125 ],
4143 ],
4126 _(b'[-p TEXT]'),
4144 _(b'[-p TEXT]'),
4127 norepo=True,
4145 norepo=True,
4128 )
4146 )
4129 def debuguigetpass(ui, prompt=b''):
4147 def debuguigetpass(ui, prompt=b''):
4130 """show prompt to type password"""
4148 """show prompt to type password"""
4131 r = ui.getpass(prompt)
4149 r = ui.getpass(prompt)
4132 if r is None:
4150 if r is None:
4133 r = b"<default response>"
4151 r = b"<default response>"
4134 ui.writenoi18n(b'response: %s\n' % r)
4152 ui.writenoi18n(b'response: %s\n' % r)
4135
4153
4136
4154
4137 @command(
4155 @command(
4138 b'debuguiprompt',
4156 b'debuguiprompt',
4139 [
4157 [
4140 (b'p', b'prompt', b'', _(b'prompt text'), _(b'TEXT')),
4158 (b'p', b'prompt', b'', _(b'prompt text'), _(b'TEXT')),
4141 ],
4159 ],
4142 _(b'[-p TEXT]'),
4160 _(b'[-p TEXT]'),
4143 norepo=True,
4161 norepo=True,
4144 )
4162 )
4145 def debuguiprompt(ui, prompt=b''):
4163 def debuguiprompt(ui, prompt=b''):
4146 """show plain prompt"""
4164 """show plain prompt"""
4147 r = ui.prompt(prompt)
4165 r = ui.prompt(prompt)
4148 ui.writenoi18n(b'response: %s\n' % r)
4166 ui.writenoi18n(b'response: %s\n' % r)
4149
4167
4150
4168
4151 @command(b'debugupdatecaches', [])
4169 @command(b'debugupdatecaches', [])
4152 def debugupdatecaches(ui, repo, *pats, **opts):
4170 def debugupdatecaches(ui, repo, *pats, **opts):
4153 """warm all known caches in the repository"""
4171 """warm all known caches in the repository"""
4154 with repo.wlock(), repo.lock():
4172 with repo.wlock(), repo.lock():
4155 repo.updatecaches(caches=repository.CACHES_ALL)
4173 repo.updatecaches(caches=repository.CACHES_ALL)
4156
4174
4157
4175
4158 @command(
4176 @command(
4159 b'debugupgraderepo',
4177 b'debugupgraderepo',
4160 [
4178 [
4161 (
4179 (
4162 b'o',
4180 b'o',
4163 b'optimize',
4181 b'optimize',
4164 [],
4182 [],
4165 _(b'extra optimization to perform'),
4183 _(b'extra optimization to perform'),
4166 _(b'NAME'),
4184 _(b'NAME'),
4167 ),
4185 ),
4168 (b'', b'run', False, _(b'performs an upgrade')),
4186 (b'', b'run', False, _(b'performs an upgrade')),
4169 (b'', b'backup', True, _(b'keep the old repository content around')),
4187 (b'', b'backup', True, _(b'keep the old repository content around')),
4170 (b'', b'changelog', None, _(b'select the changelog for upgrade')),
4188 (b'', b'changelog', None, _(b'select the changelog for upgrade')),
4171 (b'', b'manifest', None, _(b'select the manifest for upgrade')),
4189 (b'', b'manifest', None, _(b'select the manifest for upgrade')),
4172 (b'', b'filelogs', None, _(b'select all filelogs for upgrade')),
4190 (b'', b'filelogs', None, _(b'select all filelogs for upgrade')),
4173 ],
4191 ],
4174 )
4192 )
4175 def debugupgraderepo(ui, repo, run=False, optimize=None, backup=True, **opts):
4193 def debugupgraderepo(ui, repo, run=False, optimize=None, backup=True, **opts):
4176 """upgrade a repository to use different features
4194 """upgrade a repository to use different features
4177
4195
4178 If no arguments are specified, the repository is evaluated for upgrade
4196 If no arguments are specified, the repository is evaluated for upgrade
4179 and a list of problems and potential optimizations is printed.
4197 and a list of problems and potential optimizations is printed.
4180
4198
4181 With ``--run``, a repository upgrade is performed. Behavior of the upgrade
4199 With ``--run``, a repository upgrade is performed. Behavior of the upgrade
4182 can be influenced via additional arguments. More details will be provided
4200 can be influenced via additional arguments. More details will be provided
4183 by the command output when run without ``--run``.
4201 by the command output when run without ``--run``.
4184
4202
4185 During the upgrade, the repository will be locked and no writes will be
4203 During the upgrade, the repository will be locked and no writes will be
4186 allowed.
4204 allowed.
4187
4205
4188 At the end of the upgrade, the repository may not be readable while new
4206 At the end of the upgrade, the repository may not be readable while new
4189 repository data is swapped in. This window will be as long as it takes to
4207 repository data is swapped in. This window will be as long as it takes to
4190 rename some directories inside the ``.hg`` directory. On most machines, this
4208 rename some directories inside the ``.hg`` directory. On most machines, this
4191 should complete almost instantaneously and the chances of a consumer being
4209 should complete almost instantaneously and the chances of a consumer being
4192 unable to access the repository should be low.
4210 unable to access the repository should be low.
4193
4211
4194 By default, all revlogs will be upgraded. You can restrict this using flags
4212 By default, all revlogs will be upgraded. You can restrict this using flags
4195 such as `--manifest`:
4213 such as `--manifest`:
4196
4214
4197 * `--manifest`: only optimize the manifest
4215 * `--manifest`: only optimize the manifest
4198 * `--no-manifest`: optimize all revlogs but the manifest
4216 * `--no-manifest`: optimize all revlogs but the manifest
4199 * `--changelog`: optimize the changelog only
4217 * `--changelog`: optimize the changelog only
4200 * `--no-changelog --no-manifest`: optimize filelogs only
4218 * `--no-changelog --no-manifest`: optimize filelogs only
4201 * `--filelogs`: optimize the filelogs only
4219 * `--filelogs`: optimize the filelogs only
4202 * `--no-changelog --no-manifest --no-filelogs`: skip all revlog optimizations
4220 * `--no-changelog --no-manifest --no-filelogs`: skip all revlog optimizations
4203 """
4221 """
4204 return upgrade.upgraderepo(
4222 return upgrade.upgraderepo(
4205 ui, repo, run=run, optimize=set(optimize), backup=backup, **opts
4223 ui, repo, run=run, optimize=set(optimize), backup=backup, **opts
4206 )
4224 )
4207
4225
4208
4226
4209 @command(
4227 @command(
4210 b'debugwalk', cmdutil.walkopts, _(b'[OPTION]... [FILE]...'), inferrepo=True
4228 b'debugwalk', cmdutil.walkopts, _(b'[OPTION]... [FILE]...'), inferrepo=True
4211 )
4229 )
4212 def debugwalk(ui, repo, *pats, **opts):
4230 def debugwalk(ui, repo, *pats, **opts):
4213 """show how files match on given patterns"""
4231 """show how files match on given patterns"""
4214 opts = pycompat.byteskwargs(opts)
4232 opts = pycompat.byteskwargs(opts)
4215 m = scmutil.match(repo[None], pats, opts)
4233 m = scmutil.match(repo[None], pats, opts)
4216 if ui.verbose:
4234 if ui.verbose:
4217 ui.writenoi18n(b'* matcher:\n', stringutil.prettyrepr(m), b'\n')
4235 ui.writenoi18n(b'* matcher:\n', stringutil.prettyrepr(m), b'\n')
4218 items = list(repo[None].walk(m))
4236 items = list(repo[None].walk(m))
4219 if not items:
4237 if not items:
4220 return
4238 return
4221 f = lambda fn: fn
4239 f = lambda fn: fn
4222 if ui.configbool(b'ui', b'slash') and pycompat.ossep != b'/':
4240 if ui.configbool(b'ui', b'slash') and pycompat.ossep != b'/':
4223 f = lambda fn: util.normpath(fn)
4241 f = lambda fn: util.normpath(fn)
4224 fmt = b'f %%-%ds %%-%ds %%s' % (
4242 fmt = b'f %%-%ds %%-%ds %%s' % (
4225 max([len(abs) for abs in items]),
4243 max([len(abs) for abs in items]),
4226 max([len(repo.pathto(abs)) for abs in items]),
4244 max([len(repo.pathto(abs)) for abs in items]),
4227 )
4245 )
4228 for abs in items:
4246 for abs in items:
4229 line = fmt % (
4247 line = fmt % (
4230 abs,
4248 abs,
4231 f(repo.pathto(abs)),
4249 f(repo.pathto(abs)),
4232 m.exact(abs) and b'exact' or b'',
4250 m.exact(abs) and b'exact' or b'',
4233 )
4251 )
4234 ui.write(b"%s\n" % line.rstrip())
4252 ui.write(b"%s\n" % line.rstrip())
4235
4253
4236
4254
4237 @command(b'debugwhyunstable', [], _(b'REV'))
4255 @command(b'debugwhyunstable', [], _(b'REV'))
4238 def debugwhyunstable(ui, repo, rev):
4256 def debugwhyunstable(ui, repo, rev):
4239 """explain instabilities of a changeset"""
4257 """explain instabilities of a changeset"""
4240 for entry in obsutil.whyunstable(repo, scmutil.revsingle(repo, rev)):
4258 for entry in obsutil.whyunstable(repo, scmutil.revsingle(repo, rev)):
4241 dnodes = b''
4259 dnodes = b''
4242 if entry.get(b'divergentnodes'):
4260 if entry.get(b'divergentnodes'):
4243 dnodes = (
4261 dnodes = (
4244 b' '.join(
4262 b' '.join(
4245 b'%s (%s)' % (ctx.hex(), ctx.phasestr())
4263 b'%s (%s)' % (ctx.hex(), ctx.phasestr())
4246 for ctx in entry[b'divergentnodes']
4264 for ctx in entry[b'divergentnodes']
4247 )
4265 )
4248 + b' '
4266 + b' '
4249 )
4267 )
4250 ui.write(
4268 ui.write(
4251 b'%s: %s%s %s\n'
4269 b'%s: %s%s %s\n'
4252 % (entry[b'instability'], dnodes, entry[b'reason'], entry[b'node'])
4270 % (entry[b'instability'], dnodes, entry[b'reason'], entry[b'node'])
4253 )
4271 )
4254
4272
4255
4273
4256 @command(
4274 @command(
4257 b'debugwireargs',
4275 b'debugwireargs',
4258 [
4276 [
4259 (b'', b'three', b'', b'three'),
4277 (b'', b'three', b'', b'three'),
4260 (b'', b'four', b'', b'four'),
4278 (b'', b'four', b'', b'four'),
4261 (b'', b'five', b'', b'five'),
4279 (b'', b'five', b'', b'five'),
4262 ]
4280 ]
4263 + cmdutil.remoteopts,
4281 + cmdutil.remoteopts,
4264 _(b'REPO [OPTIONS]... [ONE [TWO]]'),
4282 _(b'REPO [OPTIONS]... [ONE [TWO]]'),
4265 norepo=True,
4283 norepo=True,
4266 )
4284 )
4267 def debugwireargs(ui, repopath, *vals, **opts):
4285 def debugwireargs(ui, repopath, *vals, **opts):
4268 opts = pycompat.byteskwargs(opts)
4286 opts = pycompat.byteskwargs(opts)
4269 repo = hg.peer(ui, opts, repopath)
4287 repo = hg.peer(ui, opts, repopath)
4270 try:
4288 try:
4271 for opt in cmdutil.remoteopts:
4289 for opt in cmdutil.remoteopts:
4272 del opts[opt[1]]
4290 del opts[opt[1]]
4273 args = {}
4291 args = {}
4274 for k, v in opts.items():
4292 for k, v in opts.items():
4275 if v:
4293 if v:
4276 args[k] = v
4294 args[k] = v
4277 args = pycompat.strkwargs(args)
4295 args = pycompat.strkwargs(args)
4278 # run twice to check that we don't mess up the stream for the next command
4296 # run twice to check that we don't mess up the stream for the next command
4279 res1 = repo.debugwireargs(*vals, **args)
4297 res1 = repo.debugwireargs(*vals, **args)
4280 res2 = repo.debugwireargs(*vals, **args)
4298 res2 = repo.debugwireargs(*vals, **args)
4281 ui.write(b"%s\n" % res1)
4299 ui.write(b"%s\n" % res1)
4282 if res1 != res2:
4300 if res1 != res2:
4283 ui.warn(b"%s\n" % res2)
4301 ui.warn(b"%s\n" % res2)
4284 finally:
4302 finally:
4285 repo.close()
4303 repo.close()
4286
4304
4287
4305
4288 def _parsewirelangblocks(fh):
4306 def _parsewirelangblocks(fh):
4289 activeaction = None
4307 activeaction = None
4290 blocklines = []
4308 blocklines = []
4291 lastindent = 0
4309 lastindent = 0
4292
4310
4293 for line in fh:
4311 for line in fh:
4294 line = line.rstrip()
4312 line = line.rstrip()
4295 if not line:
4313 if not line:
4296 continue
4314 continue
4297
4315
4298 if line.startswith(b'#'):
4316 if line.startswith(b'#'):
4299 continue
4317 continue
4300
4318
4301 if not line.startswith(b' '):
4319 if not line.startswith(b' '):
4302 # New block. Flush previous one.
4320 # New block. Flush previous one.
4303 if activeaction:
4321 if activeaction:
4304 yield activeaction, blocklines
4322 yield activeaction, blocklines
4305
4323
4306 activeaction = line
4324 activeaction = line
4307 blocklines = []
4325 blocklines = []
4308 lastindent = 0
4326 lastindent = 0
4309 continue
4327 continue
4310
4328
4311 # Else we start with an indent.
4329 # Else we start with an indent.
4312
4330
4313 if not activeaction:
4331 if not activeaction:
4314 raise error.Abort(_(b'indented line outside of block'))
4332 raise error.Abort(_(b'indented line outside of block'))
4315
4333
4316 indent = len(line) - len(line.lstrip())
4334 indent = len(line) - len(line.lstrip())
4317
4335
4318 # If this line is indented more than the last line, concatenate it.
4336 # If this line is indented more than the last line, concatenate it.
4319 if indent > lastindent and blocklines:
4337 if indent > lastindent and blocklines:
4320 blocklines[-1] += line.lstrip()
4338 blocklines[-1] += line.lstrip()
4321 else:
4339 else:
4322 blocklines.append(line)
4340 blocklines.append(line)
4323 lastindent = indent
4341 lastindent = indent
4324
4342
4325 # Flush last block.
4343 # Flush last block.
4326 if activeaction:
4344 if activeaction:
4327 yield activeaction, blocklines
4345 yield activeaction, blocklines
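
# Hedged usage sketch for _parsewirelangblocks: blocks are yielded as
# (action line, indented payload lines). Input is any iterable of bytes
# lines, e.g. an io.BytesIO over the mini language described below.
import io

def _demo_parse_blocks():
    script = io.BytesIO(
        b"# comments are skipped\n"
        b"command listkeys\n"
        b"    namespace bookmarks\n"
        b"flush\n"
    )
    return list(_parsewirelangblocks(script))
    # -> [(b'command listkeys', [b'    namespace bookmarks']), (b'flush', [])]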
4328
4346
4329
4347
4330 @command(
4348 @command(
4331 b'debugwireproto',
4349 b'debugwireproto',
4332 [
4350 [
4333 (b'', b'localssh', False, _(b'start an SSH server for this repo')),
4351 (b'', b'localssh', False, _(b'start an SSH server for this repo')),
4334 (b'', b'peer', b'', _(b'construct a specific version of the peer')),
4352 (b'', b'peer', b'', _(b'construct a specific version of the peer')),
4335 (
4353 (
4336 b'',
4354 b'',
4337 b'noreadstderr',
4355 b'noreadstderr',
4338 False,
4356 False,
4339 _(b'do not read from stderr of the remote'),
4357 _(b'do not read from stderr of the remote'),
4340 ),
4358 ),
4341 (
4359 (
4342 b'',
4360 b'',
4343 b'nologhandshake',
4361 b'nologhandshake',
4344 False,
4362 False,
4345 _(b'do not log I/O related to the peer handshake'),
4363 _(b'do not log I/O related to the peer handshake'),
4346 ),
4364 ),
4347 ]
4365 ]
4348 + cmdutil.remoteopts,
4366 + cmdutil.remoteopts,
4349 _(b'[PATH]'),
4367 _(b'[PATH]'),
4350 optionalrepo=True,
4368 optionalrepo=True,
4351 )
4369 )
4352 def debugwireproto(ui, repo, path=None, **opts):
4370 def debugwireproto(ui, repo, path=None, **opts):
4353 """send wire protocol commands to a server
4371 """send wire protocol commands to a server
4354
4372
4355 This command can be used to issue wire protocol commands to remote
4373 This command can be used to issue wire protocol commands to remote
4356 peers and to debug the raw data being exchanged.
4374 peers and to debug the raw data being exchanged.
4357
4375
4358 ``--localssh`` will start an SSH server against the current repository
4376 ``--localssh`` will start an SSH server against the current repository
4359 and connect to that. By default, the connection will perform a handshake
4377 and connect to that. By default, the connection will perform a handshake
4360 and establish an appropriate peer instance.
4378 and establish an appropriate peer instance.
4361
4379
4362 ``--peer`` can be used to bypass the handshake protocol and construct a
4380 ``--peer`` can be used to bypass the handshake protocol and construct a
4363 peer instance using the specified class type. Valid values are ``raw``,
4381 peer instance using the specified class type. Valid values are ``raw``,
4364 ``ssh1``. ``raw`` instances only allow sending raw data payloads and
4382 ``ssh1``. ``raw`` instances only allow sending raw data payloads and
4365 don't support higher-level command actions.
4383 don't support higher-level command actions.
4366
4384
4367 ``--noreadstderr`` can be used to disable automatic reading from stderr
4385 ``--noreadstderr`` can be used to disable automatic reading from stderr
4368 of the peer (for SSH connections only). Disabling automatic reading of
4386 of the peer (for SSH connections only). Disabling automatic reading of
4369 stderr is useful for making output more deterministic.
4387 stderr is useful for making output more deterministic.
4370
4388
4371 Commands are issued via a mini language which is specified via stdin.
4389 Commands are issued via a mini language which is specified via stdin.
4372 The language consists of individual actions to perform. An action is
4390 The language consists of individual actions to perform. An action is
4373 defined by a block. A block is defined as a line with no leading
4391 defined by a block. A block is defined as a line with no leading
4374 space followed by 0 or more lines with leading space. Blocks are
4392 space followed by 0 or more lines with leading space. Blocks are
4375 effectively a high-level command with additional metadata.
4393 effectively a high-level command with additional metadata.
4376
4394
4377 Lines beginning with ``#`` are ignored.
4395 Lines beginning with ``#`` are ignored.
4378
4396
4379 The following sections denote available actions.
4397 The following sections denote available actions.
4380
4398
4381 raw
4399 raw
4382 ---
4400 ---
4383
4401
4384 Send raw data to the server.
4402 Send raw data to the server.
4385
4403
4386 The block payload contains the raw data to send as one atomic send
4404 The block payload contains the raw data to send as one atomic send
4387 operation. The data may not actually be delivered in a single system
4405 operation. The data may not actually be delivered in a single system
4388 call: it depends on the abilities of the transport being used.
4406 call: it depends on the abilities of the transport being used.
4389
4407
4390 Each line in the block is de-indented and concatenated. Then, that
4408 Each line in the block is de-indented and concatenated. Then, that
4391 value is evaluated as a Python b'' literal. This allows the use of
4409 value is evaluated as a Python b'' literal. This allows the use of
4392 backslash escaping, etc.
4410 backslash escaping, etc.
4393
4411
4394 raw+
4412 raw+
4395 ----
4413 ----
4396
4414
4397 Behaves like ``raw`` except flushes output afterwards.
4415 Behaves like ``raw`` except flushes output afterwards.
4398
4416
4399 command <X>
4417 command <X>
4400 -----------
4418 -----------
4401
4419
4402 Send a request to run a named command, whose name follows the ``command``
4420 Send a request to run a named command, whose name follows the ``command``
4403 string.
4421 string.
4404
4422
4405 Arguments to the command are defined as lines in this block. The format of
4423 Arguments to the command are defined as lines in this block. The format of
4406 each line is ``<key> <value>``. e.g.::
4424 each line is ``<key> <value>``. e.g.::
4407
4425
4408 command listkeys
4426 command listkeys
4409 namespace bookmarks
4427 namespace bookmarks
4410
4428
4411 If the value begins with ``eval:``, it will be interpreted as a Python
4429 If the value begins with ``eval:``, it will be interpreted as a Python
4412 literal expression. Otherwise values are interpreted as Python b'' literals.
4430 literal expression. Otherwise values are interpreted as Python b'' literals.
4413 This allows sending complex types and encoding special byte sequences via
4431 This allows sending complex types and encoding special byte sequences via
4414 backslash escaping.
4432 backslash escaping.
4415
4433
4416 The following arguments have special meaning:
4434 The following arguments have special meaning:
4417
4435
4418 ``PUSHFILE``
4436 ``PUSHFILE``
4419 When defined, the *push* mechanism of the peer will be used instead
4437 When defined, the *push* mechanism of the peer will be used instead
4420 of the static request-response mechanism and the content of the
4438 of the static request-response mechanism and the content of the
4421 file specified in the value of this argument will be sent as the
4439 file specified in the value of this argument will be sent as the
4422 command payload.
4440 command payload.
4423
4441
4424 This can be used to submit a local bundle file to the remote.
4442 This can be used to submit a local bundle file to the remote.
4425
4443
4426 batchbegin
4444 batchbegin
4427 ----------
4445 ----------
4428
4446
4429 Instruct the peer to begin a batched send.
4447 Instruct the peer to begin a batched send.
4430
4448
4431 All ``command`` blocks are queued for execution until the next
4449 All ``command`` blocks are queued for execution until the next
4432 ``batchsubmit`` block.
4450 ``batchsubmit`` block.
4433
4451
4434 batchsubmit
4452 batchsubmit
4435 -----------
4453 -----------
4436
4454
4437 Submit previously queued ``command`` blocks as a batch request.
4455 Submit previously queued ``command`` blocks as a batch request.
4438
4456
4439 This action MUST be paired with a ``batchbegin`` action.
4457 This action MUST be paired with a ``batchbegin`` action.
4440
4458
4441 httprequest <method> <path>
4459 httprequest <method> <path>
4442 ---------------------------
4460 ---------------------------
4443
4461
4444 (HTTP peer only)
4462 (HTTP peer only)
4445
4463
4446 Send an HTTP request to the peer.
4464 Send an HTTP request to the peer.
4447
4465
4448 The HTTP request line follows the ``httprequest`` action. e.g. ``GET /foo``.
4466 The HTTP request line follows the ``httprequest`` action. e.g. ``GET /foo``.
4449
4467
4450 Arguments of the form ``<key>: <value>`` are interpreted as HTTP request
4468 Arguments of the form ``<key>: <value>`` are interpreted as HTTP request
4451 headers to add to the request. e.g. ``Accept: foo``.
4469 headers to add to the request. e.g. ``Accept: foo``.
4452
4470
4453 The following arguments are special:
4471 The following arguments are special:
4454
4472
4455 ``BODYFILE``
4473 ``BODYFILE``
4456 The content of the file defined as the value to this argument will be
4474 The content of the file defined as the value to this argument will be
4457 transferred verbatim as the HTTP request body.
4475 transferred verbatim as the HTTP request body.
4458
4476
4459 ``frame <type> <flags> <payload>``
4477 ``frame <type> <flags> <payload>``
4460 Send a unified protocol frame as part of the request body.
4478 Send a unified protocol frame as part of the request body.
4461
4479
4462 All frames will be collected and sent as the body to the HTTP
4480 All frames will be collected and sent as the body to the HTTP
4463 request.
4481 request.
4464
4482
    close
    -----

    Close the connection to the server.

    flush
    -----

    Flush data written to the server.

    readavailable
    -------------

    Close the write end of the connection and read all available data from
    the server.

    If the connection to the server encompasses multiple pipes, we poll both
    pipes and read available data.

    readline
    --------

    Read a line of output from the server. If there are multiple output
    pipes, reads only the main pipe.

    ereadline
    ---------

    Like ``readline``, but read from the stderr pipe, if available.

    read <X>
    --------

    ``read()`` ``<X>`` bytes from the server's main output pipe.

    eread <X>
    ---------

    ``read()`` ``<X>`` bytes from the server's stderr pipe, if available.

    Specifying Unified Frame-Based Protocol Frames
    ----------------------------------------------

    It is possible to emit *Unified Frame-Based Protocol* frames by using
    special syntax.

    A frame is composed of a type, flags, and a payload. These can be parsed
    from a string of the form::

       <request-id> <stream-id> <stream-flags> <type> <flags> <payload>

    ``request-id`` and ``stream-id`` are integers defining the request and
    stream identifiers.

    ``type`` can be an integer value for the frame type or the string name
    of the type. The strings are defined in ``wireprotoframing.py``. e.g.
    ``command-name``.

    ``stream-flags`` and ``flags`` are a ``|`` delimited list of flag
    components. Each component (and there can be just one) can be an integer
    or a flag name for stream flags or frame flags, respectively. Values are
    resolved to integers and then bitwise OR'd together.

    ``payload`` represents the raw frame payload. If it begins with
    ``cbor:``, the following string is evaluated as Python code and the
    resulting object is fed into a CBOR encoder. Otherwise it is interpreted
    as a Python byte string literal.
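
    For example, the following frame string opens a new stream and requests
    the ``heads`` command, with the arguments encoded as CBOR (an
    illustrative sketch)::

        1 1 stream-begin command-request new cbor:{b'name': b'heads'}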
4532 """
4550 """
    opts = pycompat.byteskwargs(opts)

    if opts[b'localssh'] and not repo:
        raise error.Abort(_(b'--localssh requires a repository'))

    if opts[b'peer'] and opts[b'peer'] not in (
        b'raw',
        b'ssh1',
    ):
        raise error.Abort(
            _(b'invalid value for --peer'),
            hint=_(b'valid values are "raw" and "ssh1"'),
        )

    if path and opts[b'localssh']:
        raise error.Abort(_(b'cannot specify --localssh with an explicit path'))

    if ui.interactive():
        ui.write(_(b'(waiting for commands on stdin)\n'))

    blocks = list(_parsewirelangblocks(ui.fin))

    proc = None
    stdin = None
    stdout = None
    stderr = None
    opener = None

    if opts[b'localssh']:
        # We start the SSH server in its own process so there is process
        # separation. This prevents a whole class of potential bugs around
        # shared state from interfering with server operation.
        args = procutil.hgcmd() + [
            b'-R',
            repo.root,
            b'debugserve',
            b'--sshstdio',
        ]
        proc = subprocess.Popen(
            pycompat.rapply(procutil.tonativestr, args),
            stdin=subprocess.PIPE,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            bufsize=0,
        )

        stdin = proc.stdin
        stdout = proc.stdout
        stderr = proc.stderr

        # We turn the pipes into observers so we can log I/O.
        if ui.verbose or opts[b'peer'] == b'raw':
            stdin = util.makeloggingfileobject(
                ui, proc.stdin, b'i', logdata=True
            )
            stdout = util.makeloggingfileobject(
                ui, proc.stdout, b'o', logdata=True
            )
            stderr = util.makeloggingfileobject(
                ui, proc.stderr, b'e', logdata=True
            )

        # --localssh also implies the peer connection settings.

        url = b'ssh://localserver'
        autoreadstderr = not opts[b'noreadstderr']

        if opts[b'peer'] == b'ssh1':
            ui.write(_(b'creating ssh peer for wire protocol version 1\n'))
            peer = sshpeer.sshv1peer(
                ui,
                url,
                proc,
                stdin,
                stdout,
                stderr,
                None,
                autoreadstderr=autoreadstderr,
            )
        elif opts[b'peer'] == b'raw':
            ui.write(_(b'using raw connection to peer\n'))
            peer = None
        else:
            ui.write(_(b'creating ssh peer from handshake results\n'))
            peer = sshpeer.makepeer(
                ui,
                url,
                proc,
                stdin,
                stdout,
                stderr,
                autoreadstderr=autoreadstderr,
            )

    elif path:
        # We bypass hg.peer() so we can proxy the sockets.
        # TODO consider not doing this because we skip
        # ``hg.wirepeersetupfuncs`` and potentially other useful functionality.
        u = urlutil.url(path)
        if u.scheme != b'http':
            raise error.Abort(_(b'only http:// paths are currently supported'))

        url, authinfo = u.authinfo()
        openerargs = {
            'useragent': b'Mercurial debugwireproto',
        }

        # Turn pipes/sockets into observers so we can log I/O.
        if ui.verbose:
            openerargs.update(
                {
                    'loggingfh': ui,
                    'loggingname': b's',
                    'loggingopts': {
                        'logdata': True,
                        'logdataapis': False,
                    },
                }
            )

        if ui.debugflag:
            openerargs['loggingopts']['logdataapis'] = True

        # Don't send default headers when in raw mode. This allows us to
        # bypass most of the behavior of our URL handling code so we can
        # have near complete control over what's sent on the wire.
        if opts[b'peer'] == b'raw':
            openerargs['sendaccept'] = False

        opener = urlmod.opener(ui, authinfo, **openerargs)

        if opts[b'peer'] == b'raw':
            ui.write(_(b'using raw connection to peer\n'))
            peer = None
        elif opts[b'peer']:
            raise error.Abort(
                _(b'--peer %s not supported with HTTP peers') % opts[b'peer']
            )
        else:
            peer = httppeer.makepeer(ui, path, opener=opener)

        # We /could/ populate stdin/stdout with sock.makefile()...
    else:
        raise error.Abort(_(b'unsupported connection configuration'))

    batchedcommands = None

    # Now perform actions based on the parsed wire language instructions.
    for action, lines in blocks:
        if action in (b'raw', b'raw+'):
            if not stdin:
                raise error.Abort(_(b'cannot call raw/raw+ on this peer'))

            # Concatenate the data together.
            data = b''.join(l.lstrip() for l in lines)
            data = stringutil.unescapestr(data)
            stdin.write(data)

            if action == b'raw+':
                stdin.flush()
        elif action == b'flush':
            if not stdin:
                raise error.Abort(_(b'cannot call flush on this peer'))
            stdin.flush()
        elif action.startswith(b'command'):
            if not peer:
                raise error.Abort(
                    _(
                        b'cannot send commands unless peer instance '
                        b'is available'
                    )
                )

            command = action.split(b' ', 1)[1]

            args = {}
            for line in lines:
                # We need to allow empty values.
                fields = line.lstrip().split(b' ', 1)
                if len(fields) == 1:
                    key = fields[0]
                    value = b''
                else:
                    key, value = fields

                if value.startswith(b'eval:'):
                    value = stringutil.evalpythonliteral(value[5:])
                else:
                    value = stringutil.unescapestr(value)

                args[key] = value

            if batchedcommands is not None:
                batchedcommands.append((command, args))
                continue

            ui.status(_(b'sending %s command\n') % command)

            if b'PUSHFILE' in args:
                with open(args[b'PUSHFILE'], 'rb') as fh:
                    del args[b'PUSHFILE']
                    res, output = peer._callpush(
                        command, fh, **pycompat.strkwargs(args)
                    )
                    ui.status(_(b'result: %s\n') % stringutil.escapestr(res))
                    ui.status(
                        _(b'remote output: %s\n') % stringutil.escapestr(output)
                    )
            else:
                with peer.commandexecutor() as e:
                    res = e.callcommand(command, args).result()

                ui.status(
                    _(b'response: %s\n')
                    % stringutil.pprint(res, bprefix=True, indent=2)
                )

        elif action == b'batchbegin':
            if batchedcommands is not None:
                raise error.Abort(_(b'nested batchbegin not allowed'))

            batchedcommands = []
        elif action == b'batchsubmit':
            # There is a batching API we could go through. But it would be
            # difficult to normalize requests into function calls. It is easier
            # to bypass this layer and normalize to commands + args.
            ui.status(
                _(b'sending batch with %d sub-commands\n')
                % len(batchedcommands)
            )
            assert peer is not None
            for i, chunk in enumerate(peer._submitbatch(batchedcommands)):
                ui.status(
                    _(b'response #%d: %s\n') % (i, stringutil.escapestr(chunk))
                )

            batchedcommands = None

        elif action.startswith(b'httprequest '):
            if not opener:
                raise error.Abort(
                    _(b'cannot use httprequest without an HTTP peer')
                )

            request = action.split(b' ', 2)
            if len(request) != 3:
                raise error.Abort(
                    _(
                        b'invalid httprequest: expected format is '
                        b'"httprequest <method> <path>"'
                    )
                )

            method, httppath = request[1:]
            headers = {}
            body = None
            frames = []
            for line in lines:
                line = line.lstrip()
                m = re.match(b'^([a-zA-Z0-9_-]+): (.*)$', line)
                if m:
                    # Headers need to use native strings.
                    key = pycompat.strurl(m.group(1))
                    value = pycompat.strurl(m.group(2))
                    headers[key] = value
                    continue

                if line.startswith(b'BODYFILE '):
                    with open(line.split(b' ', 1)[1], 'rb') as fh:
                        body = fh.read()
                elif line.startswith(b'frame '):
                    frame = wireprotoframing.makeframefromhumanstring(
                        line[len(b'frame ') :]
                    )

                    frames.append(frame)
                else:
                    raise error.Abort(
                        _(b'unknown argument to httprequest: %s') % line
                    )

            url = path + httppath

            if frames:
                body = b''.join(bytes(f) for f in frames)

            req = urlmod.urlreq.request(pycompat.strurl(url), body, headers)

            # urllib.Request insists on using has_data() as a proxy for
            # determining the request method. Override that to use our
            # explicitly requested method.
            req.get_method = lambda: pycompat.sysstr(method)

            try:
                res = opener.open(req)
                body = res.read()
            except util.urlerr.urlerror as e:
                # read() method must be called, but only exists in Python 2
                getattr(e, 'read', lambda: None)()
                continue

            ct = res.headers.get('Content-Type')
            if ct == 'application/mercurial-cbor':
                ui.write(
                    _(b'cbor> %s\n')
                    % stringutil.pprint(
                        cborutil.decodeall(body), bprefix=True, indent=2
                    )
                )

        elif action == b'close':
            assert peer is not None
            peer.close()
        elif action == b'readavailable':
            if not stdout or not stderr:
                raise error.Abort(
                    _(b'readavailable not available on this peer')
                )

            stdin.close()
            stdout.read()
            stderr.read()

        elif action == b'readline':
            if not stdout:
                raise error.Abort(_(b'readline not available on this peer'))
            stdout.readline()
        elif action == b'ereadline':
            if not stderr:
                raise error.Abort(_(b'ereadline not available on this peer'))
            stderr.readline()
        elif action.startswith(b'read '):
            count = int(action.split(b' ', 1)[1])
            if not stdout:
                raise error.Abort(_(b'read not available on this peer'))
            stdout.read(count)
        elif action.startswith(b'eread '):
            count = int(action.split(b' ', 1)[1])
            if not stderr:
                raise error.Abort(_(b'eread not available on this peer'))
            stderr.read(count)
        else:
            raise error.Abort(_(b'unknown action: %s') % action)

    if batchedcommands is not None:
        raise error.Abort(_(b'unclosed "batchbegin" request'))

    if peer:
        peer.close()

    if proc:
        proc.kill()
@@ -1,95 +1,96 @@
Mercurial can be augmented with Rust extensions for speeding up certain
operations.

Compatibility
=============

Though the Rust extensions are only tested by the project under Linux, users of
MacOS, FreeBSD and other UNIX-likes have been using the Rust extensions. Your
mileage may vary, but by all means do give us feedback or signal your interest
for better support.

No Rust extensions are available for Windows at this time.

Features
========

The following operations are sped up when using Rust:

- discovery of differences between repositories (pull/push)
- nodemap (see :hg:`help config.format.use-persistent-nodemap`)
- all commands using the dirstate (status, commit, diff, add, update, etc.)
- dirstate-v2 (see :hg:`help config.format.use-dirstate-v2`)
- iteration over ancestors in a graph

More features are in the works, and improvements on the above listed are still
in progress. For more experimental work see the "rhg" section.

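For instance, the persistent nodemap and dirstate-v2 entries above refer to
opt-in repository formats that are enabled through configuration. A minimal
sketch (illustrative only; see :hg:`help config.format` for the authoritative
reference, and note that existing repositories require an explicit format
upgrade)::

  [format]
  use-persistent-nodemap = yes
  use-dirstate-v2 = yes
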
Checking for Rust
=================

You may already have the Rust extensions depending on how you install
Mercurial::

  $ hg debuginstall | grep -i rust
  checking Rust extensions (installed)
  checking module policy (rust+c-allow)

If those lines don't even exist, you're using an old version of `hg` which does
not have any Rust extensions yet.

Installing
==========

You will need `cargo` to be in your `$PATH`. See the "MSRV" section for which
version to use.

Using pip
---------

Users of `pip` can install the Rust extensions with the following command::

  $ pip install mercurial --global-option --rust --no-use-pep517

`--no-use-pep517` is here to tell `pip` to preserve backwards compatibility with
the legacy `setup.py` system. Mercurial has not yet migrated its complex setup
to the new system, so we still need this to add compiled extensions.

This might take a couple of minutes because you're compiling everything.

See the "Checking for Rust" section to see if the install succeeded.

From your distribution
----------------------

Some distributions are shipping Mercurial with Rust extensions enabled and
pre-compiled (meaning you won't have to install `cargo`), or allow you to
specify an install flag. Check with your specific distribution for how to do
that, or ask their team to add support for hg+Rust!

From source
-----------

Please refer to the `rust/README.rst` file in the Mercurial repository for
instructions on how to install from source.

MSRV
====

The minimum supported Rust version is currently 1.48.0. The project's policy is
to follow the version from Debian stable, to make the distributions' job easier.

rhg
===

There exists an experimental pure-Rust version of Mercurial called `rhg` with a
fallback mechanism for unsupported invocations. It allows for much faster
execution of certain commands while adding no discernable overhead for the rest.

The only way of trying it out is by building it from source. Please refer to
`rust/README.rst` in the Mercurial repository.

Contributing
============

If you would like to help the Rust endeavor, please refer to `rust/README.rst`
in the Mercurial repository.
@@ -1,3929 +1,3930 @@
# localrepo.py - read/write repository class for mercurial
# coding: utf-8
#
# Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.


import errno
import functools
import os
import random
import sys
import time
import weakref

from concurrent import futures
from .i18n import _
from .node import (
    bin,
    hex,
    nullrev,
    sha1nodeconstants,
    short,
)
from .pycompat import (
    delattr,
    getattr,
)
from . import (
    bookmarks,
    branchmap,
    bundle2,
    bundlecaches,
    changegroup,
    color,
    commit,
    context,
    dirstate,
    dirstateguard,
    discovery,
    encoding,
    error,
    exchange,
    extensions,
    filelog,
    hook,
    lock as lockmod,
    match as matchmod,
    mergestate as mergestatemod,
    mergeutil,
    namespaces,
    narrowspec,
    obsolete,
    pathutil,
    phases,
    pushkey,
    pycompat,
    rcutil,
    repoview,
    requirements as requirementsmod,
    revlog,
    revset,
    revsetlang,
    scmutil,
    sparse,
    store as storemod,
    subrepoutil,
    tags as tagsmod,
    transaction,
    txnutil,
    util,
    vfs as vfsmod,
    wireprototypes,
)

from .interfaces import (
    repository,
    util as interfaceutil,
)

from .utils import (
    hashutil,
    procutil,
    stringutil,
    urlutil,
)

from .revlogutils import (
    concurrency_checker as revlogchecker,
    constants as revlogconst,
    sidedata as sidedatamod,
)

release = lockmod.release
urlerr = util.urlerr
urlreq = util.urlreq

# set of (path, vfs-location) tuples. vfs-location is:
# - 'plain' for vfs relative paths
# - '' for svfs relative paths
_cachedfiles = set()


class _basefilecache(scmutil.filecache):
107 """All filecache usage on repo are done for logic that should be unfiltered"""
107 """All filecache usage on repo are done for logic that should be unfiltered"""

    def __get__(self, repo, type=None):
        if repo is None:
            return self
        # proxy to unfiltered __dict__ since filtered repo has no entry
        unfi = repo.unfiltered()
        try:
            return unfi.__dict__[self.sname]
        except KeyError:
            pass
        return super(_basefilecache, self).__get__(unfi, type)

    def set(self, repo, value):
        return super(_basefilecache, self).set(repo.unfiltered(), value)


class repofilecache(_basefilecache):
    """filecache for files in .hg but outside of .hg/store"""

    def __init__(self, *paths):
        super(repofilecache, self).__init__(*paths)
        for path in paths:
            _cachedfiles.add((path, b'plain'))

    def join(self, obj, fname):
        return obj.vfs.join(fname)


class storecache(_basefilecache):
    """filecache for files in the store"""

    def __init__(self, *paths):
        super(storecache, self).__init__(*paths)
        for path in paths:
            _cachedfiles.add((path, b''))

    def join(self, obj, fname):
        return obj.sjoin(fname)


class changelogcache(storecache):
    """filecache for the changelog"""

    def __init__(self):
        super(changelogcache, self).__init__()
        _cachedfiles.add((b'00changelog.i', b''))
        _cachedfiles.add((b'00changelog.n', b''))

    def tracked_paths(self, obj):
        paths = [self.join(obj, b'00changelog.i')]
        if obj.store.opener.options.get(b'persistent-nodemap', False):
            paths.append(self.join(obj, b'00changelog.n'))
        return paths


class manifestlogcache(storecache):
    """filecache for the manifestlog"""

    def __init__(self):
        super(manifestlogcache, self).__init__()
        _cachedfiles.add((b'00manifest.i', b''))
        _cachedfiles.add((b'00manifest.n', b''))

    def tracked_paths(self, obj):
        paths = [self.join(obj, b'00manifest.i')]
        if obj.store.opener.options.get(b'persistent-nodemap', False):
            paths.append(self.join(obj, b'00manifest.n'))
        return paths


class mixedrepostorecache(_basefilecache):
179 """filecache for a mix files in .hg/store and outside"""
179 """filecache for a mix files in .hg/store and outside"""

    def __init__(self, *pathsandlocations):
        # scmutil.filecache only uses the path for passing back into our
        # join(), so we can safely pass a list of paths and locations
        super(mixedrepostorecache, self).__init__(*pathsandlocations)
        _cachedfiles.update(pathsandlocations)

    def join(self, obj, fnameandlocation):
        fname, location = fnameandlocation
        if location == b'plain':
            return obj.vfs.join(fname)
        else:
            if location != b'':
                raise error.ProgrammingError(
                    b'unexpected location: %s' % location
                )
            return obj.sjoin(fname)


def isfilecached(repo, name):
    """check if a repo has already cached "name" filecache-ed property

    This returns (cachedobj-or-None, iscached) tuple.
    """
    cacheentry = repo.unfiltered()._filecache.get(name, None)
    if not cacheentry:
        return None, False
    return cacheentry.obj, True


class unfilteredpropertycache(util.propertycache):
211 """propertycache that apply to unfiltered repo only"""
211 """propertycache that apply to unfiltered repo only"""

    def __get__(self, repo, type=None):
        unfi = repo.unfiltered()
        if unfi is repo:
            return super(unfilteredpropertycache, self).__get__(unfi)
        return getattr(unfi, self.name)


class filteredpropertycache(util.propertycache):
221 """propertycache that must take filtering in account"""
221 """propertycache that must take filtering in account"""

    def cachevalue(self, obj, value):
        object.__setattr__(obj, self.name, value)


def hasunfilteredcache(repo, name):
    """check if a repo has an unfilteredpropertycache value for <name>"""
    return name in vars(repo.unfiltered())


def unfilteredmethod(orig):
233 """decorate method that always need to be run on unfiltered version"""
233 """decorate method that always need to be run on unfiltered version"""

    @functools.wraps(orig)
    def wrapper(repo, *args, **kwargs):
        return orig(repo.unfiltered(), *args, **kwargs)

    return wrapper

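# Example usage of ``unfilteredmethod`` (an illustrative sketch, not code
# from this module):
#
#     class somerepo(localrepository):
#         @unfilteredmethod
#         def rebuildcaches(self):
#             # ``self`` is guaranteed to be the unfiltered repository here
#             ...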

moderncaps = {
    b'lookup',
    b'branchmap',
    b'pushkey',
    b'known',
    b'getbundle',
    b'unbundle',
}
legacycaps = moderncaps.union({b'changegroupsubset'})


@interfaceutil.implementer(repository.ipeercommandexecutor)
class localcommandexecutor:
    def __init__(self, peer):
        self._peer = peer
        self._sent = False
        self._closed = False

    def __enter__(self):
        return self

    def __exit__(self, exctype, excvalue, exctb):
        self.close()

    def callcommand(self, command, args):
        if self._sent:
            raise error.ProgrammingError(
                b'callcommand() cannot be used after sendcommands()'
            )

        if self._closed:
            raise error.ProgrammingError(
                b'callcommand() cannot be used after close()'
            )

        # We don't need to support anything fancy. Just call the named
        # method on the peer and return a resolved future.
        fn = getattr(self._peer, pycompat.sysstr(command))

        f = futures.Future()

        try:
            result = fn(**pycompat.strkwargs(args))
        except Exception:
            pycompat.future_set_exception_info(f, sys.exc_info()[1:])
        else:
            f.set_result(result)

        return f

    def sendcommands(self):
        self._sent = True

    def close(self):
        self._closed = True


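# Typical use of a command executor (an illustrative sketch; the same
# pattern is used by ``debugwireproto`` earlier in this changeset):
#
#     with peer.commandexecutor() as e:
#         f = e.callcommand(b'heads', {})
#     heads = f.result()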
@interfaceutil.implementer(repository.ipeercommands)
class localpeer(repository.peer):
    '''peer for a local repo; reflects only the most recent API'''

    def __init__(self, repo, caps=None):
        super(localpeer, self).__init__()

        if caps is None:
            caps = moderncaps.copy()
        self._repo = repo.filtered(b'served')
        self.ui = repo.ui

        if repo._wanted_sidedata:
            formatted = bundle2.format_remote_wanted_sidedata(repo)
            caps.add(b'exp-wanted-sidedata=' + formatted)

        self._caps = repo._restrictcapabilities(caps)

    # Begin of _basepeer interface.

    def url(self):
        return self._repo.url()

    def local(self):
        return self._repo

    def peer(self):
        return self

    def canpush(self):
        return True

    def close(self):
        self._repo.close()

    # End of _basepeer interface.

    # Begin of _basewirecommands interface.

    def branchmap(self):
        return self._repo.branchmap()

    def capabilities(self):
        return self._caps

    def clonebundles(self):
        return self._repo.tryread(bundlecaches.CB_MANIFEST_FILE)

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        """Used to test argument passing over the wire"""
        return b"%s %s %s %s %s" % (
            one,
            two,
            pycompat.bytestr(three),
            pycompat.bytestr(four),
            pycompat.bytestr(five),
        )

    def getbundle(
        self,
        source,
        heads=None,
        common=None,
        bundlecaps=None,
        remote_sidedata=None,
        **kwargs
    ):
        chunks = exchange.getbundlechunks(
            self._repo,
            source,
            heads=heads,
            common=common,
            bundlecaps=bundlecaps,
            remote_sidedata=remote_sidedata,
            **kwargs
        )[1]
        cb = util.chunkbuffer(chunks)

        if exchange.bundle2requested(bundlecaps):
            # When requesting a bundle2, getbundle returns a stream to make the
            # wire level function happier. We need to build a proper object
            # from it in local peer.
            return bundle2.getunbundler(self.ui, cb)
        else:
            return changegroup.getunbundler(b'01', cb, None)

    def heads(self):
        return self._repo.heads()

    def known(self, nodes):
        return self._repo.known(nodes)

    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)

    def lookup(self, key):
        return self._repo.lookup(key)

    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)

    def stream_out(self):
        raise error.Abort(_(b'cannot perform stream clone against local peer'))

    def unbundle(self, bundle, heads, url):
        """apply a bundle on a repo

        This function handles the repo locking itself."""
        try:
            try:
                bundle = exchange.readbundle(self.ui, bundle, None)
                ret = exchange.unbundle(self._repo, bundle, heads, b'push', url)
                if util.safehasattr(ret, b'getchunks'):
                    # This is a bundle20 object, turn it into an unbundler.
                    # This little dance should be dropped eventually when the
                    # API is finally improved.
                    stream = util.chunkbuffer(ret.getchunks())
                    ret = bundle2.getunbundler(self.ui, stream)
                return ret
            except Exception as exc:
                # If the exception contains output salvaged from a bundle2
                # reply, we need to make sure it is printed before continuing
                # to fail. So we build a bundle2 with such output and consume
                # it directly.
                #
                # This is not very elegant but allows a "simple" solution for
                # issue4594
                output = getattr(exc, '_bundle2salvagedoutput', ())
                if output:
                    bundler = bundle2.bundle20(self._repo.ui)
                    for out in output:
                        bundler.addpart(out)
                    stream = util.chunkbuffer(bundler.getchunks())
                    b = bundle2.getunbundler(self.ui, stream)
                    bundle2.processbundle(self._repo, b)
                raise
        except error.PushRaced as exc:
            raise error.ResponseError(
                _(b'push failed:'), stringutil.forcebytestr(exc)
            )

    # End of _basewirecommands interface.

    # Begin of peer interface.

    def commandexecutor(self):
        return localcommandexecutor(self)

    # End of peer interface.


@interfaceutil.implementer(repository.ipeerlegacycommands)
class locallegacypeer(localpeer):
    """peer extension which implements legacy methods too; used for tests with
    restricted capabilities"""

    def __init__(self, repo):
        super(locallegacypeer, self).__init__(repo, caps=legacycaps)

    # Begin of baselegacywirecommands interface.

    def between(self, pairs):
        return self._repo.between(pairs)

    def branches(self, nodes):
        return self._repo.branches(nodes)

    def changegroup(self, nodes, source):
        outgoing = discovery.outgoing(
            self._repo, missingroots=nodes, ancestorsof=self._repo.heads()
        )
        return changegroup.makechangegroup(self._repo, outgoing, b'01', source)

    def changegroupsubset(self, bases, heads, source):
        outgoing = discovery.outgoing(
            self._repo, missingroots=bases, ancestorsof=heads
        )
        return changegroup.makechangegroup(self._repo, outgoing, b'01', source)

    # End of baselegacywirecommands interface.


# Functions receiving (ui, features) that extensions can register to impact
# the ability to load repositories with custom requirements. Only
# functions defined in loaded extensions are called.
#
# The function receives a set of requirement strings that the repository
# is capable of opening. Functions will typically add elements to the
# set to reflect that the extension knows how to handle those requirements.
488 featuresetupfuncs = set()
488 featuresetupfuncs = set()
489
489
490
490
491 def _getsharedvfs(hgvfs, requirements):
491 def _getsharedvfs(hgvfs, requirements):
492 """returns the vfs object pointing to root of shared source
492 """returns the vfs object pointing to root of shared source
493 repo for a shared repository
493 repo for a shared repository
494
494
495 hgvfs is vfs pointing at .hg/ of current repo (shared one)
495 hgvfs is vfs pointing at .hg/ of current repo (shared one)
496 requirements is a set of requirements of current repo (shared one)
496 requirements is a set of requirements of current repo (shared one)
497 """
497 """
498 # The ``shared`` or ``relshared`` requirements indicate the
498 # The ``shared`` or ``relshared`` requirements indicate the
499 # store lives in the path contained in the ``.hg/sharedpath`` file.
499 # store lives in the path contained in the ``.hg/sharedpath`` file.
500 # This is an absolute path for ``shared`` and relative to
500 # This is an absolute path for ``shared`` and relative to
501 # ``.hg/`` for ``relshared``.
501 # ``.hg/`` for ``relshared``.
502 sharedpath = hgvfs.read(b'sharedpath').rstrip(b'\n')
502 sharedpath = hgvfs.read(b'sharedpath').rstrip(b'\n')
503 if requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements:
503 if requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements:
504 sharedpath = util.normpath(hgvfs.join(sharedpath))
504 sharedpath = util.normpath(hgvfs.join(sharedpath))
505
505
506 sharedvfs = vfsmod.vfs(sharedpath, realpath=True)
506 sharedvfs = vfsmod.vfs(sharedpath, realpath=True)
507
507
508 if not sharedvfs.exists():
508 if not sharedvfs.exists():
509 raise error.RepoError(
509 raise error.RepoError(
510 _(b'.hg/sharedpath points to nonexistent directory %s')
510 _(b'.hg/sharedpath points to nonexistent directory %s')
511 % sharedvfs.base
511 % sharedvfs.base
512 )
512 )
513 return sharedvfs
513 return sharedvfs
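
A minimal standalone sketch of the same resolution using ``os.path`` instead
of the vfs layer (illustrative only; real callers must go through
``_getsharedvfs`` so auditing and error handling still apply):

    import os

    def resolve_shared_path(hg_dir: bytes, relative: bool) -> bytes:
        # read .hg/sharedpath and drop the trailing newline
        with open(os.path.join(hg_dir, b'sharedpath'), 'rb') as fh:
            sharedpath = fh.read().rstrip(b'\n')
        if relative:
            # `relshared` stores a path relative to .hg/ rather than absolute
            sharedpath = os.path.normpath(os.path.join(hg_dir, sharedpath))
        return sharedpath
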
514
514
515
515
516 def _readrequires(vfs, allowmissing):
516 def _readrequires(vfs, allowmissing):
517 """reads the require file present at root of this vfs
517 """reads the require file present at root of this vfs
518 and return a set of requirements
518 and return a set of requirements
519
519
520 If allowmissing is True, we suppress ENOENT if raised"""
520 If allowmissing is True, we suppress ENOENT if raised"""
521 # requires file contains a newline-delimited list of
521 # requires file contains a newline-delimited list of
522 # features/capabilities the opener (us) must have in order to use
522 # features/capabilities the opener (us) must have in order to use
523 # the repository. This file was introduced in Mercurial 0.9.2,
523 # the repository. This file was introduced in Mercurial 0.9.2,
524 # which means very old repositories may not have one. We assume
524 # which means very old repositories may not have one. We assume
525 # a missing file translates to no requirements.
525 # a missing file translates to no requirements.
526 try:
526 try:
527 requirements = set(vfs.read(b'requires').splitlines())
527 requirements = set(vfs.read(b'requires').splitlines())
528 except IOError as e:
528 except IOError as e:
529 if not (allowmissing and e.errno == errno.ENOENT):
529 if not (allowmissing and e.errno == errno.ENOENT):
530 raise
530 raise
531 requirements = set()
531 requirements = set()
532 return requirements
532 return requirements
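
For illustration, the same parsing without the vfs abstraction (a
hypothetical helper, not part of this module):

    import errno

    def read_requires(path, allowmissing=True):
        # parse a newline-delimited requirements file, treating a missing
        # file as "no requirements"
        try:
            with open(path, 'rb') as fh:
                return set(fh.read().splitlines())
        except IOError as e:
            if not (allowmissing and e.errno == errno.ENOENT):
                raise
            return set()
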
533
533
534
534
535 def makelocalrepository(baseui, path, intents=None):
535 def makelocalrepository(baseui, path, intents=None):
536 """Create a local repository object.
536 """Create a local repository object.
537
537
538 Given arguments needed to construct a local repository, this function
538 Given arguments needed to construct a local repository, this function
539 performs various early repository loading functionality (such as
539 performs various early repository loading functionality (such as
540 reading the ``.hg/requires`` and ``.hg/hgrc`` files), validates that
540 reading the ``.hg/requires`` and ``.hg/hgrc`` files), validates that
541 the repository can be opened, derives a type suitable for representing
541 the repository can be opened, derives a type suitable for representing
542 that repository, and returns an instance of it.
542 that repository, and returns an instance of it.
543
543
544 The returned object conforms to the ``repository.completelocalrepository``
544 The returned object conforms to the ``repository.completelocalrepository``
545 interface.
545 interface.
546
546
547 The repository type is derived by calling a series of factory functions
547 The repository type is derived by calling a series of factory functions
548 for each aspect/interface of the final repository. These are defined by
548 for each aspect/interface of the final repository. These are defined by
549 ``REPO_INTERFACES``.
549 ``REPO_INTERFACES``.
550
550
551 Each factory function is called to produce a type implementing a specific
551 Each factory function is called to produce a type implementing a specific
552 interface. The cumulative list of returned types will be combined into a
552 interface. The cumulative list of returned types will be combined into a
553 new type and that type will be instantiated to represent the local
553 new type and that type will be instantiated to represent the local
554 repository.
554 repository.
555
555
556 The factory functions each receive various state that may be consulted
556 The factory functions each receive various state that may be consulted
557 as part of deriving a type.
557 as part of deriving a type.
558
558
559 Extensions should wrap these factory functions to customize repository type
559 Extensions should wrap these factory functions to customize repository type
560 creation. Note that an extension's wrapped function may be called even if
560 creation. Note that an extension's wrapped function may be called even if
561 that extension is not loaded for the repo being constructed. Extensions
561 that extension is not loaded for the repo being constructed. Extensions
562 should check if their ``__name__`` appears in the
562 should check if their ``__name__`` appears in the
563 ``extensionmodulenames`` set passed to the factory function and no-op if
563 ``extensionmodulenames`` set passed to the factory function and no-op if
564 not.
564 not.
565 """
565 """
566 ui = baseui.copy()
566 ui = baseui.copy()
567 # Prevent copying repo configuration.
567 # Prevent copying repo configuration.
568 ui.copy = baseui.copy
568 ui.copy = baseui.copy
569
569
570 # Working directory VFS rooted at repository root.
570 # Working directory VFS rooted at repository root.
571 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
571 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
572
572
573 # Main VFS for .hg/ directory.
573 # Main VFS for .hg/ directory.
574 hgpath = wdirvfs.join(b'.hg')
574 hgpath = wdirvfs.join(b'.hg')
575 hgvfs = vfsmod.vfs(hgpath, cacheaudited=True)
575 hgvfs = vfsmod.vfs(hgpath, cacheaudited=True)
576 # Whether this repository is a shared one or not
576 # Whether this repository is a shared one or not
577 shared = False
577 shared = False
578 # If this repository is shared, a vfs pointing to the shared repo
578 # If this repository is shared, a vfs pointing to the shared repo
579 sharedvfs = None
579 sharedvfs = None
580
580
581 # The .hg/ path should exist and should be a directory. All other
581 # The .hg/ path should exist and should be a directory. All other
582 # cases are errors.
582 # cases are errors.
583 if not hgvfs.isdir():
583 if not hgvfs.isdir():
584 try:
584 try:
585 hgvfs.stat()
585 hgvfs.stat()
586 except OSError as e:
586 except OSError as e:
587 if e.errno != errno.ENOENT:
587 if e.errno != errno.ENOENT:
588 raise
588 raise
589 except ValueError as e:
589 except ValueError as e:
590 # Can be raised on Python 3.8 when path is invalid.
590 # Can be raised on Python 3.8 when path is invalid.
591 raise error.Abort(
591 raise error.Abort(
592 _(b'invalid path %s: %s') % (path, stringutil.forcebytestr(e))
592 _(b'invalid path %s: %s') % (path, stringutil.forcebytestr(e))
593 )
593 )
594
594
595 raise error.RepoError(_(b'repository %s not found') % path)
595 raise error.RepoError(_(b'repository %s not found') % path)
596
596
597 requirements = _readrequires(hgvfs, True)
597 requirements = _readrequires(hgvfs, True)
598 shared = (
598 shared = (
599 requirementsmod.SHARED_REQUIREMENT in requirements
599 requirementsmod.SHARED_REQUIREMENT in requirements
600 or requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements
600 or requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements
601 )
601 )
602 storevfs = None
602 storevfs = None
603 if shared:
603 if shared:
604 # This is a shared repo
604 # This is a shared repo
605 sharedvfs = _getsharedvfs(hgvfs, requirements)
605 sharedvfs = _getsharedvfs(hgvfs, requirements)
606 storevfs = vfsmod.vfs(sharedvfs.join(b'store'))
606 storevfs = vfsmod.vfs(sharedvfs.join(b'store'))
607 else:
607 else:
608 storevfs = vfsmod.vfs(hgvfs.join(b'store'))
608 storevfs = vfsmod.vfs(hgvfs.join(b'store'))
609
609
610 # if .hg/requires contains the sharesafe requirement, it means
610 # if .hg/requires contains the sharesafe requirement, it means
611 # there exists a `.hg/store/requires` too and we should read it
611 # there exists a `.hg/store/requires` too and we should read it
612 # NOTE: presence of SHARESAFE_REQUIREMENT implies that the store requirement
612 # NOTE: presence of SHARESAFE_REQUIREMENT implies that the store requirement
613 # is present. We never write SHARESAFE_REQUIREMENT for a repo if store
613 # is present. We never write SHARESAFE_REQUIREMENT for a repo if store
614 # is not present; refer to checkrequirementscompat() for that
614 # is not present; refer to checkrequirementscompat() for that
615 #
615 #
616 # However, if SHARESAFE_REQUIREMENT is not present, it means that the
616 # However, if SHARESAFE_REQUIREMENT is not present, it means that the
617 # repository was shared the old way. We check the share source .hg/requires
617 # repository was shared the old way. We check the share source .hg/requires
618 # for SHARESAFE_REQUIREMENT to detect whether the current repository needs
618 # for SHARESAFE_REQUIREMENT to detect whether the current repository needs
619 # to be reshared
619 # to be reshared
620 hint = _(b"see `hg help config.format.use-share-safe` for more information")
620 hint = _(b"see `hg help config.format.use-share-safe` for more information")
621 if requirementsmod.SHARESAFE_REQUIREMENT in requirements:
621 if requirementsmod.SHARESAFE_REQUIREMENT in requirements:
622
622
623 if (
623 if (
624 shared
624 shared
625 and requirementsmod.SHARESAFE_REQUIREMENT
625 and requirementsmod.SHARESAFE_REQUIREMENT
626 not in _readrequires(sharedvfs, True)
626 not in _readrequires(sharedvfs, True)
627 ):
627 ):
628 mismatch_warn = ui.configbool(
628 mismatch_warn = ui.configbool(
629 b'share', b'safe-mismatch.source-not-safe.warn'
629 b'share', b'safe-mismatch.source-not-safe.warn'
630 )
630 )
631 mismatch_config = ui.config(
631 mismatch_config = ui.config(
632 b'share', b'safe-mismatch.source-not-safe'
632 b'share', b'safe-mismatch.source-not-safe'
633 )
633 )
634 if mismatch_config in (
634 if mismatch_config in (
635 b'downgrade-allow',
635 b'downgrade-allow',
636 b'allow',
636 b'allow',
637 b'downgrade-abort',
637 b'downgrade-abort',
638 ):
638 ):
639 # prevent cyclic import localrepo -> upgrade -> localrepo
639 # prevent cyclic import localrepo -> upgrade -> localrepo
640 from . import upgrade
640 from . import upgrade
641
641
642 upgrade.downgrade_share_to_non_safe(
642 upgrade.downgrade_share_to_non_safe(
643 ui,
643 ui,
644 hgvfs,
644 hgvfs,
645 sharedvfs,
645 sharedvfs,
646 requirements,
646 requirements,
647 mismatch_config,
647 mismatch_config,
648 mismatch_warn,
648 mismatch_warn,
649 )
649 )
650 elif mismatch_config == b'abort':
650 elif mismatch_config == b'abort':
651 raise error.Abort(
651 raise error.Abort(
652 _(b"share source does not support share-safe requirement"),
652 _(b"share source does not support share-safe requirement"),
653 hint=hint,
653 hint=hint,
654 )
654 )
655 else:
655 else:
656 raise error.Abort(
656 raise error.Abort(
657 _(
657 _(
658 b"share-safe mismatch with source.\nUnrecognized"
658 b"share-safe mismatch with source.\nUnrecognized"
659 b" value '%s' of `share.safe-mismatch.source-not-safe`"
659 b" value '%s' of `share.safe-mismatch.source-not-safe`"
660 b" set."
660 b" set."
661 )
661 )
662 % mismatch_config,
662 % mismatch_config,
663 hint=hint,
663 hint=hint,
664 )
664 )
665 else:
665 else:
666 requirements |= _readrequires(storevfs, False)
666 requirements |= _readrequires(storevfs, False)
667 elif shared:
667 elif shared:
668 sourcerequires = _readrequires(sharedvfs, False)
668 sourcerequires = _readrequires(sharedvfs, False)
669 if requirementsmod.SHARESAFE_REQUIREMENT in sourcerequires:
669 if requirementsmod.SHARESAFE_REQUIREMENT in sourcerequires:
670 mismatch_config = ui.config(b'share', b'safe-mismatch.source-safe')
670 mismatch_config = ui.config(b'share', b'safe-mismatch.source-safe')
671 mismatch_warn = ui.configbool(
671 mismatch_warn = ui.configbool(
672 b'share', b'safe-mismatch.source-safe.warn'
672 b'share', b'safe-mismatch.source-safe.warn'
673 )
673 )
674 if mismatch_config in (
674 if mismatch_config in (
675 b'upgrade-allow',
675 b'upgrade-allow',
676 b'allow',
676 b'allow',
677 b'upgrade-abort',
677 b'upgrade-abort',
678 ):
678 ):
679 # prevent cyclic import localrepo -> upgrade -> localrepo
679 # prevent cyclic import localrepo -> upgrade -> localrepo
680 from . import upgrade
680 from . import upgrade
681
681
682 upgrade.upgrade_share_to_safe(
682 upgrade.upgrade_share_to_safe(
683 ui,
683 ui,
684 hgvfs,
684 hgvfs,
685 storevfs,
685 storevfs,
686 requirements,
686 requirements,
687 mismatch_config,
687 mismatch_config,
688 mismatch_warn,
688 mismatch_warn,
689 )
689 )
690 elif mismatch_config == b'abort':
690 elif mismatch_config == b'abort':
691 raise error.Abort(
691 raise error.Abort(
692 _(
692 _(
693 b'version mismatch: source uses share-safe'
693 b'version mismatch: source uses share-safe'
694 b' functionality while the current share does not'
694 b' functionality while the current share does not'
695 ),
695 ),
696 hint=hint,
696 hint=hint,
697 )
697 )
698 else:
698 else:
699 raise error.Abort(
699 raise error.Abort(
700 _(
700 _(
701 b"share-safe mismatch with source.\nUnrecognized"
701 b"share-safe mismatch with source.\nUnrecognized"
702 b" value '%s' of `share.safe-mismatch.source-safe` set."
702 b" value '%s' of `share.safe-mismatch.source-safe` set."
703 )
703 )
704 % mismatch_config,
704 % mismatch_config,
705 hint=hint,
705 hint=hint,
706 )
706 )
707
707
708 # The .hg/hgrc file may load extensions or contain config options
708 # The .hg/hgrc file may load extensions or contain config options
709 # that influence repository construction. Attempt to load it and
709 # that influence repository construction. Attempt to load it and
710 # process any new extensions that it may have pulled in.
710 # process any new extensions that it may have pulled in.
711 if loadhgrc(ui, wdirvfs, hgvfs, requirements, sharedvfs):
711 if loadhgrc(ui, wdirvfs, hgvfs, requirements, sharedvfs):
712 afterhgrcload(ui, wdirvfs, hgvfs, requirements)
712 afterhgrcload(ui, wdirvfs, hgvfs, requirements)
713 extensions.loadall(ui)
713 extensions.loadall(ui)
714 extensions.populateui(ui)
714 extensions.populateui(ui)
715
715
716 # Set of module names of extensions loaded for this repository.
716 # Set of module names of extensions loaded for this repository.
717 extensionmodulenames = {m.__name__ for n, m in extensions.extensions(ui)}
717 extensionmodulenames = {m.__name__ for n, m in extensions.extensions(ui)}
718
718
719 supportedrequirements = gathersupportedrequirements(ui)
719 supportedrequirements = gathersupportedrequirements(ui)
720
720
721 # We first validate the requirements are known.
721 # We first validate the requirements are known.
722 ensurerequirementsrecognized(requirements, supportedrequirements)
722 ensurerequirementsrecognized(requirements, supportedrequirements)
723
723
724 # Then we validate that the known set is reasonable to use together.
724 # Then we validate that the known set is reasonable to use together.
725 ensurerequirementscompatible(ui, requirements)
725 ensurerequirementscompatible(ui, requirements)
726
726
727 # TODO there are unhandled edge cases related to opening repositories with
727 # TODO there are unhandled edge cases related to opening repositories with
728 # shared storage. If storage is shared, we should also test for requirements
728 # shared storage. If storage is shared, we should also test for requirements
729 # compatibility in the pointed-to repo. This entails loading the .hg/hgrc in
729 # compatibility in the pointed-to repo. This entails loading the .hg/hgrc in
730 # that repo, as that repo may load extensions needed to open it. This is a
730 # that repo, as that repo may load extensions needed to open it. This is a
731 # bit complicated because we don't want the other hgrc to overwrite settings
731 # bit complicated because we don't want the other hgrc to overwrite settings
732 # in this hgrc.
732 # in this hgrc.
733 #
733 #
734 # This bug is somewhat mitigated by the fact that we copy the .hg/requires
734 # This bug is somewhat mitigated by the fact that we copy the .hg/requires
735 # file when sharing repos. But if a requirement is added after the share is
735 # file when sharing repos. But if a requirement is added after the share is
736 # performed, thereby introducing a new requirement for the opener, we may
736 # performed, thereby introducing a new requirement for the opener, we may
737 # not see that and could encounter a run-time error interacting with
737 # not see that and could encounter a run-time error interacting with
738 # that shared store since it has an unknown-to-us requirement.
738 # that shared store since it has an unknown-to-us requirement.
739
739
740 # At this point, we know we should be capable of opening the repository.
740 # At this point, we know we should be capable of opening the repository.
741 # Now get on with doing that.
741 # Now get on with doing that.
742
742
743 features = set()
743 features = set()
744
744
745 # The "store" part of the repository holds versioned data. How it is
745 # The "store" part of the repository holds versioned data. How it is
746 # accessed is determined by various requirements. If `shared` or
746 # accessed is determined by various requirements. If `shared` or
747 # `relshared` requirements are present, this indicates the current repository
747 # `relshared` requirements are present, this indicates the current repository
748 # is a share and the store exists in the path mentioned in `.hg/sharedpath`
748 # is a share and the store exists in the path mentioned in `.hg/sharedpath`
749 if shared:
749 if shared:
750 storebasepath = sharedvfs.base
750 storebasepath = sharedvfs.base
751 cachepath = sharedvfs.join(b'cache')
751 cachepath = sharedvfs.join(b'cache')
752 features.add(repository.REPO_FEATURE_SHARED_STORAGE)
752 features.add(repository.REPO_FEATURE_SHARED_STORAGE)
753 else:
753 else:
754 storebasepath = hgvfs.base
754 storebasepath = hgvfs.base
755 cachepath = hgvfs.join(b'cache')
755 cachepath = hgvfs.join(b'cache')
756 wcachepath = hgvfs.join(b'wcache')
756 wcachepath = hgvfs.join(b'wcache')
757
757
758 # The store has changed over time and the exact layout is dictated by
758 # The store has changed over time and the exact layout is dictated by
759 # requirements. The store interface abstracts differences across all
759 # requirements. The store interface abstracts differences across all
760 # of them.
760 # of them.
761 store = makestore(
761 store = makestore(
762 requirements,
762 requirements,
763 storebasepath,
763 storebasepath,
764 lambda base: vfsmod.vfs(base, cacheaudited=True),
764 lambda base: vfsmod.vfs(base, cacheaudited=True),
765 )
765 )
766 hgvfs.createmode = store.createmode
766 hgvfs.createmode = store.createmode
767
767
768 storevfs = store.vfs
768 storevfs = store.vfs
769 storevfs.options = resolvestorevfsoptions(ui, requirements, features)
769 storevfs.options = resolvestorevfsoptions(ui, requirements, features)
770
770
771 if (
771 if (
772 requirementsmod.REVLOGV2_REQUIREMENT in requirements
772 requirementsmod.REVLOGV2_REQUIREMENT in requirements
773 or requirementsmod.CHANGELOGV2_REQUIREMENT in requirements
773 or requirementsmod.CHANGELOGV2_REQUIREMENT in requirements
774 ):
774 ):
775 features.add(repository.REPO_FEATURE_SIDE_DATA)
775 features.add(repository.REPO_FEATURE_SIDE_DATA)
776 # the revlogv2 docket introduced a race condition that we need to fix
776 # the revlogv2 docket introduced a race condition that we need to fix
777 features.discard(repository.REPO_FEATURE_STREAM_CLONE)
777 features.discard(repository.REPO_FEATURE_STREAM_CLONE)
778
778
779 # The cache vfs is used to manage cache files.
779 # The cache vfs is used to manage cache files.
780 cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
780 cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
781 cachevfs.createmode = store.createmode
781 cachevfs.createmode = store.createmode
782 # The cache vfs is used to manage cache files related to the working copy
782 # The cache vfs is used to manage cache files related to the working copy
783 wcachevfs = vfsmod.vfs(wcachepath, cacheaudited=True)
783 wcachevfs = vfsmod.vfs(wcachepath, cacheaudited=True)
784 wcachevfs.createmode = store.createmode
784 wcachevfs.createmode = store.createmode
785
785
786 # Now resolve the type for the repository object. We do this by repeatedly
786 # Now resolve the type for the repository object. We do this by repeatedly
787 # calling a factory function to produce types for specific aspects of the
787 # calling a factory function to produce types for specific aspects of the
788 # repo's operation. The aggregate returned types are used as base classes
788 # repo's operation. The aggregate returned types are used as base classes
789 # for a dynamically-derived type, which will represent our new repository.
789 # for a dynamically-derived type, which will represent our new repository.
790
790
791 bases = []
791 bases = []
792 extrastate = {}
792 extrastate = {}
793
793
794 for iface, fn in REPO_INTERFACES:
794 for iface, fn in REPO_INTERFACES:
795 # We pass all potentially useful state to give extensions tons of
795 # We pass all potentially useful state to give extensions tons of
796 # flexibility.
796 # flexibility.
797 typ = fn()(
797 typ = fn()(
798 ui=ui,
798 ui=ui,
799 intents=intents,
799 intents=intents,
800 requirements=requirements,
800 requirements=requirements,
801 features=features,
801 features=features,
802 wdirvfs=wdirvfs,
802 wdirvfs=wdirvfs,
803 hgvfs=hgvfs,
803 hgvfs=hgvfs,
804 store=store,
804 store=store,
805 storevfs=storevfs,
805 storevfs=storevfs,
806 storeoptions=storevfs.options,
806 storeoptions=storevfs.options,
807 cachevfs=cachevfs,
807 cachevfs=cachevfs,
808 wcachevfs=wcachevfs,
808 wcachevfs=wcachevfs,
809 extensionmodulenames=extensionmodulenames,
809 extensionmodulenames=extensionmodulenames,
810 extrastate=extrastate,
810 extrastate=extrastate,
811 baseclasses=bases,
811 baseclasses=bases,
812 )
812 )
813
813
814 if not isinstance(typ, type):
814 if not isinstance(typ, type):
815 raise error.ProgrammingError(
815 raise error.ProgrammingError(
816 b'unable to construct type for %s' % iface
816 b'unable to construct type for %s' % iface
817 )
817 )
818
818
819 bases.append(typ)
819 bases.append(typ)
820
820
821 # type() allows you to use characters in type names that wouldn't be
821 # type() allows you to use characters in type names that wouldn't be
822 # recognized as Python symbols in source code. We abuse that to add
822 # recognized as Python symbols in source code. We abuse that to add
823 # rich information about our constructed repo.
823 # rich information about our constructed repo.
824 name = pycompat.sysstr(
824 name = pycompat.sysstr(
825 b'derivedrepo:%s<%s>' % (wdirvfs.base, b','.join(sorted(requirements)))
825 b'derivedrepo:%s<%s>' % (wdirvfs.base, b','.join(sorted(requirements)))
826 )
826 )
827
827
828 cls = type(name, tuple(bases), {})
828 cls = type(name, tuple(bases), {})
829
829
830 return cls(
830 return cls(
831 baseui=baseui,
831 baseui=baseui,
832 ui=ui,
832 ui=ui,
833 origroot=path,
833 origroot=path,
834 wdirvfs=wdirvfs,
834 wdirvfs=wdirvfs,
835 hgvfs=hgvfs,
835 hgvfs=hgvfs,
836 requirements=requirements,
836 requirements=requirements,
837 supportedrequirements=supportedrequirements,
837 supportedrequirements=supportedrequirements,
838 sharedpath=storebasepath,
838 sharedpath=storebasepath,
839 store=store,
839 store=store,
840 cachevfs=cachevfs,
840 cachevfs=cachevfs,
841 wcachevfs=wcachevfs,
841 wcachevfs=wcachevfs,
842 features=features,
842 features=features,
843 intents=intents,
843 intents=intents,
844 )
844 )
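
A minimal usage sketch (the repository path is hypothetical; as the docstring
above notes, most callers should prefer ``hg.repository()``):

    from mercurial import localrepo, ui as uimod

    ui = uimod.ui.load()  # global/user config, no repo config yet
    repo = localrepo.makelocalrepository(ui, b'/path/to/repo')
    print(sorted(repo.requirements))
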
845
845
846
846
847 def loadhgrc(ui, wdirvfs, hgvfs, requirements, sharedvfs=None):
847 def loadhgrc(ui, wdirvfs, hgvfs, requirements, sharedvfs=None):
848 """Load hgrc files/content into a ui instance.
848 """Load hgrc files/content into a ui instance.
849
849
850 This is called during repository opening to load any additional
850 This is called during repository opening to load any additional
851 config files or settings relevant to the current repository.
851 config files or settings relevant to the current repository.
852
852
853 Returns a bool indicating whether any additional configs were loaded.
853 Returns a bool indicating whether any additional configs were loaded.
854
854
855 Extensions should monkeypatch this function to modify how per-repo
855 Extensions should monkeypatch this function to modify how per-repo
856 configs are loaded. For example, an extension may wish to pull in
856 configs are loaded. For example, an extension may wish to pull in
857 configs from alternate files or sources.
857 configs from alternate files or sources.
858
858
859 sharedvfs is a vfs object pointing to the source repo if the current one is a
859 sharedvfs is a vfs object pointing to the source repo if the current one is a
860 shared one
860 shared one
861 """
861 """
862 if not rcutil.use_repo_hgrc():
862 if not rcutil.use_repo_hgrc():
863 return False
863 return False
864
864
865 ret = False
865 ret = False
866 # first load config from the shared source if we have to
866 # first load config from the shared source if we have to
867 if requirementsmod.SHARESAFE_REQUIREMENT in requirements and sharedvfs:
867 if requirementsmod.SHARESAFE_REQUIREMENT in requirements and sharedvfs:
868 try:
868 try:
869 ui.readconfig(sharedvfs.join(b'hgrc'), root=sharedvfs.base)
869 ui.readconfig(sharedvfs.join(b'hgrc'), root=sharedvfs.base)
870 ret = True
870 ret = True
871 except IOError:
871 except IOError:
872 pass
872 pass
873
873
874 try:
874 try:
875 ui.readconfig(hgvfs.join(b'hgrc'), root=wdirvfs.base)
875 ui.readconfig(hgvfs.join(b'hgrc'), root=wdirvfs.base)
876 ret = True
876 ret = True
877 except IOError:
877 except IOError:
878 pass
878 pass
879
879
880 try:
880 try:
881 ui.readconfig(hgvfs.join(b'hgrc-not-shared'), root=wdirvfs.base)
881 ui.readconfig(hgvfs.join(b'hgrc-not-shared'), root=wdirvfs.base)
882 ret = True
882 ret = True
883 except IOError:
883 except IOError:
884 pass
884 pass
885
885
886 return ret
886 return ret
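
A hedged sketch of the monkeypatching pattern mentioned in the docstring (the
extra config file name is invented for illustration):

    from mercurial import extensions, localrepo

    def _loadhgrc(orig, ui, wdirvfs, hgvfs, requirements, *args, **kwargs):
        ret = orig(ui, wdirvfs, hgvfs, requirements, *args, **kwargs)
        try:
            # pull in one additional repo-local config file
            ui.readconfig(hgvfs.join(b'hgrc-extra'), root=wdirvfs.base)
            ret = True
        except IOError:
            pass
        return ret

    def uisetup(ui):
        extensions.wrapfunction(localrepo, 'loadhgrc', _loadhgrc)
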
887
887
888
888
889 def afterhgrcload(ui, wdirvfs, hgvfs, requirements):
889 def afterhgrcload(ui, wdirvfs, hgvfs, requirements):
890 """Perform additional actions after .hg/hgrc is loaded.
890 """Perform additional actions after .hg/hgrc is loaded.
891
891
892 This function is called during repository loading immediately after
892 This function is called during repository loading immediately after
893 the .hg/hgrc file is loaded and before per-repo extensions are loaded.
893 the .hg/hgrc file is loaded and before per-repo extensions are loaded.
894
894
895 The function can be used to validate configs, automatically add
895 The function can be used to validate configs, automatically add
896 options (including extensions) based on requirements, etc.
896 options (including extensions) based on requirements, etc.
897 """
897 """
898
898
899 # Map of requirements to list of extensions to load automatically when
899 # Map of requirements to list of extensions to load automatically when
900 # the requirement is present.
900 # the requirement is present.
901 autoextensions = {
901 autoextensions = {
902 b'git': [b'git'],
902 b'git': [b'git'],
903 b'largefiles': [b'largefiles'],
903 b'largefiles': [b'largefiles'],
904 b'lfs': [b'lfs'],
904 b'lfs': [b'lfs'],
905 }
905 }
906
906
907 for requirement, names in sorted(autoextensions.items()):
907 for requirement, names in sorted(autoextensions.items()):
908 if requirement not in requirements:
908 if requirement not in requirements:
909 continue
909 continue
910
910
911 for name in names:
911 for name in names:
912 if not ui.hasconfig(b'extensions', name):
912 if not ui.hasconfig(b'extensions', name):
913 ui.setconfig(b'extensions', name, b'', source=b'autoload')
913 ui.setconfig(b'extensions', name, b'', source=b'autoload')
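
For example, based on the mapping above: a repository whose requirements
include ``b'lfs'`` behaves, after this hook runs, as if the user had
configured:

    [extensions]
    lfs =
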
914
914
915
915
916 def gathersupportedrequirements(ui):
916 def gathersupportedrequirements(ui):
917 """Determine the complete set of recognized requirements."""
917 """Determine the complete set of recognized requirements."""
918 # Start with all requirements supported by this file.
918 # Start with all requirements supported by this file.
919 supported = set(localrepository._basesupported)
919 supported = set(localrepository._basesupported)
920
920
921 # Execute ``featuresetupfuncs`` entries if they belong to an extension
921 # Execute ``featuresetupfuncs`` entries if they belong to an extension
922 # relevant to this ui instance.
922 # relevant to this ui instance.
923 modules = {m.__name__ for n, m in extensions.extensions(ui)}
923 modules = {m.__name__ for n, m in extensions.extensions(ui)}
924
924
925 for fn in featuresetupfuncs:
925 for fn in featuresetupfuncs:
926 if fn.__module__ in modules:
926 if fn.__module__ in modules:
927 fn(ui, supported)
927 fn(ui, supported)
928
928
929 # Add derived requirements from registered compression engines.
929 # Add derived requirements from registered compression engines.
930 for name in util.compengines:
930 for name in util.compengines:
931 engine = util.compengines[name]
931 engine = util.compengines[name]
932 if engine.available() and engine.revlogheader():
932 if engine.available() and engine.revlogheader():
933 supported.add(b'exp-compression-%s' % name)
933 supported.add(b'exp-compression-%s' % name)
934 if engine.name() == b'zstd':
934 if engine.name() == b'zstd':
935 supported.add(requirementsmod.REVLOG_COMPRESSION_ZSTD)
935 supported.add(requirementsmod.REVLOG_COMPRESSION_ZSTD)
936
936
937 return supported
937 return supported
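
As a concrete illustration: assuming the zstd engine is available and
revlog-capable, the loop above adds ``b'exp-compression-zstd'`` and, for zstd
specifically, the name behind ``requirementsmod.REVLOG_COMPRESSION_ZSTD``:

    # sketch of the derived entries for zstd
    supported.add(b'exp-compression-zstd')
    supported.add(requirementsmod.REVLOG_COMPRESSION_ZSTD)
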
938
938
939
939
940 def ensurerequirementsrecognized(requirements, supported):
940 def ensurerequirementsrecognized(requirements, supported):
941 """Validate that a set of local requirements is recognized.
941 """Validate that a set of local requirements is recognized.
942
942
943 Receives a set of requirements. Raises an ``error.RepoError`` if there
943 Receives a set of requirements. Raises an ``error.RepoError`` if there
944 exists any requirement in that set that currently loaded code doesn't
944 exists any requirement in that set that currently loaded code doesn't
945 recognize.
945 recognize.
946
946
947 Returns nothing; this function only raises on unrecognized requirements.
947 Returns nothing; this function only raises on unrecognized requirements.
948 """
948 """
949 missing = set()
949 missing = set()
950
950
951 for requirement in requirements:
951 for requirement in requirements:
952 if requirement in supported:
952 if requirement in supported:
953 continue
953 continue
954
954
955 if not requirement or not requirement[0:1].isalnum():
955 if not requirement or not requirement[0:1].isalnum():
956 raise error.RequirementError(_(b'.hg/requires file is corrupt'))
956 raise error.RequirementError(_(b'.hg/requires file is corrupt'))
957
957
958 missing.add(requirement)
958 missing.add(requirement)
959
959
960 if missing:
960 if missing:
961 raise error.RequirementError(
961 raise error.RequirementError(
962 _(b'repository requires features unknown to this Mercurial: %s')
962 _(b'repository requires features unknown to this Mercurial: %s')
963 % b' '.join(sorted(missing)),
963 % b' '.join(sorted(missing)),
964 hint=_(
964 hint=_(
965 b'see https://mercurial-scm.org/wiki/MissingRequirement '
965 b'see https://mercurial-scm.org/wiki/MissingRequirement '
966 b'for more information'
966 b'for more information'
967 ),
967 ),
968 )
968 )
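
A behavior sketch with invented requirement names:

    # passes silently: every requirement is recognized
    ensurerequirementsrecognized({b'store'}, {b'store', b'fncache'})

    # raises error.RequirementError naming b'frobnicate'
    ensurerequirementsrecognized({b'frobnicate'}, {b'store'})
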
969
969
970
970
971 def ensurerequirementscompatible(ui, requirements):
971 def ensurerequirementscompatible(ui, requirements):
972 """Validates that a set of recognized requirements is mutually compatible.
972 """Validates that a set of recognized requirements is mutually compatible.
973
973
974 Some requirements may not be compatible with others or require
974 Some requirements may not be compatible with others or require
975 config options that aren't enabled. This function is called during
975 config options that aren't enabled. This function is called during
976 repository opening to ensure that the set of requirements needed
976 repository opening to ensure that the set of requirements needed
977 to open a repository is sane and compatible with config options.
977 to open a repository is sane and compatible with config options.
978
978
979 Extensions can monkeypatch this function to perform additional
979 Extensions can monkeypatch this function to perform additional
980 checking.
980 checking.
981
981
982 ``error.RepoError`` should be raised on failure.
982 ``error.RepoError`` should be raised on failure.
983 """
983 """
984 if (
984 if (
985 requirementsmod.SPARSE_REQUIREMENT in requirements
985 requirementsmod.SPARSE_REQUIREMENT in requirements
986 and not sparse.enabled
986 and not sparse.enabled
987 ):
987 ):
988 raise error.RepoError(
988 raise error.RepoError(
989 _(
989 _(
990 b'repository is using sparse feature but '
990 b'repository is using sparse feature but '
991 b'sparse is not enabled; enable the '
991 b'sparse is not enabled; enable the '
992 b'"sparse" extensions to access'
992 b'"sparse" extensions to access'
993 )
993 )
994 )
994 )
995
995
996
996
997 def makestore(requirements, path, vfstype):
997 def makestore(requirements, path, vfstype):
998 """Construct a storage object for a repository."""
998 """Construct a storage object for a repository."""
999 if requirementsmod.STORE_REQUIREMENT in requirements:
999 if requirementsmod.STORE_REQUIREMENT in requirements:
1000 if requirementsmod.FNCACHE_REQUIREMENT in requirements:
1000 if requirementsmod.FNCACHE_REQUIREMENT in requirements:
1001 dotencode = requirementsmod.DOTENCODE_REQUIREMENT in requirements
1001 dotencode = requirementsmod.DOTENCODE_REQUIREMENT in requirements
1002 return storemod.fncachestore(path, vfstype, dotencode)
1002 return storemod.fncachestore(path, vfstype, dotencode)
1003
1003
1004 return storemod.encodedstore(path, vfstype)
1004 return storemod.encodedstore(path, vfstype)
1005
1005
1006 return storemod.basicstore(path, vfstype)
1006 return storemod.basicstore(path, vfstype)
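
The branch structure above, summarized (each store class is the one returned
in that branch):

    # requirements present             -> store type
    # store + fncache (+/- dotencode)  -> storemod.fncachestore
    # store only                       -> storemod.encodedstore
    # neither (very old repositories)  -> storemod.basicstore
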
1007
1007
1008
1008
1009 def resolvestorevfsoptions(ui, requirements, features):
1009 def resolvestorevfsoptions(ui, requirements, features):
1010 """Resolve the options to pass to the store vfs opener.
1010 """Resolve the options to pass to the store vfs opener.
1011
1011
1012 The returned dict is used to influence behavior of the storage layer.
1012 The returned dict is used to influence behavior of the storage layer.
1013 """
1013 """
1014 options = {}
1014 options = {}
1015
1015
1016 if requirementsmod.TREEMANIFEST_REQUIREMENT in requirements:
1016 if requirementsmod.TREEMANIFEST_REQUIREMENT in requirements:
1017 options[b'treemanifest'] = True
1017 options[b'treemanifest'] = True
1018
1018
1019 # experimental config: format.manifestcachesize
1019 # experimental config: format.manifestcachesize
1020 manifestcachesize = ui.configint(b'format', b'manifestcachesize')
1020 manifestcachesize = ui.configint(b'format', b'manifestcachesize')
1021 if manifestcachesize is not None:
1021 if manifestcachesize is not None:
1022 options[b'manifestcachesize'] = manifestcachesize
1022 options[b'manifestcachesize'] = manifestcachesize
1023
1023
1024 # In the absence of another requirement superseding a revlog-related
1024 # In the absence of another requirement superseding a revlog-related
1025 # requirement, we have to assume the repo is using revlog version 0.
1025 # requirement, we have to assume the repo is using revlog version 0.
1026 # This revlog format is super old and we don't bother trying to parse
1026 # This revlog format is super old and we don't bother trying to parse
1027 # opener options for it because those options wouldn't do anything
1027 # opener options for it because those options wouldn't do anything
1028 # meaningful on such old repos.
1028 # meaningful on such old repos.
1029 if (
1029 if (
1030 requirementsmod.REVLOGV1_REQUIREMENT in requirements
1030 requirementsmod.REVLOGV1_REQUIREMENT in requirements
1031 or requirementsmod.REVLOGV2_REQUIREMENT in requirements
1031 or requirementsmod.REVLOGV2_REQUIREMENT in requirements
1032 ):
1032 ):
1033 options.update(resolverevlogstorevfsoptions(ui, requirements, features))
1033 options.update(resolverevlogstorevfsoptions(ui, requirements, features))
1034 else: # explicitly mark repo as using revlogv0
1034 else: # explicitly mark repo as using revlogv0
1035 options[b'revlogv0'] = True
1035 options[b'revlogv0'] = True
1036
1036
1037 if requirementsmod.COPIESSDC_REQUIREMENT in requirements:
1037 if requirementsmod.COPIESSDC_REQUIREMENT in requirements:
1038 options[b'copies-storage'] = b'changeset-sidedata'
1038 options[b'copies-storage'] = b'changeset-sidedata'
1039 else:
1039 else:
1040 writecopiesto = ui.config(b'experimental', b'copies.write-to')
1040 writecopiesto = ui.config(b'experimental', b'copies.write-to')
1041 copiesextramode = (b'changeset-only', b'compatibility')
1041 copiesextramode = (b'changeset-only', b'compatibility')
1042 if writecopiesto in copiesextramode:
1042 if writecopiesto in copiesextramode:
1043 options[b'copies-storage'] = b'extra'
1043 options[b'copies-storage'] = b'extra'
1044
1044
1045 return options
1045 return options
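
For example, following the else-branch above: in a repository without the
COPIESSDC requirement, an experimental copy-tracing mode routes copy metadata
to changeset extras. A hypothetical check (assumes ``ui`` carries
``[experimental] copies.write-to = changeset-only``):

    opts = resolvestorevfsoptions(ui, {b'revlogv1'}, set())
    assert opts[b'copies-storage'] == b'extra'
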
1046
1046
1047
1047
1048 def resolverevlogstorevfsoptions(ui, requirements, features):
1048 def resolverevlogstorevfsoptions(ui, requirements, features):
1049 """Resolve opener options specific to revlogs."""
1049 """Resolve opener options specific to revlogs."""
1050
1050
1051 options = {}
1051 options = {}
1052 options[b'flagprocessors'] = {}
1052 options[b'flagprocessors'] = {}
1053
1053
1054 if requirementsmod.REVLOGV1_REQUIREMENT in requirements:
1054 if requirementsmod.REVLOGV1_REQUIREMENT in requirements:
1055 options[b'revlogv1'] = True
1055 options[b'revlogv1'] = True
1056 if requirementsmod.REVLOGV2_REQUIREMENT in requirements:
1056 if requirementsmod.REVLOGV2_REQUIREMENT in requirements:
1057 options[b'revlogv2'] = True
1057 options[b'revlogv2'] = True
1058 if requirementsmod.CHANGELOGV2_REQUIREMENT in requirements:
1058 if requirementsmod.CHANGELOGV2_REQUIREMENT in requirements:
1059 options[b'changelogv2'] = True
1059 options[b'changelogv2'] = True
1060
1060
1061 if requirementsmod.GENERALDELTA_REQUIREMENT in requirements:
1061 if requirementsmod.GENERALDELTA_REQUIREMENT in requirements:
1062 options[b'generaldelta'] = True
1062 options[b'generaldelta'] = True
1063
1063
1064 # experimental config: format.chunkcachesize
1064 # experimental config: format.chunkcachesize
1065 chunkcachesize = ui.configint(b'format', b'chunkcachesize')
1065 chunkcachesize = ui.configint(b'format', b'chunkcachesize')
1066 if chunkcachesize is not None:
1066 if chunkcachesize is not None:
1067 options[b'chunkcachesize'] = chunkcachesize
1067 options[b'chunkcachesize'] = chunkcachesize
1068
1068
1069 deltabothparents = ui.configbool(
1069 deltabothparents = ui.configbool(
1070 b'storage', b'revlog.optimize-delta-parent-choice'
1070 b'storage', b'revlog.optimize-delta-parent-choice'
1071 )
1071 )
1072 options[b'deltabothparents'] = deltabothparents
1072 options[b'deltabothparents'] = deltabothparents
1073
1073
1074 issue6528 = ui.configbool(b'storage', b'revlog.issue6528.fix-incoming')
1074 issue6528 = ui.configbool(b'storage', b'revlog.issue6528.fix-incoming')
1075 options[b'issue6528.fix-incoming'] = issue6528
1075 options[b'issue6528.fix-incoming'] = issue6528
1076
1076
1077 lazydelta = ui.configbool(b'storage', b'revlog.reuse-external-delta')
1077 lazydelta = ui.configbool(b'storage', b'revlog.reuse-external-delta')
1078 lazydeltabase = False
1078 lazydeltabase = False
1079 if lazydelta:
1079 if lazydelta:
1080 lazydeltabase = ui.configbool(
1080 lazydeltabase = ui.configbool(
1081 b'storage', b'revlog.reuse-external-delta-parent'
1081 b'storage', b'revlog.reuse-external-delta-parent'
1082 )
1082 )
1083 if lazydeltabase is None:
1083 if lazydeltabase is None:
1084 lazydeltabase = not scmutil.gddeltaconfig(ui)
1084 lazydeltabase = not scmutil.gddeltaconfig(ui)
1085 options[b'lazydelta'] = lazydelta
1085 options[b'lazydelta'] = lazydelta
1086 options[b'lazydeltabase'] = lazydeltabase
1086 options[b'lazydeltabase'] = lazydeltabase
1087
1087
1088 chainspan = ui.configbytes(b'experimental', b'maxdeltachainspan')
1088 chainspan = ui.configbytes(b'experimental', b'maxdeltachainspan')
1089 if 0 <= chainspan:
1089 if 0 <= chainspan:
1090 options[b'maxdeltachainspan'] = chainspan
1090 options[b'maxdeltachainspan'] = chainspan
1091
1091
1092 mmapindexthreshold = ui.configbytes(b'experimental', b'mmapindexthreshold')
1092 mmapindexthreshold = ui.configbytes(b'experimental', b'mmapindexthreshold')
1093 if mmapindexthreshold is not None:
1093 if mmapindexthreshold is not None:
1094 options[b'mmapindexthreshold'] = mmapindexthreshold
1094 options[b'mmapindexthreshold'] = mmapindexthreshold
1095
1095
1096 withsparseread = ui.configbool(b'experimental', b'sparse-read')
1096 withsparseread = ui.configbool(b'experimental', b'sparse-read')
1097 srdensitythres = float(
1097 srdensitythres = float(
1098 ui.config(b'experimental', b'sparse-read.density-threshold')
1098 ui.config(b'experimental', b'sparse-read.density-threshold')
1099 )
1099 )
1100 srmingapsize = ui.configbytes(b'experimental', b'sparse-read.min-gap-size')
1100 srmingapsize = ui.configbytes(b'experimental', b'sparse-read.min-gap-size')
1101 options[b'with-sparse-read'] = withsparseread
1101 options[b'with-sparse-read'] = withsparseread
1102 options[b'sparse-read-density-threshold'] = srdensitythres
1102 options[b'sparse-read-density-threshold'] = srdensitythres
1103 options[b'sparse-read-min-gap-size'] = srmingapsize
1103 options[b'sparse-read-min-gap-size'] = srmingapsize
1104
1104
1105 sparserevlog = requirementsmod.SPARSEREVLOG_REQUIREMENT in requirements
1105 sparserevlog = requirementsmod.SPARSEREVLOG_REQUIREMENT in requirements
1106 options[b'sparse-revlog'] = sparserevlog
1106 options[b'sparse-revlog'] = sparserevlog
1107 if sparserevlog:
1107 if sparserevlog:
1108 options[b'generaldelta'] = True
1108 options[b'generaldelta'] = True
1109
1109
1110 maxchainlen = None
1110 maxchainlen = None
1111 if sparserevlog:
1111 if sparserevlog:
1112 maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
1112 maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
1113 # experimental config: format.maxchainlen
1113 # experimental config: format.maxchainlen
1114 maxchainlen = ui.configint(b'format', b'maxchainlen', maxchainlen)
1114 maxchainlen = ui.configint(b'format', b'maxchainlen', maxchainlen)
1115 if maxchainlen is not None:
1115 if maxchainlen is not None:
1116 options[b'maxchainlen'] = maxchainlen
1116 options[b'maxchainlen'] = maxchainlen
1117
1117
1118 for r in requirements:
1118 for r in requirements:
1119 # we allow multiple compression engine requirements to co-exist because
1119 # we allow multiple compression engine requirements to co-exist because
1120 # strictly speaking, revlog seems to support mixed compression styles.
1120 # strictly speaking, revlog seems to support mixed compression styles.
1121 #
1121 #
1122 # The compression used for new entries will be "the last one"
1122 # The compression used for new entries will be "the last one"
1123 prefix = r.startswith
1123 prefix = r.startswith
1124 if prefix(b'revlog-compression-') or prefix(b'exp-compression-'):
1124 if prefix(b'revlog-compression-') or prefix(b'exp-compression-'):
1125 options[b'compengine'] = r.split(b'-', 2)[2]
1125 options[b'compengine'] = r.split(b'-', 2)[2]
1126
1126
1127 options[b'zlib.level'] = ui.configint(b'storage', b'revlog.zlib.level')
1127 options[b'zlib.level'] = ui.configint(b'storage', b'revlog.zlib.level')
1128 if options[b'zlib.level'] is not None:
1128 if options[b'zlib.level'] is not None:
1129 if not (0 <= options[b'zlib.level'] <= 9):
1129 if not (0 <= options[b'zlib.level'] <= 9):
1130 msg = _(b'invalid value for `storage.revlog.zlib.level` config: %d')
1130 msg = _(b'invalid value for `storage.revlog.zlib.level` config: %d')
1131 raise error.Abort(msg % options[b'zlib.level'])
1131 raise error.Abort(msg % options[b'zlib.level'])
1132 options[b'zstd.level'] = ui.configint(b'storage', b'revlog.zstd.level')
1132 options[b'zstd.level'] = ui.configint(b'storage', b'revlog.zstd.level')
1133 if options[b'zstd.level'] is not None:
1133 if options[b'zstd.level'] is not None:
1134 if not (0 <= options[b'zstd.level'] <= 22):
1134 if not (0 <= options[b'zstd.level'] <= 22):
1135 msg = _(b'invalid value for `storage.revlog.zstd.level` config: %d')
1135 msg = _(b'invalid value for `storage.revlog.zstd.level` config: %d')
1136 raise error.Abort(msg % options[b'zstd.level'])
1136 raise error.Abort(msg % options[b'zstd.level'])
1137
1137
1138 if requirementsmod.NARROW_REQUIREMENT in requirements:
1138 if requirementsmod.NARROW_REQUIREMENT in requirements:
1139 options[b'enableellipsis'] = True
1139 options[b'enableellipsis'] = True
1140
1140
1141 if ui.configbool(b'experimental', b'rust.index'):
1141 if ui.configbool(b'experimental', b'rust.index'):
1142 options[b'rust.index'] = True
1142 options[b'rust.index'] = True
1143 if requirementsmod.NODEMAP_REQUIREMENT in requirements:
1143 if requirementsmod.NODEMAP_REQUIREMENT in requirements:
1144 slow_path = ui.config(
1144 slow_path = ui.config(
1145 b'storage', b'revlog.persistent-nodemap.slow-path'
1145 b'storage', b'revlog.persistent-nodemap.slow-path'
1146 )
1146 )
1147 if slow_path not in (b'allow', b'warn', b'abort'):
1147 if slow_path not in (b'allow', b'warn', b'abort'):
1148 default = ui.config_default(
1148 default = ui.config_default(
1149 b'storage', b'revlog.persistent-nodemap.slow-path'
1149 b'storage', b'revlog.persistent-nodemap.slow-path'
1150 )
1150 )
1151 msg = _(
1151 msg = _(
1152 b'unknown value for config '
1152 b'unknown value for config '
1153 b'"storage.revlog.persistent-nodemap.slow-path": "%s"\n'
1153 b'"storage.revlog.persistent-nodemap.slow-path": "%s"\n'
1154 )
1154 )
1155 ui.warn(msg % slow_path)
1155 ui.warn(msg % slow_path)
1156 if not ui.quiet:
1156 if not ui.quiet:
1157 ui.warn(_(b'falling back to default value: %s\n') % default)
1157 ui.warn(_(b'falling back to default value: %s\n') % default)
1158 slow_path = default
1158 slow_path = default
1159
1159
1160 msg = _(
1160 msg = _(
1161 b"accessing `persistent-nodemap` repository without associated "
1161 b"accessing `persistent-nodemap` repository without associated "
1162 b"fast implementation."
1162 b"fast implementation."
1163 )
1163 )
1164 hint = _(
1164 hint = _(
1165 b"check `hg help config.format.use-persistent-nodemap` "
1165 b"check `hg help config.format.use-persistent-nodemap` "
1166 b"for details"
1166 b"for details"
1167 )
1167 )
1168 if not revlog.HAS_FAST_PERSISTENT_NODEMAP:
1168 if not revlog.HAS_FAST_PERSISTENT_NODEMAP:
1169 if slow_path == b'warn':
1169 if slow_path == b'warn':
1170 msg = b"warning: " + msg + b'\n'
1170 msg = b"warning: " + msg + b'\n'
1171 ui.warn(msg)
1171 ui.warn(msg)
1172 if not ui.quiet:
1172 if not ui.quiet:
1173 hint = b'(' + hint + b')\n'
1173 hint = b'(' + hint + b')\n'
1174 ui.warn(hint)
1174 ui.warn(hint)
1175 if slow_path == b'abort':
1175 if slow_path == b'abort':
1176 raise error.Abort(msg, hint=hint)
1176 raise error.Abort(msg, hint=hint)
1177 options[b'persistent-nodemap'] = True
1177 options[b'persistent-nodemap'] = True
1178 if requirementsmod.DIRSTATE_V2_REQUIREMENT in requirements:
1178 if requirementsmod.DIRSTATE_V2_REQUIREMENT in requirements:
1179 slow_path = ui.config(b'storage', b'dirstate-v2.slow-path')
1179 slow_path = ui.config(b'storage', b'dirstate-v2.slow-path')
1180 if slow_path not in (b'allow', b'warn', b'abort'):
1180 if slow_path not in (b'allow', b'warn', b'abort'):
1181 default = ui.config_default(b'storage', b'dirstate-v2.slow-path')
1181 default = ui.config_default(b'storage', b'dirstate-v2.slow-path')
1182 msg = _(b'unknown value for config "dirstate-v2.slow-path": "%s"\n')
1182 msg = _(b'unknown value for config "dirstate-v2.slow-path": "%s"\n')
1183 ui.warn(msg % slow_path)
1183 ui.warn(msg % slow_path)
1184 if not ui.quiet:
1184 if not ui.quiet:
1185 ui.warn(_(b'falling back to default value: %s\n') % default)
1185 ui.warn(_(b'falling back to default value: %s\n') % default)
1186 slow_path = default
1186 slow_path = default
1187
1187
1188 msg = _(
1188 msg = _(
1189 b"accessing `dirstate-v2` repository without associated "
1189 b"accessing `dirstate-v2` repository without associated "
1190 b"fast implementation."
1190 b"fast implementation."
1191 )
1191 )
1192 hint = _(
1192 hint = _(
1193 b"check `hg help config.format.use-dirstate-v2` " b"for details"
1193 b"check `hg help config.format.use-dirstate-v2` " b"for details"
1194 )
1194 )
1195 if not dirstate.HAS_FAST_DIRSTATE_V2:
1195 if not dirstate.HAS_FAST_DIRSTATE_V2:
1196 if slow_path == b'warn':
1196 if slow_path == b'warn':
1197 msg = b"warning: " + msg + b'\n'
1197 msg = b"warning: " + msg + b'\n'
1198 ui.warn(msg)
1198 ui.warn(msg)
1199 if not ui.quiet:
1199 if not ui.quiet:
1200 hint = b'(' + hint + b')\n'
1200 hint = b'(' + hint + b')\n'
1201 ui.warn(hint)
1201 ui.warn(hint)
1202 if slow_path == b'abort':
1202 if slow_path == b'abort':
1203 raise error.Abort(msg, hint=hint)
1203 raise error.Abort(msg, hint=hint)
1204 if ui.configbool(b'storage', b'revlog.persistent-nodemap.mmap'):
1204 if ui.configbool(b'storage', b'revlog.persistent-nodemap.mmap'):
1205 options[b'persistent-nodemap.mmap'] = True
1205 options[b'persistent-nodemap.mmap'] = True
1206 if ui.configbool(b'devel', b'persistent-nodemap'):
1206 if ui.configbool(b'devel', b'persistent-nodemap'):
1207 options[b'devel-force-nodemap'] = True
1207 options[b'devel-force-nodemap'] = True
1208
1208
1209 return options
1209 return options
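
Collecting several of the knobs consulted above into one example hgrc
fragment (values illustrative, not recommendations):

    [storage]
    revlog.optimize-delta-parent-choice = yes
    revlog.reuse-external-delta = yes
    revlog.zstd.level = 3
    revlog.persistent-nodemap.slow-path = warn

    [experimental]
    sparse-read = no
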
1210
1210
1211
1211
1212 def makemain(**kwargs):
1212 def makemain(**kwargs):
1213 """Produce a type conforming to ``ilocalrepositorymain``."""
1213 """Produce a type conforming to ``ilocalrepositorymain``."""
1214 return localrepository
1214 return localrepository
1215
1215
1216
1216
1217 @interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
1217 @interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
1218 class revlogfilestorage:
1218 class revlogfilestorage:
1219 """File storage when using revlogs."""
1219 """File storage when using revlogs."""
1220
1220
1221 def file(self, path):
1221 def file(self, path):
1222 if path.startswith(b'/'):
1222 if path.startswith(b'/'):
1223 path = path[1:]
1223 path = path[1:]
1224
1224
1225 return filelog.filelog(self.svfs, path)
1225 return filelog.filelog(self.svfs, path)
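
Note the normalization above: the leading slash is stripped, so both of the
following address the same filelog:

    repo.file(b'/foo/bar.c')
    repo.file(b'foo/bar.c')
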
1226
1226
1227
1227
1228 @interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
1228 @interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
1229 class revlognarrowfilestorage:
1229 class revlognarrowfilestorage:
1230 """File storage when using revlogs and narrow files."""
1230 """File storage when using revlogs and narrow files."""
1231
1231
1232 def file(self, path):
1232 def file(self, path):
1233 if path.startswith(b'/'):
1233 if path.startswith(b'/'):
1234 path = path[1:]
1234 path = path[1:]
1235
1235
1236 return filelog.narrowfilelog(self.svfs, path, self._storenarrowmatch)
1236 return filelog.narrowfilelog(self.svfs, path, self._storenarrowmatch)
1237
1237
1238
1238
1239 def makefilestorage(requirements, features, **kwargs):
1239 def makefilestorage(requirements, features, **kwargs):
1240 """Produce a type conforming to ``ilocalrepositoryfilestorage``."""
1240 """Produce a type conforming to ``ilocalrepositoryfilestorage``."""
1241 features.add(repository.REPO_FEATURE_REVLOG_FILE_STORAGE)
1241 features.add(repository.REPO_FEATURE_REVLOG_FILE_STORAGE)
1242 features.add(repository.REPO_FEATURE_STREAM_CLONE)
1242 features.add(repository.REPO_FEATURE_STREAM_CLONE)
1243
1243
1244 if requirementsmod.NARROW_REQUIREMENT in requirements:
1244 if requirementsmod.NARROW_REQUIREMENT in requirements:
1245 return revlognarrowfilestorage
1245 return revlognarrowfilestorage
1246 else:
1246 else:
1247 return revlogfilestorage
1247 return revlogfilestorage
1248
1248
1249
1249
1250 # List of repository interfaces and factory functions for them. Each
1250 # List of repository interfaces and factory functions for them. Each
1251 # will be called in order during ``makelocalrepository()`` to iteratively
1251 # will be called in order during ``makelocalrepository()`` to iteratively
1252 # derive the final type for a local repository instance. We capture the
1252 # derive the final type for a local repository instance. We capture the
1253 # function as a lambda so we don't hold a reference and the module-level
1253 # function as a lambda so we don't hold a reference and the module-level
1254 # functions can be wrapped.
1254 # functions can be wrapped.
1255 REPO_INTERFACES = [
1255 REPO_INTERFACES = [
1256 (repository.ilocalrepositorymain, lambda: makemain),
1256 (repository.ilocalrepositorymain, lambda: makemain),
1257 (repository.ilocalrepositoryfilestorage, lambda: makefilestorage),
1257 (repository.ilocalrepositoryfilestorage, lambda: makefilestorage),
1258 ]
1258 ]
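
A hedged sketch of the kind of wrapping this indirection enables (the
subclass and its helper method are invented):

    from mercurial import extensions, localrepo

    def _makemain(orig, **kwargs):
        base = orig(**kwargs)

        class extendedrepo(base):
            def examplehelper(self):
                return sorted(self.requirements)

        return extendedrepo

    def uisetup(ui):
        extensions.wrapfunction(localrepo, 'makemain', _makemain)
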
1259
1259
1260
1260
1261 @interfaceutil.implementer(repository.ilocalrepositorymain)
1261 @interfaceutil.implementer(repository.ilocalrepositorymain)
1262 class localrepository:
1262 class localrepository:
1263 """Main class for representing local repositories.
1263 """Main class for representing local repositories.
1264
1264
1265 All local repositories are instances of this class.
1265 All local repositories are instances of this class.
1266
1266
1267 Constructed on its own, instances of this class are not usable as
1267 Constructed on its own, instances of this class are not usable as
1268 repository objects. To obtain a usable repository object, call
1268 repository objects. To obtain a usable repository object, call
1269 ``hg.repository()``, ``localrepo.instance()``, or
1269 ``hg.repository()``, ``localrepo.instance()``, or
1270 ``localrepo.makelocalrepository()``. The latter is the lowest level.
1270 ``localrepo.makelocalrepository()``. The latter is the lowest level.
1271 ``instance()`` adds support for creating new repositories.
1271 ``instance()`` adds support for creating new repositories.
1272 ``hg.repository()`` adds more extension integration, including calling
1272 ``hg.repository()`` adds more extension integration, including calling
1273 ``reposetup()``. Generally speaking, ``hg.repository()`` should be
1273 ``reposetup()``. Generally speaking, ``hg.repository()`` should be
1274 used.
1274 used.
1275 """
1275 """
1276
1276
1277 _basesupported = {
1277 _basesupported = {
1278 requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT,
1278 requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT,
1279 requirementsmod.CHANGELOGV2_REQUIREMENT,
1279 requirementsmod.CHANGELOGV2_REQUIREMENT,
1280 requirementsmod.COPIESSDC_REQUIREMENT,
1280 requirementsmod.COPIESSDC_REQUIREMENT,
1281 requirementsmod.DIRSTATE_TRACKED_HINT_V1,
1281 requirementsmod.DIRSTATE_TRACKED_HINT_V1,
1282 requirementsmod.DIRSTATE_V2_REQUIREMENT,
1282 requirementsmod.DIRSTATE_V2_REQUIREMENT,
1283 requirementsmod.DOTENCODE_REQUIREMENT,
1283 requirementsmod.DOTENCODE_REQUIREMENT,
1284 requirementsmod.FNCACHE_REQUIREMENT,
1284 requirementsmod.FNCACHE_REQUIREMENT,
1285 requirementsmod.GENERALDELTA_REQUIREMENT,
1285 requirementsmod.GENERALDELTA_REQUIREMENT,
1286 requirementsmod.INTERNAL_PHASE_REQUIREMENT,
1286 requirementsmod.INTERNAL_PHASE_REQUIREMENT,
1287 requirementsmod.NODEMAP_REQUIREMENT,
1287 requirementsmod.NODEMAP_REQUIREMENT,
1288 requirementsmod.RELATIVE_SHARED_REQUIREMENT,
1288 requirementsmod.RELATIVE_SHARED_REQUIREMENT,
1289 requirementsmod.REVLOGV1_REQUIREMENT,
1289 requirementsmod.REVLOGV1_REQUIREMENT,
1290 requirementsmod.REVLOGV2_REQUIREMENT,
1290 requirementsmod.REVLOGV2_REQUIREMENT,
1291 requirementsmod.SHARED_REQUIREMENT,
1291 requirementsmod.SHARED_REQUIREMENT,
1292 requirementsmod.SHARESAFE_REQUIREMENT,
1292 requirementsmod.SHARESAFE_REQUIREMENT,
1293 requirementsmod.SPARSE_REQUIREMENT,
1293 requirementsmod.SPARSE_REQUIREMENT,
1294 requirementsmod.SPARSEREVLOG_REQUIREMENT,
1294 requirementsmod.SPARSEREVLOG_REQUIREMENT,
1295 requirementsmod.STORE_REQUIREMENT,
1295 requirementsmod.STORE_REQUIREMENT,
1296 requirementsmod.TREEMANIFEST_REQUIREMENT,
1296 requirementsmod.TREEMANIFEST_REQUIREMENT,
1297 }
1297 }
1298
1298
1299 # list of prefixes for files which can be written without 'wlock'
1299 # list of prefixes for files which can be written without 'wlock'
1300 # Extensions should extend this list when needed
1300 # Extensions should extend this list when needed
1301 _wlockfreeprefix = {
1301 _wlockfreeprefix = {
1302 # We might consider requiring 'wlock' for the next
1302 # We might consider requiring 'wlock' for the next
1303 # two, but pretty much all the existing code assume
1303 # two, but pretty much all the existing code assume
1304 # wlock is not needed so we keep them excluded for
1304 # wlock is not needed so we keep them excluded for
1305 # now.
1305 # now.
1306 b'hgrc',
1306 b'hgrc',
1307 b'requires',
1307 b'requires',
1308 # XXX cache is a complicated business; someone
1308 # XXX cache is a complicated business; someone
1309 # should investigate this in depth at some point
1309 # should investigate this in depth at some point
1310 b'cache/',
1310 b'cache/',
1311 # XXX shouldn't the dirstate be covered by the wlock?
1311 # XXX shouldn't the dirstate be covered by the wlock?
1312 b'dirstate',
1312 b'dirstate',
1313 # XXX bisect was still a bit too messy at the time
1313 # XXX bisect was still a bit too messy at the time
1314 # this changeset was introduced. Someone should fix
1314 # this changeset was introduced. Someone should fix
1315 # the remaining bit and drop this line
1315 # the remaining bit and drop this line
1316 b'bisect.state',
1316 b'bisect.state',
1317 }
1317 }
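
# Illustrative sketch: how an extension might extend the wlock-free list,
# as the comment above suggests. The b'myext-state' file name is
# hypothetical; since the attribute is a class-level set, this is typically
# done once during extension setup.
from mercurial import localrepo

def extsetup(ui):
    # allow writing .hg/myext-state without holding the wlock
    localrepo.localrepository._wlockfreeprefix.add(b'myext-state')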
1318
1318
1319 def __init__(
1319 def __init__(
1320 self,
1320 self,
1321 baseui,
1321 baseui,
1322 ui,
1322 ui,
1323 origroot,
1323 origroot,
1324 wdirvfs,
1324 wdirvfs,
1325 hgvfs,
1325 hgvfs,
1326 requirements,
1326 requirements,
1327 supportedrequirements,
1327 supportedrequirements,
1328 sharedpath,
1328 sharedpath,
1329 store,
1329 store,
1330 cachevfs,
1330 cachevfs,
1331 wcachevfs,
1331 wcachevfs,
1332 features,
1332 features,
1333 intents=None,
1333 intents=None,
1334 ):
1334 ):
1335 """Create a new local repository instance.
1335 """Create a new local repository instance.
1336
1336
1337 Most callers should use ``hg.repository()``, ``localrepo.instance()``,
1337 Most callers should use ``hg.repository()``, ``localrepo.instance()``,
1338 or ``localrepo.makelocalrepository()`` for obtaining a new repository
1338 or ``localrepo.makelocalrepository()`` for obtaining a new repository
1339 object.
1339 object.
1340
1340
1341 Arguments:
1341 Arguments:
1342
1342
1343 baseui
1343 baseui
1344 ``ui.ui`` instance that the ``ui`` argument was based on.
1344 ``ui.ui`` instance that the ``ui`` argument was based on.
1345
1345
1346 ui
1346 ui
1347 ``ui.ui`` instance for use by the repository.
1347 ``ui.ui`` instance for use by the repository.
1348
1348
1349 origroot
1349 origroot
1350 ``bytes`` path to working directory root of this repository.
1350 ``bytes`` path to working directory root of this repository.
1351
1351
1352 wdirvfs
1352 wdirvfs
1353 ``vfs.vfs`` rooted at the working directory.
1353 ``vfs.vfs`` rooted at the working directory.
1354
1354
1355 hgvfs
1355 hgvfs
1356 ``vfs.vfs`` rooted at .hg/
1356 ``vfs.vfs`` rooted at .hg/
1357
1357
1358 requirements
1358 requirements
1359 ``set`` of bytestrings representing repository opening requirements.
1359 ``set`` of bytestrings representing repository opening requirements.
1360
1360
1361 supportedrequirements
1361 supportedrequirements
1362 ``set`` of bytestrings representing repository requirements that we
1362 ``set`` of bytestrings representing repository requirements that we
1363 know how to open. May be a superset of ``requirements``.
1363 know how to open. May be a superset of ``requirements``.
1364
1364
1365 sharedpath
1365 sharedpath
1366 ``bytes`` defining the path to the storage base directory. Points to a
1366 ``bytes`` defining the path to the storage base directory. Points to a
1367 ``.hg/`` directory somewhere.
1367 ``.hg/`` directory somewhere.
1368
1368
1369 store
1369 store
1370 ``store.basicstore`` (or derived) instance providing access to
1370 ``store.basicstore`` (or derived) instance providing access to
1371 versioned storage.
1371 versioned storage.
1372
1372
1373 cachevfs
1373 cachevfs
1374 ``vfs.vfs`` used for cache files.
1374 ``vfs.vfs`` used for cache files.
1375
1375
1376 wcachevfs
1376 wcachevfs
1377 ``vfs.vfs`` used for cache files related to the working copy.
1377 ``vfs.vfs`` used for cache files related to the working copy.
1378
1378
1379 features
1379 features
1380 ``set`` of bytestrings defining features/capabilities of this
1380 ``set`` of bytestrings defining features/capabilities of this
1381 instance.
1381 instance.
1382
1382
1383 intents
1383 intents
1384 ``set`` of system strings indicating what this repo will be used
1384 ``set`` of system strings indicating what this repo will be used
1385 for.
1385 for.
1386 """
1386 """
1387 self.baseui = baseui
1387 self.baseui = baseui
1388 self.ui = ui
1388 self.ui = ui
1389 self.origroot = origroot
1389 self.origroot = origroot
1390 # vfs rooted at working directory.
1390 # vfs rooted at working directory.
1391 self.wvfs = wdirvfs
1391 self.wvfs = wdirvfs
1392 self.root = wdirvfs.base
1392 self.root = wdirvfs.base
1393 # vfs rooted at .hg/. Used to access most non-store paths.
1393 # vfs rooted at .hg/. Used to access most non-store paths.
1394 self.vfs = hgvfs
1394 self.vfs = hgvfs
1395 self.path = hgvfs.base
1395 self.path = hgvfs.base
1396 self.requirements = requirements
1396 self.requirements = requirements
1397 self.nodeconstants = sha1nodeconstants
1397 self.nodeconstants = sha1nodeconstants
1398 self.nullid = self.nodeconstants.nullid
1398 self.nullid = self.nodeconstants.nullid
1399 self.supported = supportedrequirements
1399 self.supported = supportedrequirements
1400 self.sharedpath = sharedpath
1400 self.sharedpath = sharedpath
1401 self.store = store
1401 self.store = store
1402 self.cachevfs = cachevfs
1402 self.cachevfs = cachevfs
1403 self.wcachevfs = wcachevfs
1403 self.wcachevfs = wcachevfs
1404 self.features = features
1404 self.features = features
1405
1405
1406 self.filtername = None
1406 self.filtername = None
1407
1407
1408 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
1408 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
1409 b'devel', b'check-locks'
1409 b'devel', b'check-locks'
1410 ):
1410 ):
1411 self.vfs.audit = self._getvfsward(self.vfs.audit)
1411 self.vfs.audit = self._getvfsward(self.vfs.audit)
1412 # A list of callbacks to shape the phase if no data were found.
1412 # A list of callbacks to shape the phase if no data were found.
1413 # Callbacks are in the form: func(repo, roots) --> processed root.
1413 # Callbacks are in the form: func(repo, roots) --> processed root.
1414 # This list is to be filled by extensions during repo setup
1414 # This list is to be filled by extensions during repo setup
1415 self._phasedefaults = []
1415 self._phasedefaults = []
1416
1416
1417 color.setup(self.ui)
1417 color.setup(self.ui)
1418
1418
1419 self.spath = self.store.path
1419 self.spath = self.store.path
1420 self.svfs = self.store.vfs
1420 self.svfs = self.store.vfs
1421 self.sjoin = self.store.join
1421 self.sjoin = self.store.join
1422 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
1422 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
1423 b'devel', b'check-locks'
1423 b'devel', b'check-locks'
1424 ):
1424 ):
1425 if util.safehasattr(self.svfs, b'vfs'): # this is filtervfs
1425 if util.safehasattr(self.svfs, b'vfs'): # this is filtervfs
1426 self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
1426 self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
1427 else: # standard vfs
1427 else: # standard vfs
1428 self.svfs.audit = self._getsvfsward(self.svfs.audit)
1428 self.svfs.audit = self._getsvfsward(self.svfs.audit)
1429
1429
1430 self._dirstatevalidatewarned = False
1430 self._dirstatevalidatewarned = False
1431
1431
1432 self._branchcaches = branchmap.BranchMapCache()
1432 self._branchcaches = branchmap.BranchMapCache()
1433 self._revbranchcache = None
1433 self._revbranchcache = None
1434 self._filterpats = {}
1434 self._filterpats = {}
1435 self._datafilters = {}
1435 self._datafilters = {}
1436 self._transref = self._lockref = self._wlockref = None
1436 self._transref = self._lockref = self._wlockref = None
1437
1437
1438 # A cache for various files under .hg/ that tracks file changes,
1438 # A cache for various files under .hg/ that tracks file changes,
1439 # (used by the filecache decorator)
1439 # (used by the filecache decorator)
1440 #
1440 #
1441 # Maps a property name to its util.filecacheentry
1441 # Maps a property name to its util.filecacheentry
1442 self._filecache = {}
1442 self._filecache = {}
1443
1443
1444 # holds sets of revisions to be filtered
1444 # holds sets of revisions to be filtered
1445 # should be cleared when something might have changed the filter value:
1445 # should be cleared when something might have changed the filter value:
1446 # - new changesets,
1446 # - new changesets,
1447 # - phase change,
1447 # - phase change,
1448 # - new obsolescence marker,
1448 # - new obsolescence marker,
1449 # - working directory parent change,
1449 # - working directory parent change,
1450 # - bookmark changes
1450 # - bookmark changes
1451 self.filteredrevcache = {}
1451 self.filteredrevcache = {}
1452
1452
1453 # post-dirstate-status hooks
1453 # post-dirstate-status hooks
1454 self._postdsstatus = []
1454 self._postdsstatus = []
1455
1455
1456 # generic mapping between names and nodes
1456 # generic mapping between names and nodes
1457 self.names = namespaces.namespaces()
1457 self.names = namespaces.namespaces()
1458
1458
1459 # Key to signature value.
1459 # Key to signature value.
1460 self._sparsesignaturecache = {}
1460 self._sparsesignaturecache = {}
1461 # Signature to cached matcher instance.
1461 # Signature to cached matcher instance.
1462 self._sparsematchercache = {}
1462 self._sparsematchercache = {}
1463
1463
1464 self._extrafilterid = repoview.extrafilter(ui)
1464 self._extrafilterid = repoview.extrafilter(ui)
1465
1465
1466 self.filecopiesmode = None
1466 self.filecopiesmode = None
1467 if requirementsmod.COPIESSDC_REQUIREMENT in self.requirements:
1467 if requirementsmod.COPIESSDC_REQUIREMENT in self.requirements:
1468 self.filecopiesmode = b'changeset-sidedata'
1468 self.filecopiesmode = b'changeset-sidedata'
1469
1469
1470 self._wanted_sidedata = set()
1470 self._wanted_sidedata = set()
1471 self._sidedata_computers = {}
1471 self._sidedata_computers = {}
1472 sidedatamod.set_sidedata_spec_for_repo(self)
1472 sidedatamod.set_sidedata_spec_for_repo(self)
1473
1473
1474 def _getvfsward(self, origfunc):
1474 def _getvfsward(self, origfunc):
1475 """build a ward for self.vfs"""
1475 """build a ward for self.vfs"""
1476 rref = weakref.ref(self)
1476 rref = weakref.ref(self)
1477
1477
1478 def checkvfs(path, mode=None):
1478 def checkvfs(path, mode=None):
1479 ret = origfunc(path, mode=mode)
1479 ret = origfunc(path, mode=mode)
1480 repo = rref()
1480 repo = rref()
1481 if (
1481 if (
1482 repo is None
1482 repo is None
1483 or not util.safehasattr(repo, b'_wlockref')
1483 or not util.safehasattr(repo, b'_wlockref')
1484 or not util.safehasattr(repo, b'_lockref')
1484 or not util.safehasattr(repo, b'_lockref')
1485 ):
1485 ):
1486 return
1486 return
1487 if mode in (None, b'r', b'rb'):
1487 if mode in (None, b'r', b'rb'):
1488 return
1488 return
1489 if path.startswith(repo.path):
1489 if path.startswith(repo.path):
1490 # truncate name relative to the repository (.hg)
1490 # truncate name relative to the repository (.hg)
1491 path = path[len(repo.path) + 1 :]
1491 path = path[len(repo.path) + 1 :]
1492 if path.startswith(b'cache/'):
1492 if path.startswith(b'cache/'):
1493 msg = b'accessing cache with vfs instead of cachevfs: "%s"'
1493 msg = b'accessing cache with vfs instead of cachevfs: "%s"'
1494 repo.ui.develwarn(msg % path, stacklevel=3, config=b"cache-vfs")
1494 repo.ui.develwarn(msg % path, stacklevel=3, config=b"cache-vfs")
1495 # path prefixes covered by 'lock'
1495 # path prefixes covered by 'lock'
1496 vfs_path_prefixes = (
1496 vfs_path_prefixes = (
1497 b'journal.',
1497 b'journal.',
1498 b'undo.',
1498 b'undo.',
1499 b'strip-backup/',
1499 b'strip-backup/',
1500 b'cache/',
1500 b'cache/',
1501 )
1501 )
1502 if any(path.startswith(prefix) for prefix in vfs_path_prefixes):
1502 if any(path.startswith(prefix) for prefix in vfs_path_prefixes):
1503 if repo._currentlock(repo._lockref) is None:
1503 if repo._currentlock(repo._lockref) is None:
1504 repo.ui.develwarn(
1504 repo.ui.develwarn(
1505 b'write with no lock: "%s"' % path,
1505 b'write with no lock: "%s"' % path,
1506 stacklevel=3,
1506 stacklevel=3,
1507 config=b'check-locks',
1507 config=b'check-locks',
1508 )
1508 )
1509 elif repo._currentlock(repo._wlockref) is None:
1509 elif repo._currentlock(repo._wlockref) is None:
1510 # rest of vfs files are covered by 'wlock'
1510 # rest of vfs files are covered by 'wlock'
1511 #
1511 #
1512 # exclude special files
1512 # exclude special files
1513 for prefix in self._wlockfreeprefix:
1513 for prefix in self._wlockfreeprefix:
1514 if path.startswith(prefix):
1514 if path.startswith(prefix):
1515 return
1515 return
1516 repo.ui.develwarn(
1516 repo.ui.develwarn(
1517 b'write with no wlock: "%s"' % path,
1517 b'write with no wlock: "%s"' % path,
1518 stacklevel=3,
1518 stacklevel=3,
1519 config=b'check-locks',
1519 config=b'check-locks',
1520 )
1520 )
1521 return ret
1521 return ret
1522
1522
1523 return checkvfs
1523 return checkvfs
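
# Illustrative sketch of the pattern used above, with stdlib pieces only:
# the wrapper holds a weak reference to its owner so the closure does not
# create a reference cycle that keeps the object alive. Names are
# hypothetical.
import weakref

class _Owner:
    def _make_ward(self, origfunc):
        ref = weakref.ref(self)  # weak, to avoid a closure -> owner cycle

        def ward(*args, **kwargs):
            ret = origfunc(*args, **kwargs)
            owner = ref()  # None once the owner has been collected
            if owner is not None:
                pass  # perform the extra checks against `owner` here
            return ret

        return ward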
1524
1524
1525 def _getsvfsward(self, origfunc):
1525 def _getsvfsward(self, origfunc):
1526 """build a ward for self.svfs"""
1526 """build a ward for self.svfs"""
1527 rref = weakref.ref(self)
1527 rref = weakref.ref(self)
1528
1528
1529 def checksvfs(path, mode=None):
1529 def checksvfs(path, mode=None):
1530 ret = origfunc(path, mode=mode)
1530 ret = origfunc(path, mode=mode)
1531 repo = rref()
1531 repo = rref()
1532 if repo is None or not util.safehasattr(repo, b'_lockref'):
1532 if repo is None or not util.safehasattr(repo, b'_lockref'):
1533 return
1533 return
1534 if mode in (None, b'r', b'rb'):
1534 if mode in (None, b'r', b'rb'):
1535 return
1535 return
1536 if path.startswith(repo.sharedpath):
1536 if path.startswith(repo.sharedpath):
1537 # truncate name relative to the repository (.hg)
1537 # truncate name relative to the repository (.hg)
1538 path = path[len(repo.sharedpath) + 1 :]
1538 path = path[len(repo.sharedpath) + 1 :]
1539 if repo._currentlock(repo._lockref) is None:
1539 if repo._currentlock(repo._lockref) is None:
1540 repo.ui.develwarn(
1540 repo.ui.develwarn(
1541 b'write with no lock: "%s"' % path, stacklevel=4
1541 b'write with no lock: "%s"' % path, stacklevel=4
1542 )
1542 )
1543 return ret
1543 return ret
1544
1544
1545 return checksvfs
1545 return checksvfs
1546
1546
1547 def close(self):
1547 def close(self):
1548 self._writecaches()
1548 self._writecaches()
1549
1549
1550 def _writecaches(self):
1550 def _writecaches(self):
1551 if self._revbranchcache:
1551 if self._revbranchcache:
1552 self._revbranchcache.write()
1552 self._revbranchcache.write()
1553
1553
1554 def _restrictcapabilities(self, caps):
1554 def _restrictcapabilities(self, caps):
1555 if self.ui.configbool(b'experimental', b'bundle2-advertise'):
1555 if self.ui.configbool(b'experimental', b'bundle2-advertise'):
1556 caps = set(caps)
1556 caps = set(caps)
1557 capsblob = bundle2.encodecaps(
1557 capsblob = bundle2.encodecaps(
1558 bundle2.getrepocaps(self, role=b'client')
1558 bundle2.getrepocaps(self, role=b'client')
1559 )
1559 )
1560 caps.add(b'bundle2=' + urlreq.quote(capsblob))
1560 caps.add(b'bundle2=' + urlreq.quote(capsblob))
1561 if self.ui.configbool(b'experimental', b'narrow'):
1561 if self.ui.configbool(b'experimental', b'narrow'):
1562 caps.add(wireprototypes.NARROWCAP)
1562 caps.add(wireprototypes.NARROWCAP)
1563 return caps
1563 return caps
1564
1564
1565 # Don't cache auditor/nofsauditor, or you'll end up with reference cycle:
1565 # Don't cache auditor/nofsauditor, or you'll end up with reference cycle:
1566 # self -> auditor -> self._checknested -> self
1566 # self -> auditor -> self._checknested -> self
1567
1567
1568 @property
1568 @property
1569 def auditor(self):
1569 def auditor(self):
1570 # This is only used by context.workingctx.match in order to
1570 # This is only used by context.workingctx.match in order to
1571 # detect files in subrepos.
1571 # detect files in subrepos.
1572 return pathutil.pathauditor(self.root, callback=self._checknested)
1572 return pathutil.pathauditor(self.root, callback=self._checknested)
1573
1573
1574 @property
1574 @property
1575 def nofsauditor(self):
1575 def nofsauditor(self):
1576 # This is only used by context.basectx.match in order to detect
1576 # This is only used by context.basectx.match in order to detect
1577 # files in subrepos.
1577 # files in subrepos.
1578 return pathutil.pathauditor(
1578 return pathutil.pathauditor(
1579 self.root, callback=self._checknested, realfs=False, cached=True
1579 self.root, callback=self._checknested, realfs=False, cached=True
1580 )
1580 )
1581
1581
1582 def _checknested(self, path):
1582 def _checknested(self, path):
1583 """Determine if path is a legal nested repository."""
1583 """Determine if path is a legal nested repository."""
1584 if not path.startswith(self.root):
1584 if not path.startswith(self.root):
1585 return False
1585 return False
1586 subpath = path[len(self.root) + 1 :]
1586 subpath = path[len(self.root) + 1 :]
1587 normsubpath = util.pconvert(subpath)
1587 normsubpath = util.pconvert(subpath)
1588
1588
1589 # XXX: Checking against the current working copy is wrong in
1589 # XXX: Checking against the current working copy is wrong in
1590 # the sense that it can reject things like
1590 # the sense that it can reject things like
1591 #
1591 #
1592 # $ hg cat -r 10 sub/x.txt
1592 # $ hg cat -r 10 sub/x.txt
1593 #
1593 #
1594 # if sub/ is no longer a subrepository in the working copy
1594 # if sub/ is no longer a subrepository in the working copy
1595 # parent revision.
1595 # parent revision.
1596 #
1596 #
1597 # However, it can of course also allow things that would have
1597 # However, it can of course also allow things that would have
1598 # been rejected before, such as the above cat command if sub/
1598 # been rejected before, such as the above cat command if sub/
1599 # is a subrepository now, but was a normal directory before.
1599 # is a subrepository now, but was a normal directory before.
1600 # The old path auditor would have rejected by mistake since it
1600 # The old path auditor would have rejected by mistake since it
1601 # panics when it sees sub/.hg/.
1601 # panics when it sees sub/.hg/.
1602 #
1602 #
1603 # All in all, checking against the working copy seems sensible
1603 # All in all, checking against the working copy seems sensible
1604 # since we want to prevent access to nested repositories on
1604 # since we want to prevent access to nested repositories on
1605 # the filesystem *now*.
1605 # the filesystem *now*.
1606 ctx = self[None]
1606 ctx = self[None]
1607 parts = util.splitpath(subpath)
1607 parts = util.splitpath(subpath)
1608 while parts:
1608 while parts:
1609 prefix = b'/'.join(parts)
1609 prefix = b'/'.join(parts)
1610 if prefix in ctx.substate:
1610 if prefix in ctx.substate:
1611 if prefix == normsubpath:
1611 if prefix == normsubpath:
1612 return True
1612 return True
1613 else:
1613 else:
1614 sub = ctx.sub(prefix)
1614 sub = ctx.sub(prefix)
1615 return sub.checknested(subpath[len(prefix) + 1 :])
1615 return sub.checknested(subpath[len(prefix) + 1 :])
1616 else:
1616 else:
1617 parts.pop()
1617 parts.pop()
1618 return False
1618 return False
1619
1619
1620 def peer(self):
1620 def peer(self):
1621 return localpeer(self) # not cached to avoid reference cycle
1621 return localpeer(self) # not cached to avoid reference cycle
1622
1622
1623 def unfiltered(self):
1623 def unfiltered(self):
1624 """Return unfiltered version of the repository
1624 """Return unfiltered version of the repository
1625
1625
1626 Intended to be overwritten by filtered repo."""
1626 Intended to be overwritten by filtered repo."""
1627 return self
1627 return self
1628
1628
1629 def filtered(self, name, visibilityexceptions=None):
1629 def filtered(self, name, visibilityexceptions=None):
1630 """Return a filtered version of a repository
1630 """Return a filtered version of a repository
1631
1631
1632 The `name` parameter is the identifier of the requested view. This
1632 The `name` parameter is the identifier of the requested view. This
1633 will return a repoview object set "exactly" to the specified view.
1633 will return a repoview object set "exactly" to the specified view.
1634
1634
1635 This function does not apply recursive filtering to a repository. For
1635 This function does not apply recursive filtering to a repository. For
1636 example, calling `repo.filtered("served")` will return a repoview using
1636 example, calling `repo.filtered("served")` will return a repoview using
1637 the "served" view, regardless of the initial view used by `repo`.
1637 the "served" view, regardless of the initial view used by `repo`.
1638
1638
1639 In other words, there is always only one level of `repoview` "filtering".
1639 In other words, there is always only one level of `repoview` "filtering".
1640 """
1640 """
1641 if self._extrafilterid is not None and b'%' not in name:
1641 if self._extrafilterid is not None and b'%' not in name:
1642 name = name + b'%' + self._extrafilterid
1642 name = name + b'%' + self._extrafilterid
1643
1643
1644 cls = repoview.newtype(self.unfiltered().__class__)
1644 cls = repoview.newtype(self.unfiltered().__class__)
1645 return cls(self, name, visibilityexceptions)
1645 return cls(self, name, visibilityexceptions)
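
# Illustrative sketch, assuming `repo` is an existing repository object;
# b'visible' and b'served' are standard repoview filter names.
served = repo.filtered(b'served')    # hides hidden and secret changesets
visible = repo.filtered(b'visible')  # hides only hidden changesets
unfi = repo.unfiltered()             # no filtering at all
assert unfi.unfiltered() is unfi     # unfiltered() is idempotent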
1646
1646
1647 @mixedrepostorecache(
1647 @mixedrepostorecache(
1648 (b'bookmarks', b'plain'),
1648 (b'bookmarks', b'plain'),
1649 (b'bookmarks.current', b'plain'),
1649 (b'bookmarks.current', b'plain'),
1650 (b'bookmarks', b''),
1650 (b'bookmarks', b''),
1651 (b'00changelog.i', b''),
1651 (b'00changelog.i', b''),
1652 )
1652 )
1653 def _bookmarks(self):
1653 def _bookmarks(self):
1654 # Since the multiple files involved in the transaction cannot be
1654 # Since the multiple files involved in the transaction cannot be
1655 # written atomically (with the current repository format), there is a race
1655 # written atomically (with the current repository format), there is a race
1656 # condition here.
1656 # condition here.
1657 #
1657 #
1658 # 1) changelog content A is read
1658 # 1) changelog content A is read
1659 # 2) outside transaction update changelog to content B
1659 # 2) outside transaction update changelog to content B
1660 # 3) outside transaction update bookmark file referring to content B
1660 # 3) outside transaction update bookmark file referring to content B
1661 # 4) bookmarks file content is read and filtered against changelog-A
1661 # 4) bookmarks file content is read and filtered against changelog-A
1662 #
1662 #
1663 # When this happens, bookmarks against nodes missing from A are dropped.
1663 # When this happens, bookmarks against nodes missing from A are dropped.
1664 #
1664 #
1665 # Having this happen during a read is not great, but it becomes worse
1665 # Having this happen during a read is not great, but it becomes worse
1666 # when it happens during a write, because the bookmarks to the "unknown"
1666 # when it happens during a write, because the bookmarks to the "unknown"
1667 # nodes will be dropped for good. However, writes happen within locks.
1667 # nodes will be dropped for good. However, writes happen within locks.
1668 # This locking makes it possible to have a race free consistent read.
1668 # This locking makes it possible to have a race free consistent read.
1669 # For this purpose, data read from disk before locking is
1669 # For this purpose, data read from disk before locking is
1670 # "invalidated" right after the locks are taken. These invalidations are
1670 # "invalidated" right after the locks are taken. These invalidations are
1671 # "light": the `filecache` mechanism keeps the data in memory and will
1671 # "light": the `filecache` mechanism keeps the data in memory and will
1672 # reuse it if the underlying files did not change. Not parsing the
1672 # reuse it if the underlying files did not change. Not parsing the
1673 # same data multiple times helps performance.
1673 # same data multiple times helps performance.
1674 #
1674 #
1675 # Unfortunately, in the case described above, the files tracked by the
1675 # Unfortunately, in the case described above, the files tracked by the
1676 # bookmarks file cache might not have changed, but the in-memory
1676 # bookmarks file cache might not have changed, but the in-memory
1677 # content is still "wrong" because we used an older changelog content
1677 # content is still "wrong" because we used an older changelog content
1678 # to process the on-disk data. So after locking, the changelog would be
1678 # to process the on-disk data. So after locking, the changelog would be
1679 # refreshed but `_bookmarks` would be preserved.
1679 # refreshed but `_bookmarks` would be preserved.
1680 # Adding `00changelog.i` to the list of tracked files is not
1680 # Adding `00changelog.i` to the list of tracked files is not
1681 # enough, because at the time we build the content for `_bookmarks` in
1681 # enough, because at the time we build the content for `_bookmarks` in
1682 # (4), the changelog file has already diverged from the content used
1682 # (4), the changelog file has already diverged from the content used
1683 # for loading `changelog` in (1).
1683 # for loading `changelog` in (1).
1684 #
1684 #
1685 # To prevent the issue, we force the changelog to be explicitly
1685 # To prevent the issue, we force the changelog to be explicitly
1686 # reloaded while computing `_bookmarks`. The data race can still happen
1686 # reloaded while computing `_bookmarks`. The data race can still happen
1687 # without the lock (with a narrower window), but it would no longer go
1687 # without the lock (with a narrower window), but it would no longer go
1688 # undetected during the lock time refresh.
1688 # undetected during the lock time refresh.
1689 #
1689 #
1690 # The new schedule is as follows:
1690 # The new schedule is as follows:
1691 #
1691 #
1692 # 1) filecache logic detects that `_bookmarks` needs to be computed
1692 # 1) filecache logic detects that `_bookmarks` needs to be computed
1693 # 2) cachestat for `bookmarks` and `changelog` are captured (for book)
1693 # 2) cachestat for `bookmarks` and `changelog` are captured (for book)
1694 # 3) We force `changelog` filecache to be tested
1694 # 3) We force `changelog` filecache to be tested
1695 # 4) cachestat for `changelog` are captured (for changelog)
1695 # 4) cachestat for `changelog` are captured (for changelog)
1696 # 5) `_bookmarks` is computed and cached
1696 # 5) `_bookmarks` is computed and cached
1697 #
1697 #
1698 # The step in (3) ensures we have a changelog at least as recent as the
1698 # The step in (3) ensures we have a changelog at least as recent as the
1699 # cache stat computed in (1). As a result, at locking time:
1699 # cache stat computed in (1). As a result, at locking time:
1700 # * if the changelog did not change since (1) -> we can reuse the data
1700 # * if the changelog did not change since (1) -> we can reuse the data
1701 # * otherwise -> the bookmarks get refreshed.
1701 # * otherwise -> the bookmarks get refreshed.
1702 self._refreshchangelog()
1702 self._refreshchangelog()
1703 return bookmarks.bmstore(self)
1703 return bookmarks.bmstore(self)
1704
1704
1705 def _refreshchangelog(self):
1705 def _refreshchangelog(self):
1706 """make sure the in-memory changelog matches the on-disk one"""
1706 """make sure the in-memory changelog matches the on-disk one"""
1707 if 'changelog' in vars(self) and self.currenttransaction() is None:
1707 if 'changelog' in vars(self) and self.currenttransaction() is None:
1708 del self.changelog
1708 del self.changelog
1709
1709
1710 @property
1710 @property
1711 def _activebookmark(self):
1711 def _activebookmark(self):
1712 return self._bookmarks.active
1712 return self._bookmarks.active
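
# Illustrative sketch of the bookmark accessors, assuming `repo` is an
# existing repository object and `node` a known binary node:
active = repo._activebookmark        # name of the active bookmark, or None
names = repo._bookmarks.names(node)  # bookmarks pointing to `node`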
1713
1713
1714 # _phasesets depend on the changelog. What we need is to call
1714 # _phasesets depend on the changelog. What we need is to call
1715 # _phasecache.invalidate() if '00changelog.i' was changed, but it
1715 # _phasecache.invalidate() if '00changelog.i' was changed, but it
1716 # can't be easily expressed in the filecache mechanism.
1716 # can't be easily expressed in the filecache mechanism.
1717 @storecache(b'phaseroots', b'00changelog.i')
1717 @storecache(b'phaseroots', b'00changelog.i')
1718 def _phasecache(self):
1718 def _phasecache(self):
1719 return phases.phasecache(self, self._phasedefaults)
1719 return phases.phasecache(self, self._phasedefaults)
1720
1720
1721 @storecache(b'obsstore')
1721 @storecache(b'obsstore')
1722 def obsstore(self):
1722 def obsstore(self):
1723 return obsolete.makestore(self.ui, self)
1723 return obsolete.makestore(self.ui, self)
1724
1724
1725 @changelogcache()
1725 @changelogcache()
1726 def changelog(repo):
1726 def changelog(repo):
1727 # load dirstate before changelog to avoid race see issue6303
1727 # load dirstate before changelog to avoid race see issue6303
1728 repo.dirstate.prefetch_parents()
1728 repo.dirstate.prefetch_parents()
1729 return repo.store.changelog(
1729 return repo.store.changelog(
1730 txnutil.mayhavepending(repo.root),
1730 txnutil.mayhavepending(repo.root),
1731 concurrencychecker=revlogchecker.get_checker(repo.ui, b'changelog'),
1731 concurrencychecker=revlogchecker.get_checker(repo.ui, b'changelog'),
1732 )
1732 )
1733
1733
1734 @manifestlogcache()
1734 @manifestlogcache()
1735 def manifestlog(self):
1735 def manifestlog(self):
1736 return self.store.manifestlog(self, self._storenarrowmatch)
1736 return self.store.manifestlog(self, self._storenarrowmatch)
1737
1737
1738 @repofilecache(b'dirstate')
1738 @repofilecache(b'dirstate')
1739 def dirstate(self):
1739 def dirstate(self):
1740 return self._makedirstate()
1740 return self._makedirstate()
1741
1741
1742 def _makedirstate(self):
1742 def _makedirstate(self):
1743 """Extension point for wrapping the dirstate per-repo."""
1743 """Extension point for wrapping the dirstate per-repo."""
1744 sparsematchfn = lambda: sparse.matcher(self)
1744 sparsematchfn = lambda: sparse.matcher(self)
1745 v2_req = requirementsmod.DIRSTATE_V2_REQUIREMENT
1745 v2_req = requirementsmod.DIRSTATE_V2_REQUIREMENT
1746 th = requirementsmod.DIRSTATE_TRACKED_HINT_V1
1746 th = requirementsmod.DIRSTATE_TRACKED_HINT_V1
1747 use_dirstate_v2 = v2_req in self.requirements
1747 use_dirstate_v2 = v2_req in self.requirements
1748 use_tracked_hint = th in self.requirements
1748 use_tracked_hint = th in self.requirements
1749
1749
1750 return dirstate.dirstate(
1750 return dirstate.dirstate(
1751 self.vfs,
1751 self.vfs,
1752 self.ui,
1752 self.ui,
1753 self.root,
1753 self.root,
1754 self._dirstatevalidate,
1754 self._dirstatevalidate,
1755 sparsematchfn,
1755 sparsematchfn,
1756 self.nodeconstants,
1756 self.nodeconstants,
1757 use_dirstate_v2,
1757 use_dirstate_v2,
1758 use_tracked_hint=use_tracked_hint,
1758 use_tracked_hint=use_tracked_hint,
1759 )
1759 )
1760
1760
1761 def _dirstatevalidate(self, node):
1761 def _dirstatevalidate(self, node):
1762 try:
1762 try:
1763 self.changelog.rev(node)
1763 self.changelog.rev(node)
1764 return node
1764 return node
1765 except error.LookupError:
1765 except error.LookupError:
1766 if not self._dirstatevalidatewarned:
1766 if not self._dirstatevalidatewarned:
1767 self._dirstatevalidatewarned = True
1767 self._dirstatevalidatewarned = True
1768 self.ui.warn(
1768 self.ui.warn(
1769 _(b"warning: ignoring unknown working parent %s!\n")
1769 _(b"warning: ignoring unknown working parent %s!\n")
1770 % short(node)
1770 % short(node)
1771 )
1771 )
1772 return self.nullid
1772 return self.nullid
1773
1773
1774 @storecache(narrowspec.FILENAME)
1774 @storecache(narrowspec.FILENAME)
1775 def narrowpats(self):
1775 def narrowpats(self):
1776 """matcher patterns for this repository's narrowspec
1776 """matcher patterns for this repository's narrowspec
1777
1777
1778 A tuple of (includes, excludes).
1778 A tuple of (includes, excludes).
1779 """
1779 """
1780 return narrowspec.load(self)
1780 return narrowspec.load(self)
1781
1781
1782 @storecache(narrowspec.FILENAME)
1782 @storecache(narrowspec.FILENAME)
1783 def _storenarrowmatch(self):
1783 def _storenarrowmatch(self):
1784 if requirementsmod.NARROW_REQUIREMENT not in self.requirements:
1784 if requirementsmod.NARROW_REQUIREMENT not in self.requirements:
1785 return matchmod.always()
1785 return matchmod.always()
1786 include, exclude = self.narrowpats
1786 include, exclude = self.narrowpats
1787 return narrowspec.match(self.root, include=include, exclude=exclude)
1787 return narrowspec.match(self.root, include=include, exclude=exclude)
1788
1788
1789 @storecache(narrowspec.FILENAME)
1789 @storecache(narrowspec.FILENAME)
1790 def _narrowmatch(self):
1790 def _narrowmatch(self):
1791 if requirementsmod.NARROW_REQUIREMENT not in self.requirements:
1791 if requirementsmod.NARROW_REQUIREMENT not in self.requirements:
1792 return matchmod.always()
1792 return matchmod.always()
1793 narrowspec.checkworkingcopynarrowspec(self)
1793 narrowspec.checkworkingcopynarrowspec(self)
1794 include, exclude = self.narrowpats
1794 include, exclude = self.narrowpats
1795 return narrowspec.match(self.root, include=include, exclude=exclude)
1795 return narrowspec.match(self.root, include=include, exclude=exclude)
1796
1796
1797 def narrowmatch(self, match=None, includeexact=False):
1797 def narrowmatch(self, match=None, includeexact=False):
1798 """matcher corresponding to the repo's narrowspec
1798 """matcher corresponding to the repo's narrowspec
1799
1799
1800 If `match` is given, then that will be intersected with the narrow
1800 If `match` is given, then that will be intersected with the narrow
1801 matcher.
1801 matcher.
1802
1802
1803 If `includeexact` is True, then any exact matches from `match` will
1803 If `includeexact` is True, then any exact matches from `match` will
1804 be included even if they're outside the narrowspec.
1804 be included even if they're outside the narrowspec.
1805 """
1805 """
1806 if match:
1806 if match:
1807 if includeexact and not self._narrowmatch.always():
1807 if includeexact and not self._narrowmatch.always():
1808 # do not exclude explicitly-specified paths, so that warnings can
1808 # do not exclude explicitly-specified paths, so that warnings can
1809 # be issued for them later on
1809 # be issued for them later on
1810 em = matchmod.exact(match.files())
1810 em = matchmod.exact(match.files())
1811 nm = matchmod.unionmatcher([self._narrowmatch, em])
1811 nm = matchmod.unionmatcher([self._narrowmatch, em])
1812 return matchmod.intersectmatchers(match, nm)
1812 return matchmod.intersectmatchers(match, nm)
1813 return matchmod.intersectmatchers(match, self._narrowmatch)
1813 return matchmod.intersectmatchers(match, self._narrowmatch)
1814 return self._narrowmatch
1814 return self._narrowmatch
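
# Illustrative sketch, assuming `repo` is an existing repository object;
# matchers returned here are callable on repo-relative byte paths (the
# path below is hypothetical).
m = repo.narrowmatch()
if m(b'some/dir/file.txt'):
    pass  # inside the narrowspec; always true for non-narrow repos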
1815
1815
1816 def setnarrowpats(self, newincludes, newexcludes):
1816 def setnarrowpats(self, newincludes, newexcludes):
1817 narrowspec.save(self, newincludes, newexcludes)
1817 narrowspec.save(self, newincludes, newexcludes)
1818 self.invalidate(clearfilecache=True)
1818 self.invalidate(clearfilecache=True)
1819
1819
1820 @unfilteredpropertycache
1820 @unfilteredpropertycache
1821 def _quick_access_changeid_null(self):
1821 def _quick_access_changeid_null(self):
1822 return {
1822 return {
1823 b'null': (nullrev, self.nodeconstants.nullid),
1823 b'null': (nullrev, self.nodeconstants.nullid),
1824 nullrev: (nullrev, self.nodeconstants.nullid),
1824 nullrev: (nullrev, self.nodeconstants.nullid),
1825 self.nullid: (nullrev, self.nullid),
1825 self.nullid: (nullrev, self.nullid),
1826 }
1826 }
1827
1827
1828 @unfilteredpropertycache
1828 @unfilteredpropertycache
1829 def _quick_access_changeid_wc(self):
1829 def _quick_access_changeid_wc(self):
1830 # also fast path access to the working copy parents
1830 # also fast path access to the working copy parents
1831 # however, only do it for filters that ensure the wc is visible.
1831 # however, only do it for filters that ensure the wc is visible.
1832 quick = self._quick_access_changeid_null.copy()
1832 quick = self._quick_access_changeid_null.copy()
1833 cl = self.unfiltered().changelog
1833 cl = self.unfiltered().changelog
1834 for node in self.dirstate.parents():
1834 for node in self.dirstate.parents():
1835 if node == self.nullid:
1835 if node == self.nullid:
1836 continue
1836 continue
1837 rev = cl.index.get_rev(node)
1837 rev = cl.index.get_rev(node)
1838 if rev is None:
1838 if rev is None:
1839 # unknown working copy parent case:
1839 # unknown working copy parent case:
1840 #
1840 #
1841 # skip the fast path and let higher code deal with it
1841 # skip the fast path and let higher code deal with it
1842 continue
1842 continue
1843 pair = (rev, node)
1843 pair = (rev, node)
1844 quick[rev] = pair
1844 quick[rev] = pair
1845 quick[node] = pair
1845 quick[node] = pair
1846 # also add the parents of the parents
1846 # also add the parents of the parents
1847 for r in cl.parentrevs(rev):
1847 for r in cl.parentrevs(rev):
1848 if r == nullrev:
1848 if r == nullrev:
1849 continue
1849 continue
1850 n = cl.node(r)
1850 n = cl.node(r)
1851 pair = (r, n)
1851 pair = (r, n)
1852 quick[r] = pair
1852 quick[r] = pair
1853 quick[n] = pair
1853 quick[n] = pair
1854 p1node = self.dirstate.p1()
1854 p1node = self.dirstate.p1()
1855 if p1node != self.nullid:
1855 if p1node != self.nullid:
1856 quick[b'.'] = quick[p1node]
1856 quick[b'.'] = quick[p1node]
1857 return quick
1857 return quick
1858
1858
1859 @unfilteredmethod
1859 @unfilteredmethod
1860 def _quick_access_changeid_invalidate(self):
1860 def _quick_access_changeid_invalidate(self):
1861 if '_quick_access_changeid_wc' in vars(self):
1861 if '_quick_access_changeid_wc' in vars(self):
1862 del self.__dict__['_quick_access_changeid_wc']
1862 del self.__dict__['_quick_access_changeid_wc']
1863
1863
1864 @property
1864 @property
1865 def _quick_access_changeid(self):
1865 def _quick_access_changeid(self):
1866 """a helper dictionary for __getitem__ calls
1866 """a helper dictionary for __getitem__ calls
1867
1867
1868 This contains a list of symbols we can recognise right away without
1868 This contains a list of symbols we can recognise right away without
1869 further processing.
1869 further processing.
1870 """
1870 """
1871 if self.filtername in repoview.filter_has_wc:
1871 if self.filtername in repoview.filter_has_wc:
1872 return self._quick_access_changeid_wc
1872 return self._quick_access_changeid_wc
1873 return self._quick_access_changeid_null
1873 return self._quick_access_changeid_null
1874
1874
1875 def __getitem__(self, changeid):
1875 def __getitem__(self, changeid):
1876 # dealing with special cases
1876 # dealing with special cases
1877 if changeid is None:
1877 if changeid is None:
1878 return context.workingctx(self)
1878 return context.workingctx(self)
1879 if isinstance(changeid, context.basectx):
1879 if isinstance(changeid, context.basectx):
1880 return changeid
1880 return changeid
1881
1881
1882 # dealing with multiple revisions
1882 # dealing with multiple revisions
1883 if isinstance(changeid, slice):
1883 if isinstance(changeid, slice):
1884 # wdirrev isn't contiguous so the slice shouldn't include it
1884 # wdirrev isn't contiguous so the slice shouldn't include it
1885 return [
1885 return [
1886 self[i]
1886 self[i]
1887 for i in pycompat.xrange(*changeid.indices(len(self)))
1887 for i in pycompat.xrange(*changeid.indices(len(self)))
1888 if i not in self.changelog.filteredrevs
1888 if i not in self.changelog.filteredrevs
1889 ]
1889 ]
1890
1890
1891 # dealing with some special values
1891 # dealing with some special values
1892 quick_access = self._quick_access_changeid.get(changeid)
1892 quick_access = self._quick_access_changeid.get(changeid)
1893 if quick_access is not None:
1893 if quick_access is not None:
1894 rev, node = quick_access
1894 rev, node = quick_access
1895 return context.changectx(self, rev, node, maybe_filtered=False)
1895 return context.changectx(self, rev, node, maybe_filtered=False)
1896 if changeid == b'tip':
1896 if changeid == b'tip':
1897 node = self.changelog.tip()
1897 node = self.changelog.tip()
1898 rev = self.changelog.rev(node)
1898 rev = self.changelog.rev(node)
1899 return context.changectx(self, rev, node)
1899 return context.changectx(self, rev, node)
1900
1900
1901 # dealing with arbitrary values
1901 # dealing with arbitrary values
1902 try:
1902 try:
1903 if isinstance(changeid, int):
1903 if isinstance(changeid, int):
1904 node = self.changelog.node(changeid)
1904 node = self.changelog.node(changeid)
1905 rev = changeid
1905 rev = changeid
1906 elif changeid == b'.':
1906 elif changeid == b'.':
1907 # this is a hack to delay/avoid loading obsmarkers
1907 # this is a hack to delay/avoid loading obsmarkers
1908 # when we know that '.' won't be hidden
1908 # when we know that '.' won't be hidden
1909 node = self.dirstate.p1()
1909 node = self.dirstate.p1()
1910 rev = self.unfiltered().changelog.rev(node)
1910 rev = self.unfiltered().changelog.rev(node)
1911 elif len(changeid) == self.nodeconstants.nodelen:
1911 elif len(changeid) == self.nodeconstants.nodelen:
1912 try:
1912 try:
1913 node = changeid
1913 node = changeid
1914 rev = self.changelog.rev(changeid)
1914 rev = self.changelog.rev(changeid)
1915 except error.FilteredLookupError:
1915 except error.FilteredLookupError:
1916 changeid = hex(changeid) # for the error message
1916 changeid = hex(changeid) # for the error message
1917 raise
1917 raise
1918 except LookupError:
1918 except LookupError:
1919 # check if it might have come from a damaged dirstate
1919 # check if it might have come from a damaged dirstate
1920 #
1920 #
1921 # XXX we could avoid the unfiltered if we had a recognizable
1921 # XXX we could avoid the unfiltered if we had a recognizable
1922 # exception for filtered changeset access
1922 # exception for filtered changeset access
1923 if (
1923 if (
1924 self.local()
1924 self.local()
1925 and changeid in self.unfiltered().dirstate.parents()
1925 and changeid in self.unfiltered().dirstate.parents()
1926 ):
1926 ):
1927 msg = _(b"working directory has unknown parent '%s'!")
1927 msg = _(b"working directory has unknown parent '%s'!")
1928 raise error.Abort(msg % short(changeid))
1928 raise error.Abort(msg % short(changeid))
1929 changeid = hex(changeid) # for the error message
1929 changeid = hex(changeid) # for the error message
1930 raise
1930 raise
1931
1931
1932 elif len(changeid) == 2 * self.nodeconstants.nodelen:
1932 elif len(changeid) == 2 * self.nodeconstants.nodelen:
1933 node = bin(changeid)
1933 node = bin(changeid)
1934 rev = self.changelog.rev(node)
1934 rev = self.changelog.rev(node)
1935 else:
1935 else:
1936 raise error.ProgrammingError(
1936 raise error.ProgrammingError(
1937 b"unsupported changeid '%s' of type %s"
1937 b"unsupported changeid '%s' of type %s"
1938 % (changeid, pycompat.bytestr(type(changeid)))
1938 % (changeid, pycompat.bytestr(type(changeid)))
1939 )
1939 )
1940
1940
1941 return context.changectx(self, rev, node)
1941 return context.changectx(self, rev, node)
1942
1942
1943 except (error.FilteredIndexError, error.FilteredLookupError):
1943 except (error.FilteredIndexError, error.FilteredLookupError):
1944 raise error.FilteredRepoLookupError(
1944 raise error.FilteredRepoLookupError(
1945 _(b"filtered revision '%s'") % pycompat.bytestr(changeid)
1945 _(b"filtered revision '%s'") % pycompat.bytestr(changeid)
1946 )
1946 )
1947 except (IndexError, LookupError):
1947 except (IndexError, LookupError):
1948 raise error.RepoLookupError(
1948 raise error.RepoLookupError(
1949 _(b"unknown revision '%s'") % pycompat.bytestr(changeid)
1949 _(b"unknown revision '%s'") % pycompat.bytestr(changeid)
1950 )
1950 )
1951 except error.WdirUnsupported:
1951 except error.WdirUnsupported:
1952 return context.workingctx(self)
1952 return context.workingctx(self)
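
# Illustrative sketch of the lookup forms handled above, assuming `repo`
# is an existing, non-empty repository object:
ctx = repo[0]        # integer revision number
ctx = repo[b'tip']   # the b'tip' special case
ctx = repo[b'.']     # working directory parent, via the quick-access table
wctx = repo[None]    # workingctx for the working directory itself
ctxs = repo[0:3]     # slice -> list of changectx, filtered revs skipped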
1953
1953
1954 def __contains__(self, changeid):
1954 def __contains__(self, changeid):
1955 """True if the given changeid exists"""
1955 """True if the given changeid exists"""
1956 try:
1956 try:
1957 self[changeid]
1957 self[changeid]
1958 return True
1958 return True
1959 except error.RepoLookupError:
1959 except error.RepoLookupError:
1960 return False
1960 return False
1961
1961
1962 def __nonzero__(self):
1962 def __nonzero__(self):
1963 return True
1963 return True
1964
1964
1965 __bool__ = __nonzero__
1965 __bool__ = __nonzero__
1966
1966
1967 def __len__(self):
1967 def __len__(self):
1968 # no need to pay the cost of repoview.changelog
1968 # no need to pay the cost of repoview.changelog
1969 unfi = self.unfiltered()
1969 unfi = self.unfiltered()
1970 return len(unfi.changelog)
1970 return len(unfi.changelog)
1971
1971
1972 def __iter__(self):
1972 def __iter__(self):
1973 return iter(self.changelog)
1973 return iter(self.changelog)
1974
1974
1975 def revs(self, expr, *args):
1975 def revs(self, expr, *args):
1976 """Find revisions matching a revset.
1976 """Find revisions matching a revset.
1977
1977
1978 The revset is specified as a string ``expr`` that may contain
1978 The revset is specified as a string ``expr`` that may contain
1979 %-formatting to escape certain types. See ``revsetlang.formatspec``.
1979 %-formatting to escape certain types. See ``revsetlang.formatspec``.
1980
1980
1981 Revset aliases from the configuration are not expanded. To expand
1981 Revset aliases from the configuration are not expanded. To expand
1982 user aliases, consider calling ``scmutil.revrange()`` or
1982 user aliases, consider calling ``scmutil.revrange()`` or
1983 ``repo.anyrevs([expr], user=True)``.
1983 ``repo.anyrevs([expr], user=True)``.
1984
1984
1985 Returns a smartset.abstractsmartset, which is a list-like interface
1985 Returns a smartset.abstractsmartset, which is a list-like interface
1986 that contains integer revisions.
1986 that contains integer revisions.
1987 """
1987 """
1988 tree = revsetlang.spectree(expr, *args)
1988 tree = revsetlang.spectree(expr, *args)
1989 return revset.makematcher(tree)(self)
1989 return revset.makematcher(tree)(self)
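
# Illustrative sketch of %-escaping in revset expressions, assuming `repo`
# is an existing repository object (see revsetlang.formatspec for the full
# list of escapes):
revs = repo.revs(b'heads(all())')                  # plain expression
revs = repo.revs(b'ancestors(%d)', 5)              # %d: a revision number
revs = repo.revs(b'%ld and not public()', [0, 1])  # %ld: list of revisions
for r in revs:
    print(r)  # integer revisions from the smartset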
1990
1990
1991 def set(self, expr, *args):
1991 def set(self, expr, *args):
1992 """Find revisions matching a revset and emit changectx instances.
1992 """Find revisions matching a revset and emit changectx instances.
1993
1993
1994 This is a convenience wrapper around ``revs()`` that iterates the
1994 This is a convenience wrapper around ``revs()`` that iterates the
1995 result and is a generator of changectx instances.
1995 result and is a generator of changectx instances.
1996
1996
1997 Revset aliases from the configuration are not expanded. To expand
1997 Revset aliases from the configuration are not expanded. To expand
1998 user aliases, consider calling ``scmutil.revrange()``.
1998 user aliases, consider calling ``scmutil.revrange()``.
1999 """
1999 """
2000 for r in self.revs(expr, *args):
2000 for r in self.revs(expr, *args):
2001 yield self[r]
2001 yield self[r]
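
# Illustrative sketch, assuming `repo` is an existing repository object:
for ctx in repo.set(b'parents(%d)', 5):  # revision 5 is arbitrary
    print(ctx.rev(), ctx.hex(), ctx.description())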
2002
2002
2003 def anyrevs(self, specs, user=False, localalias=None):
2003 def anyrevs(self, specs, user=False, localalias=None):
2004 """Find revisions matching one of the given revsets.
2004 """Find revisions matching one of the given revsets.
2005
2005
2006 Revset aliases from the configuration are not expanded by default. To
2006 Revset aliases from the configuration are not expanded by default. To
2007 expand user aliases, specify ``user=True``. To provide some local
2007 expand user aliases, specify ``user=True``. To provide some local
2008 definitions overriding user aliases, set ``localalias`` to
2008 definitions overriding user aliases, set ``localalias`` to
2009 ``{name: definitionstring}``.
2009 ``{name: definitionstring}``.
2010 """
2010 """
2011 if specs == [b'null']:
2011 if specs == [b'null']:
2012 return revset.baseset([nullrev])
2012 return revset.baseset([nullrev])
2013 if specs == [b'.']:
2013 if specs == [b'.']:
2014 quick_data = self._quick_access_changeid.get(b'.')
2014 quick_data = self._quick_access_changeid.get(b'.')
2015 if quick_data is not None:
2015 if quick_data is not None:
2016 return revset.baseset([quick_data[0]])
2016 return revset.baseset([quick_data[0]])
2017 if user:
2017 if user:
2018 m = revset.matchany(
2018 m = revset.matchany(
2019 self.ui,
2019 self.ui,
2020 specs,
2020 specs,
2021 lookup=revset.lookupfn(self),
2021 lookup=revset.lookupfn(self),
2022 localalias=localalias,
2022 localalias=localalias,
2023 )
2023 )
2024 else:
2024 else:
2025 m = revset.matchany(None, specs, localalias=localalias)
2025 m = revset.matchany(None, specs, localalias=localalias)
2026 return m(self)
2026 return m(self)
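
# Illustrative sketch, assuming `repo` is an existing repository object;
# the alias name and its definition are hypothetical:
revs = repo.anyrevs(
    [b'mybase'],
    user=True,
    localalias={b'mybase': b'ancestor(.)'},
)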
2027
2027
2028 def url(self):
2028 def url(self):
2029 return b'file:' + self.root
2029 return b'file:' + self.root
2030
2030
2031 def hook(self, name, throw=False, **args):
2031 def hook(self, name, throw=False, **args):
2032 """Call a hook, passing this repo instance.
2032 """Call a hook, passing this repo instance.
2033
2033
2034 This is a convenience method to aid invoking hooks. Extensions likely
2034 This is a convenience method to aid invoking hooks. Extensions likely
2035 won't call this unless they have registered a custom hook or are
2035 won't call this unless they have registered a custom hook or are
2036 replacing code that is expected to call a hook.
2036 replacing code that is expected to call a hook.
2037 """
2037 """
2038 return hook.hook(self.ui, self, name, throw, **args)
2038 return hook.hook(self.ui, self, name, throw, **args)
2039
2039
2040 @filteredpropertycache
2040 @filteredpropertycache
2041 def _tagscache(self):
2041 def _tagscache(self):
2042 """Returns a tagscache object that contains various tag-related
2042 """Returns a tagscache object that contains various tag-related
2043 caches."""
2043 caches."""
2044
2044
2045 # This simplifies its cache management by having one decorated
2045 # This simplifies its cache management by having one decorated
2046 # function (this one) and the rest simply fetch things from it.
2046 # function (this one) and the rest simply fetch things from it.
2047 class tagscache:
2047 class tagscache:
2048 def __init__(self):
2048 def __init__(self):
2049 # These two define the set of tags for this repository. tags
2049 # These two define the set of tags for this repository. tags
2050 # maps tag name to node; tagtypes maps tag name to 'global' or
2050 # maps tag name to node; tagtypes maps tag name to 'global' or
2051 # 'local'. (Global tags are defined by .hgtags across all
2051 # 'local'. (Global tags are defined by .hgtags across all
2052 # heads, and local tags are defined in .hg/localtags.)
2052 # heads, and local tags are defined in .hg/localtags.)
2053 # They constitute the in-memory cache of tags.
2053 # They constitute the in-memory cache of tags.
2054 self.tags = self.tagtypes = None
2054 self.tags = self.tagtypes = None
2055
2055
2056 self.nodetagscache = self.tagslist = None
2056 self.nodetagscache = self.tagslist = None
2057
2057
2058 cache = tagscache()
2058 cache = tagscache()
2059 cache.tags, cache.tagtypes = self._findtags()
2059 cache.tags, cache.tagtypes = self._findtags()
2060
2060
2061 return cache
2061 return cache
2062
2062
2063 def tags(self):
2063 def tags(self):
2064 '''return a mapping of tag to node'''
2064 '''return a mapping of tag to node'''
2065 t = {}
2065 t = {}
2066 if self.changelog.filteredrevs:
2066 if self.changelog.filteredrevs:
2067 tags, tt = self._findtags()
2067 tags, tt = self._findtags()
2068 else:
2068 else:
2069 tags = self._tagscache.tags
2069 tags = self._tagscache.tags
2070 rev = self.changelog.rev
2070 rev = self.changelog.rev
2071 for k, v in tags.items():
2071 for k, v in tags.items():
2072 try:
2072 try:
2073 # ignore tags to unknown nodes
2073 # ignore tags to unknown nodes
2074 rev(v)
2074 rev(v)
2075 t[k] = v
2075 t[k] = v
2076 except (error.LookupError, ValueError):
2076 except (error.LookupError, ValueError):
2077 pass
2077 pass
2078 return t
2078 return t
2079
2079
2080 def _findtags(self):
2080 def _findtags(self):
2081 """Do the hard work of finding tags. Return a pair of dicts
2081 """Do the hard work of finding tags. Return a pair of dicts
2082 (tags, tagtypes) where tags maps tag name to node, and tagtypes
2082 (tags, tagtypes) where tags maps tag name to node, and tagtypes
2083 maps tag name to a string like \'global\' or \'local\'.
2083 maps tag name to a string like \'global\' or \'local\'.
2084 Subclasses or extensions are free to add their own tags, but
2084 Subclasses or extensions are free to add their own tags, but
2085 should be aware that the returned dicts will be retained for the
2085 should be aware that the returned dicts will be retained for the
2086 duration of the localrepo object."""
2086 duration of the localrepo object."""
2087
2087
2088 # XXX what tagtype should subclasses/extensions use? Currently
2088 # XXX what tagtype should subclasses/extensions use? Currently
2089 # mq and bookmarks add tags, but do not set the tagtype at all.
2089 # mq and bookmarks add tags, but do not set the tagtype at all.
2090 # Should each extension invent its own tag type? Should there
2090 # Should each extension invent its own tag type? Should there
2091 # be one tagtype for all such "virtual" tags? Or is the status
2091 # be one tagtype for all such "virtual" tags? Or is the status
2092 # quo fine?
2092 # quo fine?
2093
2093
2094 # map tag name to (node, hist)
2094 # map tag name to (node, hist)
2095 alltags = tagsmod.findglobaltags(self.ui, self)
2095 alltags = tagsmod.findglobaltags(self.ui, self)
2096 # map tag name to tag type
2096 # map tag name to tag type
2097 tagtypes = {tag: b'global' for tag in alltags}
2097 tagtypes = {tag: b'global' for tag in alltags}
2098
2098
2099 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
2099 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
2100
2100
2101 # Build the return dicts. Have to re-encode tag names because
2101 # Build the return dicts. Have to re-encode tag names because
2102 # the tags module always uses UTF-8 (in order not to lose info
2102 # the tags module always uses UTF-8 (in order not to lose info
2103 # writing to the cache), but the rest of Mercurial wants them in
2103 # writing to the cache), but the rest of Mercurial wants them in
2104 # local encoding.
2104 # local encoding.
2105 tags = {}
2105 tags = {}
2106 for (name, (node, hist)) in alltags.items():
2106 for (name, (node, hist)) in alltags.items():
2107 if node != self.nullid:
2107 if node != self.nullid:
2108 tags[encoding.tolocal(name)] = node
2108 tags[encoding.tolocal(name)] = node
2109 tags[b'tip'] = self.changelog.tip()
2109 tags[b'tip'] = self.changelog.tip()
2110 tagtypes = {
2110 tagtypes = {
2111 encoding.tolocal(name): value for (name, value) in tagtypes.items()
2111 encoding.tolocal(name): value for (name, value) in tagtypes.items()
2112 }
2112 }
2113 return (tags, tagtypes)
2113 return (tags, tagtypes)
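
# Illustrative sketch of an extension adding its own tags by wrapping
# _findtags(), as the docstring above invites. The tag name and tag type
# are hypothetical.
from mercurial import extensions, localrepo

def _findtags_wrapped(orig, self):
    tags, tagtypes = orig(self)
    tags[b'mytag'] = self.changelog.tip()  # hypothetical tag on tip
    tagtypes[b'mytag'] = b'myext'          # hypothetical tag type
    return tags, tagtypes

def extsetup(ui):
    extensions.wrapfunction(
        localrepo.localrepository, '_findtags', _findtags_wrapped
    )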
2114
2114
2115 def tagtype(self, tagname):
2115 def tagtype(self, tagname):
2116 """
2116 """
2117 return the type of the given tag. The result can be:
2117 return the type of the given tag. The result can be:
2118
2118
2119 'local' : a local tag
2119 'local' : a local tag
2120 'global' : a global tag
2120 'global' : a global tag
2121 None : tag does not exist
2121 None : tag does not exist
2122 """
2122 """
2123
2123
2124 return self._tagscache.tagtypes.get(tagname)
2124 return self._tagscache.tagtypes.get(tagname)
2125
2125
2126 def tagslist(self):
2126 def tagslist(self):
2127 '''return a list of tags ordered by revision'''
2127 '''return a list of tags ordered by revision'''
2128 if not self._tagscache.tagslist:
2128 if not self._tagscache.tagslist:
2129 l = []
2129 l = []
2130 for t, n in self.tags().items():
2130 for t, n in self.tags().items():
2131 l.append((self.changelog.rev(n), t, n))
2131 l.append((self.changelog.rev(n), t, n))
2132 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
2132 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
2133
2133
2134 return self._tagscache.tagslist
2134 return self._tagscache.tagslist
2135
2135
2136 def nodetags(self, node):
2136 def nodetags(self, node):
2137 '''return the tags associated with a node'''
2137 '''return the tags associated with a node'''
2138 if not self._tagscache.nodetagscache:
2138 if not self._tagscache.nodetagscache:
2139 nodetagscache = {}
2139 nodetagscache = {}
2140 for t, n in self._tagscache.tags.items():
2140 for t, n in self._tagscache.tags.items():
2141 nodetagscache.setdefault(n, []).append(t)
2141 nodetagscache.setdefault(n, []).append(t)
2142 for tags in nodetagscache.values():
2142 for tags in nodetagscache.values():
2143 tags.sort()
2143 tags.sort()
2144 self._tagscache.nodetagscache = nodetagscache
2144 self._tagscache.nodetagscache = nodetagscache
2145 return self._tagscache.nodetagscache.get(node, [])
2145 return self._tagscache.nodetagscache.get(node, [])
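
# Illustrative sketch tying the tag accessors together, assuming `repo`
# is an existing repository object:
for name, node in repo.tags().items():
    kind = repo.tagtype(name)       # b'global', b'local', or None
    siblings = repo.nodetags(node)  # all tags on that node, sorted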
2146
2146
2147 def nodebookmarks(self, node):
2147 def nodebookmarks(self, node):
2148 """return the list of bookmarks pointing to the specified node"""
2148 """return the list of bookmarks pointing to the specified node"""
2149 return self._bookmarks.names(node)
2149 return self._bookmarks.names(node)
2150
2150
2151 def branchmap(self):
2151 def branchmap(self):
2152 """returns a dictionary {branch: [branchheads]} with branchheads
2152 """returns a dictionary {branch: [branchheads]} with branchheads
2153 ordered by increasing revision number"""
2153 ordered by increasing revision number"""
2154 return self._branchcaches[self]
2154 return self._branchcaches[self]
2155
2155
2156 @unfilteredmethod
2156 @unfilteredmethod
2157 def revbranchcache(self):
2157 def revbranchcache(self):
2158 if not self._revbranchcache:
2158 if not self._revbranchcache:
2159 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
2159 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
2160 return self._revbranchcache
2160 return self._revbranchcache
2161
2161
2162 def register_changeset(self, rev, changelogrevision):
2162 def register_changeset(self, rev, changelogrevision):
2163 self.revbranchcache().setdata(rev, changelogrevision)
2163 self.revbranchcache().setdata(rev, changelogrevision)
2164
2164
2165 def branchtip(self, branch, ignoremissing=False):
2165 def branchtip(self, branch, ignoremissing=False):
2166 """return the tip node for a given branch
2166 """return the tip node for a given branch
2167
2167
2168 If ignoremissing is True, then this method will not raise an error.
2168 If ignoremissing is True, then this method will not raise an error.
2169 This is helpful for callers that only expect None for a missing branch
2169 This is helpful for callers that only expect None for a missing branch
2170 (e.g. namespace).
2170 (e.g. namespace).
2171
2171
2172 """
2172 """
2173 try:
2173 try:
2174 return self.branchmap().branchtip(branch)
2174 return self.branchmap().branchtip(branch)
2175 except KeyError:
2175 except KeyError:
2176 if not ignoremissing:
2176 if not ignoremissing:
2177 raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
2177 raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
2178 else:
2178 else:
2179 pass
2179 pass
2180
2180
2181 def lookup(self, key):
2181 def lookup(self, key):
2182 node = scmutil.revsymbol(self, key).node()
2182 node = scmutil.revsymbol(self, key).node()
2183 if node is None:
2183 if node is None:
2184 raise error.RepoLookupError(_(b"unknown revision '%s'") % key)
2184 raise error.RepoLookupError(_(b"unknown revision '%s'") % key)
2185 return node
2185 return node
2186
2186
2187 def lookupbranch(self, key):
2187 def lookupbranch(self, key):
2188 if self.branchmap().hasbranch(key):
2188 if self.branchmap().hasbranch(key):
2189 return key
2189 return key
2190
2190
2191 return scmutil.revsymbol(self, key).branch()
2191 return scmutil.revsymbol(self, key).branch()
2192
2192
2193 def known(self, nodes):
2193 def known(self, nodes):
2194 cl = self.changelog
2194 cl = self.changelog
2195 get_rev = cl.index.get_rev
2195 get_rev = cl.index.get_rev
2196 filtered = cl.filteredrevs
2196 filtered = cl.filteredrevs
2197 result = []
2197 result = []
2198 for n in nodes:
2198 for n in nodes:
2199 r = get_rev(n)
2199 r = get_rev(n)
2200 resp = not (r is None or r in filtered)
2200 resp = not (r is None or r in filtered)
2201 result.append(resp)
2201 result.append(resp)
2202 return result
2202 return result
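
    # known() is a per-node membership test (used, among other things, by
    # push/pull discovery via the wire protocol's 'known' command); filtered
    # revisions deliberately count as unknown.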

    def local(self):
        return self

    def publishing(self):
        # it's safe (and desirable) to trust the publish flag unconditionally
        # so that we don't finalize changes shared between users via ssh or nfs
        return self.ui.configbool(b'phases', b'publish', untrusted=True)

    def cancopy(self):
        # so statichttprepo's override of local() works
        if not self.local():
            return False
        if not self.publishing():
            return True
        # if publishing we can't copy if there is filtered content
        return not self.filtered(b'visible').changelog.filteredrevs

    def shared(self):
        '''the type of shared repository (None if not shared)'''
        if self.sharedpath != self.path:
            return b'store'
        return None

    def wjoin(self, f, *insidef):
        return self.vfs.reljoin(self.root, f, *insidef)

    def setparents(self, p1, p2=None):
        if p2 is None:
            p2 = self.nullid
        self[None].setparents(p1, p2)
        self._quick_access_changeid_invalidate()

    def filectx(self, path, changeid=None, fileid=None, changectx=None):
        """changeid must be a changeset revision, if specified.
        fileid can be a file revision or node."""
        return context.filectx(
            self, path, changeid, fileid, changectx=changectx
        )

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def _loadfilter(self, filter):
        if filter not in self._filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == b'!':
                    continue
                mf = matchmod.match(self.root, b'', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.items():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name) :].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: procutil.filter(s, c)
                    fn.__name__ = 'commandfilter'
                # Wrap old filters not supporting keyword arguments
                if not pycompat.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, oldfn=oldfn, **kwargs: oldfn(s, c)
                    fn.__name__ = 'compat-' + oldfn.__name__
                l.append((mf, fn, params))
            self._filterpats[filter] = l
        return self._filterpats[filter]
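
    # The [encode]/[decode] hgrc sections feed _loadfilter; a classic example
    # from the hgrc documentation (an illustration, not a recommendation):
    #
    #   [encode]
    #   *.gz = pipe: gunzip
    #   [decode]
    #   *.gz = pipe: gzip
    #
    # A command of '!' disables a pattern inherited from another config file.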

    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug(
                    b"filtering %s through %s\n"
                    % (filename, cmd or pycompat.sysbytes(fn.__name__))
                )
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    @unfilteredpropertycache
    def _encodefilterpats(self):
        return self._loadfilter(b'encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        return self._loadfilter(b'decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self.wvfs.islink(filename):
            data = self.wvfs.readlink(filename)
        else:
            data = self.wvfs.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
        """write ``data`` into ``filename`` in the working directory

        This returns the length of the written (maybe decoded) data.
        """
        data = self._filter(self._decodefilterpats, filename, data)
        if b'l' in flags:
            self.wvfs.symlink(data, filename)
        else:
            self.wvfs.write(
                filename, data, backgroundclose=backgroundclose, **kwargs
            )
            if b'x' in flags:
                self.wvfs.setflags(filename, False, True)
            else:
                self.wvfs.setflags(filename, False, False)
        return len(data)
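
    # In wwrite()'s flags argument, b'l' marks a symlink (data is the link
    # target) and b'x' an executable file, mirroring the flag characters
    # stored in the manifest.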

    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

    def currenttransaction(self):
        """return the current transaction or None if none exists"""
        if self._transref:
            tr = self._transref()
        else:
            tr = None

        if tr and tr.running():
            return tr
        return None

    def transaction(self, desc, report=None):
        if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
            b'devel', b'check-locks'
        ):
            if self._currentlock(self._lockref) is None:
                raise error.ProgrammingError(b'transaction requires locking')
        tr = self.currenttransaction()
        if tr is not None:
            return tr.nest(name=desc)

        # abort here if the journal already exists
        if self.svfs.exists(b"journal"):
            raise error.RepoError(
                _(b"abandoned transaction found"),
                hint=_(b"run 'hg recover' to clean up transaction"),
            )

        idbase = b"%.40f#%f" % (random.random(), time.time())
        ha = hex(hashutil.sha1(idbase).digest())
        txnid = b'TXN:' + ha
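        # The transaction id hashes a random value plus the current time, so
        # concurrent transactions get distinct ids; hooks receive it (shell
        # hooks as HG_TXNID in their environment) and can use it to correlate
        # the pretxnopen/txnclose pair of a single transaction.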
        self.hook(b'pretxnopen', throw=True, txnname=desc, txnid=txnid)

        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
        if report:
            rp = report
        else:
            rp = self.ui.warn
        vfsmap = {b'plain': self.vfs, b'store': self.svfs}  # root of .hg/
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        # Code to track tag movement
        #
        # Since tags are all handled as file content, it is actually quite
        # hard to track these movements from a code perspective. So we fall
        # back to tracking at the repository level. One could envision
        # tracking changes to the '.hgtags' file through changegroup apply,
        # but that fails to cope with cases where a transaction exposes new
        # heads without a changegroup being involved (eg: phase movement).
        #
        # For now, we gate the feature behind a flag since it likely comes
        # with a performance impact. The current code runs more often than
        # needed and does not use caches as much as it could. The current
        # focus is on the behavior of the feature, so we disable it by
        # default. The flag will be removed when we are happy with the
        # performance impact.
        #
        # Once this feature is no longer experimental, move the following
        # documentation to the appropriate help section:
        #
        # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
        # tags (new or changed or deleted tags). In addition the details of
        # these changes are made available in a file at:
        #     ``REPOROOT/.hg/changes/tags.changes``.
        # Make sure you check for HG_TAG_MOVED before reading that file, as it
        # might exist from a previous transaction even if no tags were touched
        # in this one. Changes are recorded in a line-based format::
        #
        #   <action> <hex-node> <tag-name>\n
        #
        # Actions are defined as follows:
        #   "-R": tag is removed,
        #   "+A": tag is added,
        #   "-M": tag is moved (old value),
        #   "+M": tag is moved (new value),
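        #
        # A moved tag would thus plausibly produce a "-M" line with the old
        # node followed by a "+M" line with the new node (hypothetical nodes
        # shown for illustration):
        #
        #   -M 9f8e7d6c... v1.0
        #   +M 1a2b3c4d... v1.0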
        tracktags = lambda x: None
        # experimental config: experimental.hook-track-tags
        shouldtracktags = self.ui.configbool(
            b'experimental', b'hook-track-tags'
        )
        if desc != b'strip' and shouldtracktags:
            oldheads = self.changelog.headrevs()

            def tracktags(tr2):
                repo = reporef()
                assert repo is not None  # help pytype
                oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
                newheads = repo.changelog.headrevs()
                newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
                # note: we compare lists here.
                # As we do it only once, building a set would not be cheaper.
                changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
                if changes:
                    tr2.hookargs[b'tag_moved'] = b'1'
                    with repo.vfs(
                        b'changes/tags.changes', b'w', atomictemp=True
                    ) as changesfile:
                        # note: we do not register the file with the
                        # transaction because we need it to still exist
                        # when the transaction is closed (for txnclose
                        # hooks)
                        tagsmod.writediff(changesfile, changes)

        def validate(tr2):
            """will run pre-closing hooks"""
            # XXX the transaction API is a bit lacking here so we take a hacky
            # path for now
            #
            # We cannot add this as a "pending" hook since the 'tr.hookargs'
            # dict is copied before these run. In addition we need the data
            # available to in-memory hooks too.
            #
            # Moreover, we also need to make sure this runs before txnclose
            # hooks and there is no "pending" mechanism that would execute
            # logic only if hooks are about to run.
            #
            # Fixing this limitation of the transaction is also needed to track
            # other families of changes (bookmarks, phases, obsolescence).
            #
            # This will have to be fixed before we remove the experimental
            # gating.
            tracktags(tr2)
            repo = reporef()
            assert repo is not None  # help pytype

            singleheadopt = (b'experimental', b'single-head-per-branch')
            singlehead = repo.ui.configbool(*singleheadopt)
            if singlehead:
                singleheadsub = repo.ui.configsuboptions(*singleheadopt)[1]
                accountclosed = singleheadsub.get(
                    b"account-closed-heads", False
                )
                if singleheadsub.get(b"public-changes-only", False):
                    filtername = b"immutable"
                else:
                    filtername = b"visible"
                scmutil.enforcesinglehead(
                    repo, tr2, desc, accountclosed, filtername
                )
            if hook.hashook(repo.ui, b'pretxnclose-bookmark'):
                for name, (old, new) in sorted(
                    tr.changes[b'bookmarks'].items()
                ):
                    args = tr.hookargs.copy()
                    args.update(bookmarks.preparehookargs(name, old, new))
                    repo.hook(
                        b'pretxnclose-bookmark',
                        throw=True,
                        **pycompat.strkwargs(args)
                    )
            if hook.hashook(repo.ui, b'pretxnclose-phase'):
                cl = repo.unfiltered().changelog
                for revs, (old, new) in tr.changes[b'phases']:
                    for rev in revs:
                        args = tr.hookargs.copy()
                        node = hex(cl.node(rev))
                        args.update(phases.preparehookargs(node, old, new))
                        repo.hook(
                            b'pretxnclose-phase',
                            throw=True,
                            **pycompat.strkwargs(args)
                        )

            repo.hook(
                b'pretxnclose', throw=True, **pycompat.strkwargs(tr.hookargs)
            )

        def releasefn(tr, success):
            repo = reporef()
            if repo is None:
                # If the repo has been GC'd (and this release function is being
                # called from transaction.__del__), there's not much we can do,
                # so just leave the unfinished transaction there and let the
                # user run `hg recover`.
                return
            if success:
                # dirstate.write() must be invoked explicitly here:
                # in-memory changes aren't written out when the
                # transaction closes unless tr.addfilegenerator (via
                # dirstate.write or so) was invoked while the
                # transaction was running
                repo.dirstate.write(None)
            else:
                # discard all changes (including ones already written
                # out) in this transaction
                narrowspec.restorebackup(self, b'journal.narrowspec')
                narrowspec.restorewcbackup(self, b'journal.narrowspec.dirstate')
                repo.dirstate.restorebackup(None, b'journal.dirstate')

                repo.invalidate(clearfilecache=True)

        tr = transaction.transaction(
            rp,
            self.svfs,
            vfsmap,
            b"journal",
            b"undo",
            aftertrans(renames),
            self.store.createmode,
            validator=validate,
            releasefn=releasefn,
            checkambigfiles=_cachedfiles,
            name=desc,
        )
        tr.changes[b'origrepolen'] = len(self)
        tr.changes[b'obsmarkers'] = set()
        tr.changes[b'phases'] = []
        tr.changes[b'bookmarks'] = {}

        tr.hookargs[b'txnid'] = txnid
        tr.hookargs[b'txnname'] = desc
        tr.hookargs[b'changes'] = tr.changes
        # note: writing the fncache only during finalize means that the file
        # is outdated when running hooks. As the fncache is used for streaming
        # clones, this is not expected to break anything that happens during
        # the hooks.
        tr.addfinalize(b'flush-fncache', self.store.write)

        def txnclosehook(tr2):
            """To be run if transaction is successful, will schedule a hook run"""
            # Don't reference tr2 in hook() so we don't hold a reference.
            # This reduces memory consumption when there are multiple
            # transactions per lock. This can likely go away if issue5045
            # fixes the function accumulation.
            hookargs = tr2.hookargs

            def hookfunc(unused_success):
                repo = reporef()
                assert repo is not None  # help pytype

                if hook.hashook(repo.ui, b'txnclose-bookmark'):
                    bmchanges = sorted(tr.changes[b'bookmarks'].items())
                    for name, (old, new) in bmchanges:
                        args = tr.hookargs.copy()
                        args.update(bookmarks.preparehookargs(name, old, new))
                        repo.hook(
                            b'txnclose-bookmark',
                            throw=False,
                            **pycompat.strkwargs(args)
                        )

                if hook.hashook(repo.ui, b'txnclose-phase'):
                    cl = repo.unfiltered().changelog
                    phasemv = sorted(
                        tr.changes[b'phases'], key=lambda r: r[0][0]
                    )
                    for revs, (old, new) in phasemv:
                        for rev in revs:
                            args = tr.hookargs.copy()
                            node = hex(cl.node(rev))
                            args.update(phases.preparehookargs(node, old, new))
                            repo.hook(
                                b'txnclose-phase',
                                throw=False,
                                **pycompat.strkwargs(args)
                            )

                repo.hook(
                    b'txnclose', throw=False, **pycompat.strkwargs(hookargs)
                )

            repo = reporef()
            assert repo is not None  # help pytype
            repo._afterlock(hookfunc)

        tr.addfinalize(b'txnclose-hook', txnclosehook)
        # Include a leading "-" to make it happen before the transaction summary
        # reports registered via scmutil.registersummarycallback() whose names
        # are 00-txnreport etc. That way, the caches will be warm when the
        # callbacks run.
        tr.addpostclose(b'-warm-cache', self._buildcacheupdater(tr))

        def txnaborthook(tr2):
            """To be run if transaction is aborted"""
            repo = reporef()
            assert repo is not None  # help pytype
            repo.hook(
                b'txnabort', throw=False, **pycompat.strkwargs(tr2.hookargs)
            )

        tr.addabort(b'txnabort-hook', txnaborthook)
        # avoid eager cache invalidation. in-memory data should be identical
        # to stored data if transaction has no error.
        tr.addpostclose(b'refresh-filecachestats', self._refreshfilecachestats)
        self._transref = weakref.ref(tr)
        scmutil.registersummarycallback(self, tr, desc)
        return tr

    def _journalfiles(self):
        return (
            (self.svfs, b'journal'),
            (self.svfs, b'journal.narrowspec'),
            (self.vfs, b'journal.narrowspec.dirstate'),
            (self.vfs, b'journal.dirstate'),
            (self.vfs, b'journal.branch'),
            (self.vfs, b'journal.desc'),
            (bookmarks.bookmarksvfs(self), b'journal.bookmarks'),
            (self.svfs, b'journal.phaseroots'),
        )

    def undofiles(self):
        return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
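
    # On successful transaction close, aftertrans() renames each journal.*
    # file to its undo.* counterpart (see undoname), which is what makes a
    # later `hg rollback` possible.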

    @unfilteredmethod
    def _writejournal(self, desc):
        self.dirstate.savebackup(None, b'journal.dirstate')
        narrowspec.savewcbackup(self, b'journal.narrowspec.dirstate')
        narrowspec.savebackup(self, b'journal.narrowspec')
        self.vfs.write(
            b"journal.branch", encoding.fromlocal(self.dirstate.branch())
        )
        self.vfs.write(b"journal.desc", b"%d\n%s\n" % (len(self), desc))
        bookmarksvfs = bookmarks.bookmarksvfs(self)
        bookmarksvfs.write(
            b"journal.bookmarks", bookmarksvfs.tryread(b"bookmarks")
        )
        self.svfs.write(b"journal.phaseroots", self.svfs.tryread(b"phaseroots"))

    def recover(self):
        with self.lock():
            if self.svfs.exists(b"journal"):
                self.ui.status(_(b"rolling back interrupted transaction\n"))
                vfsmap = {
                    b'': self.svfs,
                    b'plain': self.vfs,
                }
                transaction.rollback(
                    self.svfs,
                    vfsmap,
                    b"journal",
                    self.ui.warn,
                    checkambigfiles=_cachedfiles,
                )
                self.invalidate()
                return True
            else:
                self.ui.warn(_(b"no interrupted transaction available\n"))
                return False
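
    # recover() backs the `hg recover` command: it uses the journal left
    # behind by an interrupted transaction to roll the store back to its
    # pre-transaction state.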

    def rollback(self, dryrun=False, force=False):
        wlock = lock = dsguard = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists(b"undo"):
                dsguard = dirstateguard.dirstateguard(self, b'rollback')

                return self._rollback(dryrun, force, dsguard)
            else:
                self.ui.warn(_(b"no rollback information available\n"))
                return 1
        finally:
            release(dsguard, lock, wlock)

    @unfilteredmethod  # Until we get smarter cache management
    def _rollback(self, dryrun, force, dsguard):
        ui = self.ui
        try:
            args = self.vfs.read(b'undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = _(
                    b'repository tip rolled back to revision %d'
                    b' (undo %s: %s)\n'
                ) % (oldtip, desc, detail)
            else:
                msg = _(
                    b'repository tip rolled back to revision %d (undo %s)\n'
                ) % (oldtip, desc)
        except IOError:
            msg = _(b'rolling back unknown transaction\n')
            desc = None

        if not force and self[b'.'] != self[b'tip'] and desc == b'commit':
            raise error.Abort(
                _(
                    b'rollback of last commit while not checked out '
                    b'may lose data'
                ),
                hint=_(b'use -f to force'),
            )

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        vfsmap = {b'plain': self.vfs, b'': self.svfs}
        transaction.rollback(
            self.svfs, vfsmap, b'undo', ui.warn, checkambigfiles=_cachedfiles
        )
        bookmarksvfs = bookmarks.bookmarksvfs(self)
        if bookmarksvfs.exists(b'undo.bookmarks'):
            bookmarksvfs.rename(
                b'undo.bookmarks', b'bookmarks', checkambig=True
            )
        if self.svfs.exists(b'undo.phaseroots'):
            self.svfs.rename(b'undo.phaseroots', b'phaseroots', checkambig=True)
        self.invalidate()

        has_node = self.changelog.index.has_node
        parentgone = any(not has_node(p) for p in parents)
        if parentgone:
            # prevent dirstateguard from overwriting the already-restored one
            dsguard.close()

            narrowspec.restorebackup(self, b'undo.narrowspec')
            narrowspec.restorewcbackup(self, b'undo.narrowspec.dirstate')
            self.dirstate.restorebackup(None, b'undo.dirstate')
            try:
                branch = self.vfs.read(b'undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(
                    _(
                        b'named branch could not be reset: '
                        b'current branch is still \'%s\'\n'
                    )
                    % self.dirstate.branch()
                )

            parents = tuple([p.rev() for p in self[None].parents()])
            if len(parents) > 1:
                ui.status(
                    _(
                        b'working directory now based on '
                        b'revisions %d and %d\n'
                    )
                    % parents
                )
            else:
                ui.status(
                    _(b'working directory now based on revision %d\n') % parents
                )
            mergestatemod.mergestate.clean(self)

        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0
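
    # Only one level of undo is kept: each new transaction replaces the
    # undo.* files, so rollback can revert at most the most recent
    # transaction.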

    def _buildcacheupdater(self, newtransaction):
        """called during transaction to build the callback updating cache

        Lives on the repository to help extensions that might want to augment
        this logic. For this purpose, the created transaction is passed to the
        method.
        """
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)

        def updater(tr):
            repo = reporef()
            assert repo is not None  # help pytype
            repo.updatecaches(tr)

        return updater

    @unfilteredmethod
    def updatecaches(self, tr=None, full=False, caches=None):
        """warm appropriate caches

        If this function is called after a transaction closed, the transaction
        will be available in the 'tr' argument. This can be used to selectively
        update caches relevant to the changes in that transaction.

        If 'full' is set, make sure all caches the function knows about have
        up-to-date data. Even the ones usually loaded more lazily.

        The `full` argument can take a special "post-clone" value. In this
        case the cache warming is done after a clone, and some of the slower
        caches might be skipped, namely the `.fnodetags` one. This argument is
        5.8 specific as we plan for a cleaner way to deal with this for 5.9.
        """
        if tr is not None and tr.hookargs.get(b'source') == b'strip':
            # During strip, many caches are invalid but
            # later call to `destroyed` will refresh them.
            return

        unfi = self.unfiltered()

        if full:
            msg = (
                "`full` argument for `repo.updatecaches` is deprecated\n"
                "(use `caches=repository.CACHE_ALL` instead)"
            )
            self.ui.deprecwarn(msg, b"5.9")
            caches = repository.CACHES_ALL
            if full == b"post-clone":
                caches = repository.CACHES_POST_CLONE
        elif caches is None:
            caches = repository.CACHES_DEFAULT
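
        # `caches` is now a collection of repository.CACHE_* constants; each
        # block below checks membership before warming its cache, so callers
        # can warm exactly the caches they care about.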

        if repository.CACHE_BRANCHMAP_SERVED in caches:
            if tr is None or tr.changes[b'origrepolen'] < len(self):
                # accessing the 'served' branchmap should refresh all the others,
                self.ui.debug(b'updating the branch cache\n')
                self.filtered(b'served').branchmap()
                self.filtered(b'served.hidden').branchmap()
                # flush all possibly delayed write.
                self._branchcaches.write_delayed(self)

        if repository.CACHE_CHANGELOG_CACHE in caches:
            self.changelog.update_caches(transaction=tr)

        if repository.CACHE_MANIFESTLOG_CACHE in caches:
            self.manifestlog.update_caches(transaction=tr)

        if repository.CACHE_REV_BRANCH in caches:
            rbc = unfi.revbranchcache()
            for r in unfi.changelog:
                rbc.branchinfo(r)
            rbc.write()

        if repository.CACHE_FULL_MANIFEST in caches:
            # ensure the working copy parents are in the manifestfulltextcache
            for ctx in self[b'.'].parents():
                ctx.manifest()  # accessing the manifest is enough

        if repository.CACHE_FILE_NODE_TAGS in caches:
            # accessing fnode cache warms the cache
            tagsmod.fnoderevs(self.ui, unfi, unfi.changelog.revs())

        if repository.CACHE_TAGS_DEFAULT in caches:
            # accessing tags warms the cache
            self.tags()
        if repository.CACHE_TAGS_SERVED in caches:
            self.filtered(b'served').tags()

        if repository.CACHE_BRANCHMAP_ALL in caches:
            # The CACHE_BRANCHMAP_ALL updates lazily-loaded caches immediately,
            # so we're forcing a write to cause these caches to be warmed up
            # even if they haven't explicitly been requested yet (if they've
            # never been used by hg, they won't ever have been written, even if
            # they're a subset of another kind of cache that *has* been used).
            for filt in repoview.filtertable.keys():
                filtered = self.filtered(filt)
                filtered.branchmap().write(filtered)

    def invalidatecaches(self):

        if '_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__['_tagscache']

        self._branchcaches.clear()
        self.invalidatevolatilesets()
        self._sparsesignaturecache.clear()

    def invalidatevolatilesets(self):
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)
        self._quick_access_changeid_invalidate()

    def invalidatedirstate(self):
        """Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different from dirstate.invalidate() in that it doesn't
        always reread the dirstate. Use dirstate.invalidate() if you want to
        explicitly read the dirstate again (i.e. restoring it to a previous
        known good state)."""
        if hasunfilteredcache(self, 'dirstate'):
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self.unfiltered(), 'dirstate')

    def invalidate(self, clearfilecache=False):
        """Invalidates both store and non-store parts other than dirstate

        If a transaction is running, invalidation of store is omitted,
        because discarding in-memory changes might cause inconsistency
        (e.g. an incomplete fncache causes unintentional failure, but
        a redundant one doesn't).
        """
        unfiltered = self.unfiltered()  # all file caches are stored unfiltered
        for k in list(self._filecache.keys()):
            # dirstate is invalidated separately in invalidatedirstate()
            if k == b'dirstate':
                continue
            if (
                k == b'changelog'
                and self.currenttransaction()
                and self.changelog._delayed
            ):
                # The changelog object may store unwritten revisions. We don't
                # want to lose them.
                # TODO: Solve the problem instead of working around it.
                continue

            if clearfilecache:
                del self._filecache[k]
            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()
        if not self.currenttransaction():
            # TODO: Changing contents of store outside transaction
            # causes inconsistency. We should make in-memory store
            # changes detectable, and abort if changed.
            self.store.invalidatecaches()

    def invalidateall(self):
        """Fully invalidates both store and non-store parts, causing the
        subsequent operation to reread any outside changes."""
        # extensions should hook this to invalidate their caches
        self.invalidate()
        self.invalidatedirstate()

    @unfilteredmethod
    def _refreshfilecachestats(self, tr):
        """Reload stats of cached files so that they are flagged as valid"""
        for k, ce in self._filecache.items():
            k = pycompat.sysstr(k)
            if k == 'dirstate' or k not in self.__dict__:
                continue
            ce.refresh()

    def _lock(
        self,
        vfs,
        lockname,
        wait,
        releasefn,
        acquirefn,
        desc,
    ):
        timeout = 0
        warntimeout = 0
        if wait:
            timeout = self.ui.configint(b"ui", b"timeout")
            warntimeout = self.ui.configint(b"ui", b"timeout.warn")
        # internal config: ui.signal-safe-lock
        signalsafe = self.ui.configbool(b'ui', b'signal-safe-lock')

        l = lockmod.trylock(
            self.ui,
            vfs,
            lockname,
            timeout,
            warntimeout,
            releasefn=releasefn,
            acquirefn=acquirefn,
            desc=desc,
            signalsafe=signalsafe,
        )
        return l

    def _afterlock(self, callback):
        """add a callback to be run when the repository is fully unlocked

        The callback will be executed when the outermost lock is released
        (with wlock being higher level than 'lock')."""
        for ref in (self._wlockref, self._lockref):
            l = ref and ref()
            if l and l.held:
                l.postrelease.append(callback)
                break
        else:  # no lock has been found.
            callback(True)
2996
2996
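    # Illustrative sketch (not upstream code): extensions typically use
    # _afterlock() to defer work until every lock has been dropped, e.g.
    #
    #     def _deferred(success):
    #         if success:
    #             repo.ui.status(b'all locks released\n')
    #
    #     repo._afterlock(_deferred)
    #
    # If no lock is currently held, the callback runs immediately with True.
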
    def lock(self, wait=True):
        """Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquire
        'wlock' first to avoid a dead-lock hazard."""
        l = self._currentlock(self._lockref)
        if l is not None:
            l.lock()
            return l

        l = self._lock(
            vfs=self.svfs,
            lockname=b"lock",
            wait=wait,
            releasefn=None,
            acquirefn=self.invalidate,
            desc=_(b'repository %s') % self.origroot,
        )
        self._lockref = weakref.ref(l)
        return l

    def wlock(self, wait=True):
        """Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.

        Use this before modifying files in .hg.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquire
        'wlock' first to avoid a dead-lock hazard."""
        l = self._wlockref() if self._wlockref else None
        if l is not None and l.held:
            l.lock()
            return l

        # We do not need to check for non-waiting lock acquisition. Such
        # an acquisition would not cause a dead-lock, as it would just fail.
        if wait and (
            self.ui.configbool(b'devel', b'all-warnings')
            or self.ui.configbool(b'devel', b'check-locks')
        ):
            if self._currentlock(self._lockref) is not None:
                self.ui.develwarn(b'"wlock" acquired after "lock"')

        def unlock():
            if self.dirstate.pendingparentchange():
                self.dirstate.invalidate()
            else:
                self.dirstate.write(None)

            self._filecache[b'dirstate'].refresh()

        l = self._lock(
            self.vfs,
            b"wlock",
            wait,
            unlock,
            self.invalidatedirstate,
            _(b'working directory of %s') % self.origroot,
        )
        self._wlockref = weakref.ref(l)
        return l

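    # Illustrative sketch (not upstream code): when both locks are needed,
    # take 'wlock' before 'lock', mirroring what commit() below does:
    #
    #     with repo.wlock(), repo.lock():
    #         ...  # safe to touch both .hg and .hg/store here
    #
    # Taking them in the opposite order risks deadlocking against another
    # process that follows the documented order (and triggers a devel
    # warning when devel.check-locks is enabled).
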
    def _currentlock(self, lockref):
        """Returns the lock if it's held, or None if it's not."""
        if lockref is None:
            return None
        l = lockref()
        if l is None or not l.held:
            return None
        return l

    def currentwlock(self):
        """Returns the wlock if it's held, or None if it's not."""
        return self._currentlock(self._wlockref)

    def checkcommitpatterns(self, wctx, match, status, fail):
        """check for commit arguments that aren't committable"""
        if match.isexact() or match.prefix():
            matched = set(status.modified + status.added + status.removed)

            for f in match.files():
                f = self.dirstate.normalize(f)
                if f == b'.' or f in matched or f in wctx.substate:
                    continue
                if f in status.deleted:
                    fail(f, _(b'file not found!'))
                # Is it a directory that exists or used to exist?
                if self.wvfs.isdir(f) or wctx.p1().hasdir(f):
                    d = f + b'/'
                    for mf in matched:
                        if mf.startswith(d):
                            break
                    else:
                        fail(f, _(b"no match under directory!"))
                elif f not in self.dirstate:
                    fail(f, _(b"file not tracked!"))

    @unfilteredmethod
    def commit(
        self,
        text=b"",
        user=None,
        date=None,
        match=None,
        force=False,
        editor=None,
        extra=None,
    ):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """
        if extra is None:
            extra = {}

        def fail(f, msg):
            raise error.InputError(b'%s: %s' % (f, msg))

        if not match:
            match = matchmod.always()

        if not force:
            match.bad = fail

        # lock() for recent changelog (see issue4368)
        with self.wlock(), self.lock():
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if not force and merge and not match.always():
                raise error.Abort(
                    _(
                        b'cannot partially commit a merge '
                        b'(do not specify files or patterns)'
                    )
                )

            status = self.status(match=match, clean=force)
            if force:
                status.modified.extend(
                    status.clean
                )  # mq may commit clean files

            # check subrepos
            subs, commitsubs, newstate = subrepoutil.precommit(
                self.ui, wctx, status, match, force=force
            )

            # make sure all explicit patterns are matched
            if not force:
                self.checkcommitpatterns(wctx, match, status, fail)

            cctx = context.workingcommitctx(
                self, status, text, user, date, extra
            )

            ms = mergestatemod.mergestate.read(self)
            mergeutil.checkunresolved(ms)

            # internal config: ui.allowemptycommit
            if cctx.isempty() and not self.ui.configbool(
                b'ui', b'allowemptycommit'
            ):
                self.ui.debug(b'nothing to commit, clearing merge state\n')
                ms.reset()
                return None

            if merge and cctx.deleted():
                raise error.Abort(_(b"cannot commit merge with missing files"))

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = text != cctx._text

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msg_path = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                uipathfn = scmutil.getuipathfn(self)
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(
                        _(b'committing subrepository %s\n')
                        % uipathfn(subrepoutil.subrelpath(sub))
                    )
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepoutil.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != self.nullid and hex(p2) or b'')
            try:
                self.hook(
                    b"precommit", throw=True, parent1=hookp1, parent2=hookp2
                )
                with self.transaction(b'commit'):
                    ret = self.commitctx(cctx, True)
                    # update bookmarks, dirstate and mergestate
                    bookmarks.update(self, [p1, p2], ret)
                    cctx.markcommitted(ret)
                    ms.reset()
            except:  # re-raises
                if edited:
                    self.ui.write(
                        _(b'note: commit message saved in %s\n') % msg_path
                    )
                    self.ui.write(
                        _(
                            b"note: use 'hg commit --logfile "
                            b"%s --edit' to reuse it\n"
                        )
                        % msg_path
                    )
                raise

        def commithook(unused_success):
            # hack for commands that use a temporary commit (eg: histedit)
            # temporary commit got stripped before hook release
            if self.changelog.hasnode(ret):
                self.hook(
                    b"commit", node=hex(ret), parent1=hookp1, parent2=hookp2
                )

        self._afterlock(commithook)
        return ret

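    # Illustrative sketch (not upstream code): a minimal programmatic commit
    # over a repo object whose working directory has pending changes:
    #
    #     node = repo.commit(
    #         text=b'example message',
    #         user=b'Example <example@example.org>',
    #     )
    #
    # commit() returns the new changeset node, or None when there was
    # nothing to commit (unless ui.allowemptycommit is set).
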
    @unfilteredmethod
    def commitctx(self, ctx, error=False, origctx=None):
        return commit.commitctx(self, ctx, error=error, origctx=origctx)

    @unfilteredmethod
    def destroying(self):
        """Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released. Because a call to
        destroyed is imminent, the repo will be invalidated causing those
        changes to stay in memory (waiting for the next unlock), or vanish
        completely.
        """
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        if '_phasecache' in vars(self):
            self._phasecache.write()

    @unfilteredmethod
    def destroyed(self):
        """Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        """
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # refresh all repository caches
        self.updatecaches()

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()

    def status(
        self,
        node1=b'.',
        node2=None,
        match=None,
        ignored=False,
        clean=False,
        unknown=False,
        listsubrepos=False,
    ):
        '''a convenience method that calls node1.status(node2)'''
        return self[node1].status(
            node2, match, ignored, clean, unknown, listsubrepos
        )

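    # Illustrative sketch (not upstream code): comparing the working
    # directory against its first parent, then '.' against tip:
    #
    #     st = repo.status()               # wdir vs. '.'
    #     st = repo.status(b'.', b'tip')   # '.' vs. tip
    #     # st.modified, st.added, st.removed, ... are lists of file names
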
    def addpostdsstatus(self, ps):
        """Add a callback to run within the wlock, at the point at which status
        fixups happen.

        On status completion, callback(wctx, status) will be called with the
        wlock held, unless the dirstate has changed from underneath or the wlock
        couldn't be grabbed.

        Callbacks should not capture and use a cached copy of the dirstate --
        it might change in the meanwhile. Instead, they should access the
        dirstate via wctx.repo().dirstate.

        This list is emptied out after each status run -- extensions should
        make sure they add to this list each time dirstate.status is called.
        Extensions should also make sure they don't call this for statuses
        that don't involve the dirstate.
        """

        # The list is located here for uniqueness reasons -- it is actually
        # managed by the workingctx, but that isn't unique per-repo.
        self._postdsstatus.append(ps)

    def postdsstatus(self):
        """Used by workingctx to get the list of post-dirstate-status hooks."""
        return self._postdsstatus

    def clearpostdsstatus(self):
        """Used by workingctx to clear post-dirstate-status hooks."""
        del self._postdsstatus[:]

    def heads(self, start=None):
        if start is None:
            cl = self.changelog
            headrevs = reversed(cl.headrevs())
            return [cl.node(rev) for rev in headrevs]

        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        """return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        """
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if not branches.hasbranch(branch):
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches.branchheads(branch, closed=closed)))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        return bheads

    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != self.nullid or p[0] == self.nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != self.nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

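    # Illustrative note (not upstream commentary): for each (top, bottom)
    # pair, between() walks first parents from top towards bottom and records
    # the nodes at exponentially growing distances 1, 2, 4, 8, ... from top.
    # This sampling is what the legacy 'between' wire-protocol command relies
    # on to narrow down the boundary between known and unknown changesets in
    # a logarithmic number of round trips.
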
    def checkpush(self, pushop):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override the push
        command.
        """

    @unfilteredpropertycache
    def prepushoutgoinghooks(self):
        """Return util.hooks whose hooks are called with a pushop (carrying
        repo, remote and outgoing) before changesets are pushed.
        """
        return util.hooks()

    def pushkey(self, namespace, key, old, new):
        try:
            tr = self.currenttransaction()
            hookargs = {}
            if tr is not None:
                hookargs.update(tr.hookargs)
            hookargs = pycompat.strkwargs(hookargs)
            hookargs['namespace'] = namespace
            hookargs['key'] = key
            hookargs['old'] = old
            hookargs['new'] = new
            self.hook(b'prepushkey', throw=True, **hookargs)
        except error.HookAbort as exc:
            self.ui.write_err(_(b"pushkey-abort: %s\n") % exc)
            if exc.hint:
                self.ui.write_err(_(b"(%s)\n") % exc.hint)
            return False
        self.ui.debug(b'pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)

        def runhook(unused_success):
            self.hook(
                b'pushkey',
                namespace=namespace,
                key=key,
                old=old,
                new=new,
                ret=ret,
            )

        self._afterlock(runhook)
        return ret

    def listkeys(self, namespace):
        self.hook(b'prelistkeys', throw=True, namespace=namespace)
        self.ui.debug(b'listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook(b'listkeys', namespace=namespace, values=values)
        return values

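    # Illustrative sketch (not upstream code): pushkey is a namespaced
    # key/value protocol; 'bookmarks' is a well-known namespace, e.g.
    #
    #     marks = repo.listkeys(b'bookmarks')  # {name: hex node}
    #     repo.pushkey(b'bookmarks', b'stable', old_hex, new_hex)
    #
    # pushkey() returns False when a prepushkey hook aborts the update;
    # both methods fire their corresponding pre- and post-hooks.
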
    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return b"%s %s %s %s %s" % (
            one,
            two,
            pycompat.bytestr(three),
            pycompat.bytestr(four),
            pycompat.bytestr(five),
        )

    def savecommitmessage(self, text):
        fp = self.vfs(b'last-message.txt', b'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root) + 1 :])

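    # Illustrative note (not upstream commentary): the value returned above
    # is the path of the saved message (typically '.hg/last-message.txt',
    # relative to the current working directory), which commit() echoes back
    # so a failed commit's message can be reused via
    # 'hg commit --logfile <path> --edit'.
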
    def register_wanted_sidedata(self, category):
        if repository.REPO_FEATURE_SIDE_DATA not in self.features:
            # Only revlogv2 repos can want sidedata.
            return
        self._wanted_sidedata.add(pycompat.bytestr(category))

    def register_sidedata_computer(
        self, kind, category, keys, computer, flags, replace=False
    ):
        if kind not in revlogconst.ALL_KINDS:
            msg = _(b"unexpected revlog kind '%s'.")
            raise error.ProgrammingError(msg % kind)
        category = pycompat.bytestr(category)
        already_registered = category in self._sidedata_computers.get(kind, [])
        if already_registered and not replace:
            msg = _(
                b"cannot register a sidedata computer twice for category '%s'."
            )
            raise error.ProgrammingError(msg % category)
        if replace and not already_registered:
            msg = _(
                b"cannot replace a sidedata computer that isn't registered "
                b"for category '%s'."
            )
            raise error.ProgrammingError(msg % category)
        self._sidedata_computers.setdefault(kind, {})
        self._sidedata_computers[kind][category] = (keys, computer, flags)


# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]

    def a():
        for vfs, src, dest in renamefiles:
            # if src and dest refer to the same file, vfs.rename is a no-op,
            # leaving both src and dest on disk. delete dest to make sure
            # the rename couldn't be such a no-op.
            vfs.tryunlink(dest)
            try:
                vfs.rename(src, dest)
            except OSError as exc:  # journal file does not yet exist
                if exc.errno != errno.ENOENT:
                    raise

    return a


def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith(b'journal')
    return os.path.join(base, name.replace(b'journal', b'undo', 1))


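# Illustrative note (not upstream commentary): undoname() maps a transaction
# journal file to its post-transaction backup name, e.g.
#
#     undoname(b'.hg/store/journal') == b'.hg/store/undo'
#
# Only the first occurrence of 'journal' in the basename is replaced.
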
def instance(ui, path, create, intents=None, createopts=None):
    localpath = urlutil.urllocalpath(path)
    if create:
        createrepository(ui, localpath, createopts=createopts)

    return makelocalrepository(ui, localpath, intents=intents)


def islocal(path):
    return True


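# Illustrative sketch (not upstream code): instance() is how the hg
# front-end opens a local path, e.g.
#
#     repo = instance(ui, b'/path/to/repo', create=False)
#
# With create=True the on-disk structure is first written out by
# createrepository() and the resulting repository is then opened.
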
def defaultcreateopts(ui, createopts=None):
    """Populate the default creation options for a repository.

    A dictionary of explicitly requested creation options can be passed
    in. Missing keys will be populated.
    """
    createopts = dict(createopts or {})

    if b'backend' not in createopts:
        # experimental config: storage.new-repo-backend
        createopts[b'backend'] = ui.config(b'storage', b'new-repo-backend')

    return createopts


def clone_requirements(ui, createopts, srcrepo):
    """clone the requirements of a local repo for a local clone

    The store requirements are unchanged while the working copy requirements
    depend on the configuration
    """
    target_requirements = set()
    if not srcrepo.requirements:
        # this is a legacy revlog "v0" repository, we cannot do anything fancy
        # with it.
        return target_requirements
    createopts = defaultcreateopts(ui, createopts=createopts)
    for r in newreporequirements(ui, createopts):
        if r in requirementsmod.WORKING_DIR_REQUIREMENTS:
            target_requirements.add(r)

    for r in srcrepo.requirements:
        if r not in requirementsmod.WORKING_DIR_REQUIREMENTS:
            target_requirements.add(r)
    return target_requirements


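# Illustrative note (not upstream commentary): for a local clone this keeps
# the source's store requirements verbatim (the store is copied or shared
# as-is), while working-copy requirements -- e.g. the dirstate format --
# follow the destination's configuration via newreporequirements().
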
def newreporequirements(ui, createopts):
    """Determine the set of requirements for a new local repository.

    Extensions can wrap this function to specify custom requirements for
    new repositories.
    """

    if b'backend' not in createopts:
        raise error.ProgrammingError(
            b'backend key not present in createopts; '
            b'was defaultcreateopts() called?'
        )

    if createopts[b'backend'] != b'revlogv1':
        raise error.Abort(
            _(
                b'unable to determine repository requirements for '
                b'storage backend: %s'
            )
            % createopts[b'backend']
        )

    requirements = {requirementsmod.REVLOGV1_REQUIREMENT}
    if ui.configbool(b'format', b'usestore'):
        requirements.add(requirementsmod.STORE_REQUIREMENT)
        if ui.configbool(b'format', b'usefncache'):
            requirements.add(requirementsmod.FNCACHE_REQUIREMENT)
            if ui.configbool(b'format', b'dotencode'):
                requirements.add(requirementsmod.DOTENCODE_REQUIREMENT)

    compengines = ui.configlist(b'format', b'revlog-compression')
    for compengine in compengines:
        if compengine in util.compengines:
            engine = util.compengines[compengine]
            if engine.available() and engine.revlogheader():
                break
    else:
        raise error.Abort(
            _(
                b'compression engines %s defined by '
                b'format.revlog-compression not available'
            )
            % b', '.join(b'"%s"' % e for e in compengines),
            hint=_(
                b'run "hg debuginstall" to list available '
                b'compression engines'
            ),
        )

    # zlib is the historical default and doesn't need an explicit requirement.
    if compengine == b'zstd':
        requirements.add(b'revlog-compression-zstd')
    elif compengine != b'zlib':
        requirements.add(b'exp-compression-%s' % compengine)

    if scmutil.gdinitconfig(ui):
        requirements.add(requirementsmod.GENERALDELTA_REQUIREMENT)
    if ui.configbool(b'format', b'sparse-revlog'):
        requirements.add(requirementsmod.SPARSEREVLOG_REQUIREMENT)

    # experimental config: format.use-dirstate-v2
    # Keep this logic in sync with `has_dirstate_v2()` in `tests/hghave.py`
    if ui.configbool(b'format', b'use-dirstate-v2'):
        requirements.add(requirementsmod.DIRSTATE_V2_REQUIREMENT)

    # experimental config: format.exp-use-copies-side-data-changeset
    if ui.configbool(b'format', b'exp-use-copies-side-data-changeset'):
        requirements.add(requirementsmod.CHANGELOGV2_REQUIREMENT)
        requirements.add(requirementsmod.COPIESSDC_REQUIREMENT)
    if ui.configbool(b'experimental', b'treemanifest'):
        requirements.add(requirementsmod.TREEMANIFEST_REQUIREMENT)

    changelogv2 = ui.config(b'format', b'exp-use-changelog-v2')
    if changelogv2 == b'enable-unstable-format-and-corrupt-my-data':
        requirements.add(requirementsmod.CHANGELOGV2_REQUIREMENT)

    revlogv2 = ui.config(b'experimental', b'revlogv2')
    if revlogv2 == b'enable-unstable-format-and-corrupt-my-data':
        requirements.discard(requirementsmod.REVLOGV1_REQUIREMENT)
        requirements.add(requirementsmod.REVLOGV2_REQUIREMENT)
    # experimental config: format.internal-phase
    if ui.configbool(b'format', b'internal-phase'):
        requirements.add(requirementsmod.INTERNAL_PHASE_REQUIREMENT)

    if createopts.get(b'narrowfiles'):
        requirements.add(requirementsmod.NARROW_REQUIREMENT)

    if createopts.get(b'lfs'):
        requirements.add(b'lfs')

    if ui.configbool(b'format', b'bookmarks-in-store'):
        requirements.add(requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT)

    if ui.configbool(b'format', b'use-persistent-nodemap'):
        requirements.add(requirementsmod.NODEMAP_REQUIREMENT)

    # if share-safe is enabled, let's create the new repository with the new
    # requirement
    if ui.configbool(b'format', b'use-share-safe'):
        requirements.add(requirementsmod.SHARESAFE_REQUIREMENT)

    # if we are creating a share-repo¹ we have to handle requirement
    # differently.
    #
    # [1] (i.e. reusing the store from another repository, just having a
    # working copy)
    if b'sharedrepo' in createopts:
        source_requirements = set(createopts[b'sharedrepo'].requirements)

        if requirementsmod.SHARESAFE_REQUIREMENT not in source_requirements:
            # share to an old school repository, we have to copy the
            # requirements and hope for the best.
            requirements = source_requirements
        else:
            # We have control on the working copy only, so "copy" the non
            # working copy part over, ignoring previous logic.
            to_drop = set()
            for req in requirements:
                if req in requirementsmod.WORKING_DIR_REQUIREMENTS:
                    continue
                if req in source_requirements:
                    continue
                to_drop.add(req)
            requirements -= to_drop
            requirements |= source_requirements

        if createopts.get(b'sharedrelative'):
            requirements.add(requirementsmod.RELATIVE_SHARED_REQUIREMENT)
        else:
            requirements.add(requirementsmod.SHARED_REQUIREMENT)

    if ui.configbool(b'format', b'use-dirstate-tracked-hint'):
        version = ui.configint(b'format', b'use-dirstate-tracked-hint.version')
        msg = _(b"ignoring unknown tracked key version: %d\n")
        hint = _(
            b"see `hg help config.format.use-dirstate-tracked-hint-version`"
        )
        if version != 1:
            ui.warn(msg % version, hint=hint)
        else:
            requirements.add(requirementsmod.DIRSTATE_TRACKED_HINT_V1)

    return requirements


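# Illustrative note (not upstream commentary): with stock configuration the
# set returned above typically includes at least 'revlogv1', 'store',
# 'fncache', 'dotencode', 'generaldelta' and 'sparserevlog'; knobs such as
# format.use-share-safe or format.use-persistent-nodemap contribute their
# corresponding requirements on top.
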
def checkrequirementscompat(ui, requirements):
    """Checks compatibility of repository requirements enabled and disabled.

    Returns a set of requirements which need to be dropped because dependent
    requirements are not enabled. Also warns users about it."""

    dropped = set()

    if requirementsmod.STORE_REQUIREMENT not in requirements:
        if requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT in requirements:
            ui.warn(
                _(
                    b'ignoring enabled \'format.bookmarks-in-store\' config '
                    b'because it is incompatible with disabled '
                    b'\'format.usestore\' config\n'
                )
            )
            dropped.add(requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT)

        if (
            requirementsmod.SHARED_REQUIREMENT in requirements
            or requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements
        ):
            raise error.Abort(
                _(
                    b"cannot create shared repository as source was created"
                    b" with 'format.usestore' config disabled"
                )
            )

        if requirementsmod.SHARESAFE_REQUIREMENT in requirements:
            if ui.hasconfig(b'format', b'use-share-safe'):
                msg = _(
                    b"ignoring enabled 'format.use-share-safe' config because "
                    b"it is incompatible with disabled 'format.usestore'"
                    b" config\n"
                )
                ui.warn(msg)
            dropped.add(requirementsmod.SHARESAFE_REQUIREMENT)

    return dropped


def filterknowncreateopts(ui, createopts):
    """Filters a dict of repo creation options against options that are known.

    Receives a dict of repo creation options and returns a dict of those
    options that we don't know how to handle.

    This function is called as part of repository creation. If the
    returned dict contains any items, repository creation will not
    be allowed, as it means there was a request to create a repository
    with options not recognized by loaded code.

    Extensions can wrap this function to filter out creation options
    they know how to handle.
    """
    known = {
        b'backend',
        b'lfs',
        b'narrowfiles',
        b'sharedrepo',
        b'sharedrelative',
        b'shareditems',
        b'shallowfilestore',
    }

    return {k: v for k, v in createopts.items() if k not in known}


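# Illustrative sketch (not upstream code): any option outside the known set
# above is reported back, which makes createrepository() refuse it, e.g.
#
#     filterknowncreateopts(ui, {b'lfs': True, b'frobnicate': 1})
#     # -> {b'frobnicate': 1}
#
# ('frobnicate' is a made-up option, used purely for illustration.)
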
3779 def createrepository(ui, path, createopts=None, requirements=None):
3780 def createrepository(ui, path, createopts=None, requirements=None):
3780 """Create a new repository in a vfs.
3781 """Create a new repository in a vfs.
3781
3782
3782 ``path`` path to the new repo's working directory.
3783 ``path`` path to the new repo's working directory.
3783 ``createopts`` options for the new repository.
3784 ``createopts`` options for the new repository.
3784 ``requirement`` predefined set of requirements.
3785 ``requirement`` predefined set of requirements.
3785 (incompatible with ``createopts``)
3786 (incompatible with ``createopts``)
3786
3787
3787 The following keys for ``createopts`` are recognized:
3788 The following keys for ``createopts`` are recognized:
3788
3789
3789 backend
3790 backend
3790 The storage backend to use.
3791 The storage backend to use.
3791 lfs
3792 lfs
3792 Repository will be created with ``lfs`` requirement. The lfs extension
3793 Repository will be created with ``lfs`` requirement. The lfs extension
3793 will automatically be loaded when the repository is accessed.
3794 will automatically be loaded when the repository is accessed.
3794 narrowfiles
3795 narrowfiles
3795 Set up repository to support narrow file storage.
3796 Set up repository to support narrow file storage.
3796 sharedrepo
3797 sharedrepo
3797 Repository object from which storage should be shared.
3798 Repository object from which storage should be shared.
3798 sharedrelative
3799 sharedrelative
3799 Boolean indicating if the path to the shared repo should be
3800 Boolean indicating if the path to the shared repo should be
3800 stored as relative. By default, the pointer to the "parent" repo
3801 stored as relative. By default, the pointer to the "parent" repo
3801 is stored as an absolute path.
3802 is stored as an absolute path.
3802 shareditems
3803 shareditems
3803 Set of items to share to the new repository (in addition to storage).
3804 Set of items to share to the new repository (in addition to storage).
3804 shallowfilestore
3805 shallowfilestore
3805 Indicates that storage for files should be shallow (not all ancestor
3806 Indicates that storage for files should be shallow (not all ancestor
3806 revisions are known).
3807 revisions are known).
3807 """
3808 """
3808
3809
    if requirements is not None:
        if createopts is not None:
            msg = b'cannot specify both createopts and requirements'
            raise error.ProgrammingError(msg)
        createopts = {}
    else:
        createopts = defaultcreateopts(ui, createopts=createopts)

    unknownopts = filterknowncreateopts(ui, createopts)

    if not isinstance(unknownopts, dict):
        raise error.ProgrammingError(
            b'filterknowncreateopts() did not return a dict'
        )

    if unknownopts:
        raise error.Abort(
            _(
                b'unable to create repository because of unknown '
                b'creation option: %s'
            )
            % b', '.join(sorted(unknownopts)),
            hint=_(b'is a required extension not loaded?'),
        )

    requirements = newreporequirements(ui, createopts=createopts)
    requirements -= checkrequirementscompat(ui, requirements)

    wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)

    hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
    if hgvfs.exists():
        raise error.RepoError(_(b'repository %s already exists') % path)

    if b'sharedrepo' in createopts:
        sharedpath = createopts[b'sharedrepo'].sharedpath

        if createopts.get(b'sharedrelative'):
            try:
                sharedpath = os.path.relpath(sharedpath, hgvfs.base)
                sharedpath = util.pconvert(sharedpath)
            except (IOError, ValueError) as e:
                # ValueError is raised on Windows if the drive letters differ
                # on each path.
                raise error.Abort(
                    _(b'cannot calculate relative path'),
                    hint=stringutil.forcebytestr(e),
                )

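The try/except above guards a Windows-specific failure mode of ``os.path.relpath``; a standalone sketch of that behavior (the drive letters are illustrative):

```python
import os

# On Windows, two paths on different drives share no common root, so
# os.path.relpath raises ValueError instead of returning a path.
# On POSIX this never triggers, which is why the except branch above
# is effectively Windows-only.
try:
    rel = os.path.relpath(r'D:\store\repo', start=r'C:\work\clone\.hg')
except ValueError as exc:
    # Mirrors the error.Abort above: report the reason rather than crash.
    print('cannot calculate relative path:', exc)
```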
    if not wdirvfs.exists():
        wdirvfs.makedirs()

    hgvfs.makedir(notindexed=True)
    if b'sharedrepo' not in createopts:
        hgvfs.mkdir(b'cache')
        hgvfs.mkdir(b'wcache')

    has_store = requirementsmod.STORE_REQUIREMENT in requirements
    if has_store and b'sharedrepo' not in createopts:
        hgvfs.mkdir(b'store')

        # We create an invalid changelog outside the store so very old
        # Mercurial versions (which didn't know about the requirements
        # file) encounter an error on reading the changelog. This
        # effectively locks out old clients and prevents them from
        # mucking with a repo in an unknown format.
        #
        # The revlog header has version 65535, which won't be recognized by
        # such old clients.
        hgvfs.append(
            b'00changelog.i',
            b'\0\0\xFF\xFF dummy changelog to prevent using the old repo '
            b'layout',
        )

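To see why version 65535 locks old clients out, unpack the header the way a revlog reader would; the first four index bytes form a big-endian integer whose high 16 bits are feature flags and low 16 bits the format version (a sketch of that decoding, using only the stdlib):

```python
import struct

# The bytes written to the out-of-store 00changelog.i above.
data = b'\0\0\xFF\xFF dummy changelog to prevent using the old repo layout'

header = struct.unpack('>I', data[:4])[0]
flags = header >> 16       # upper 16 bits: feature flags -> 0
version = header & 0xFFFF  # lower 16 bits: format version -> 65535
assert (flags, version) == (0, 65535)
# No released revlog format uses version 65535, so a pre-requirements
# client aborts with an unknown-version error instead of misreading
# the modern repository layout.
```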
    # Filter the requirements into working copy and store ones.
    wcreq, storereq = scmutil.filterrequirements(requirements)
    # Write the working copy ones.
    scmutil.writerequires(hgvfs, wcreq)
    # If there are store requirements and the current repository is not
    # a shared one, write the store requirements. For a new shared
    # repository we don't need to write them, as they are already
    # present in the shared source's store requires.
    if storereq and b'sharedrepo' not in createopts:
        storevfs = vfsmod.vfs(hgvfs.join(b'store'), cacheaudited=True)
        scmutil.writerequires(storevfs, storereq)

    # Write out a file telling readers where to find the shared store.
    if b'sharedrepo' in createopts:
        hgvfs.write(b'sharedpath', sharedpath)

    if createopts.get(b'shareditems'):
        shared = b'\n'.join(sorted(createopts[b'shareditems'])) + b'\n'
        hgvfs.write(b'shared', shared)


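A toy model of the split performed by ``scmutil.filterrequirements``; the key set below is an assumption for illustration only, the real list lives in ``mercurial/requirements.py`` and differs from this sketch:

```python
# Illustrative stand-in for the requirements Mercurial keeps in
# ``.hg/requires`` (working copy) even when a store exists.
WORKING_DIR_REQUIREMENTS = {b'share-safe', b'dirstate-v2'}


def filter_requirements(requirements):
    """Split a requirements set into (working copy, store) sets."""
    wcreq = requirements & WORKING_DIR_REQUIREMENTS
    storereq = requirements - WORKING_DIR_REQUIREMENTS
    return wcreq, storereq


wc, store = filter_requirements({b'share-safe', b'store', b'revlogv1'})
assert wc == {b'share-safe'}
assert store == {b'store', b'revlogv1'}
```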
def poisonrepository(repo):
    """Poison a repository instance so it can no longer be used."""
    # Perform any cleanup on the instance.
    repo.close()

    # Our strategy is to replace the type of the object with one that
    # has all attribute lookups result in error.
    #
    # But we have to allow the close() method because some constructors
    # of repos call close() on repo references.
    class poisonedrepository:
        def __getattribute__(self, item):
            if item == 'close':
                return object.__getattribute__(self, item)

            raise error.ProgrammingError(
                b'repo instances should not be used after unshare'
            )

        def close(self):
            pass

    # We may have a repoview, which intercepts __setattr__. So be sure
    # we operate at the lowest level possible.
    object.__setattr__(repo, '__class__', poisonedrepository)
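The ``__class__`` swap used above is plain Python and can be tried outside Mercurial; a self-contained sketch of the same pattern (all names here are illustrative, not Mercurial APIs):

```python
class Resource:
    def close(self):
        print('closed')

    def read(self):
        return 'data'


def poison(obj, allowed=('close',)):
    # Swap the object's type for one whose attribute lookups all fail,
    # except the names we explicitly allow.
    class _Poisoned:
        def __getattribute__(self, item):
            if item in allowed:
                return object.__getattribute__(self, item)
            raise RuntimeError('object used after poisoning: %s' % item)

        def close(self):
            pass

    # object.__setattr__ bypasses any __setattr__ override on obj's
    # class, mirroring the repoview concern in poisonrepository().
    object.__setattr__(obj, '__class__', _Poisoned)


r = Resource()
poison(r)
r.close()  # allowed; now a no-op rather than the original close()
try:
    r.read()  # any other attribute access raises
except RuntimeError as exc:
    print(exc)
```

Reassigning ``__class__`` only works when the two types have compatible layouts, which holds for ordinary Python classes like these; that is why the poisoned stand-in is a plain class rather than a slotted or C-backed type.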
@@ -1,78 +1,96 @@
'''This is the last release to support Python 2. Mercurial is Python 3 only starting with 6.2'''

+= Mercurial 6.1.2 =
+
+ * Improve the Windows test suite
+ * Fix `debuglock` not ignoring a missing lockfile when forcing a lock
+ * Improve the help for `ui.large-file-limit`
+ * Set the large-file-limit to 10MB (from 10MiB) for clarity
+ * While rewriting desc hashes, ignore ambiguous prefix "hashes"
+ * Fix a crash in partial amend with copies
+ * Fix a py3 compatibility bug
+ * Fix incorrect metadata causing dirstate-v2 data loss in an edge case
+ * Fix cleanup of old dirstate-v2 data files when using `rhg`
+ * Make the reference to `.hg/last_message.txt` relative in commit
+ * Fix an infinite hang when `rhg` is used in the background
+ * Fix a Python DLL loading bug on Windows
+ * Add a `--docket` flag to `debugstate` to inspect dirstate-v2 metadata
+ * Remove `debugdirstateignorepatternhash` in favor of `debugstate --docket`
+ * Fix incorrect metadata causing a systematic complete dirstate-v2 rewrite
+
= Mercurial 6.1.1 =

 * Fix Rust compilation on `aarch64`
 * Fix Rust compilation on architectures where `char` is unsigned
 * When the merge tool uses `$output`, don't leave markers in `$local`
 * Improve test suite support on big-endian platforms
 * Cap the number of concurrent threads to 16 in Rust `hg status` to prevent a huge speed regression at higher thread counts
 * Fix `amend` with copies in extras
 * Abort if the commit we're trying to `unamend` was not created by `hg [un]amend`
 * Fix the file name in the pullbundle help text
 * Fix an issue with data not being correctly reset in the C implementation of dirstate-v2
 * Fix issue6673 where some tags were missing from cache after a merge
 * Fix stream-cloning a repo with empty requirements
 * Fix a false warning about content-divergence creation
 * Fix silly blackbox entries when hg is interrupted
 * Fix unsoundness (no known exploits) in Rust extensions (see cfd270d83169 and dd6b67d5c256)
 * Fix Rust dirstate counters not being updated correctly, leading to some potential bugs (none known)
 * Stop relying on a compiler implementation detail in Rust HgPath

= Mercurial 6.1 =

== New Features ==
 * Added a way of specifying required extensions that prevent Mercurial from starting if they are not found. See `hg help config.extensions`.
 * Merge conflict markers have been made clearer (see backwards compatibility below)
 * Improve detailed error codes
 * Added a hint about mangled whitespace on bad patch
 * Explain the order in which commits are presented in `chistedit`
 * Introduce a `dirstate-tracked-hint` feature to help automation keep track of changes to tracked files. See `hg help config.use-dirstate-tracked-hint`.
 * Shared repositories can be upgraded if the upgrade is specific to the share. For now, this only applies to `dirstate-v2` and `dirstate-tracked-hint`.
 * When using the `narrow` extension, non-conflicting changes to files outside of the narrow specification can now be merged.
 * When cloning a repository using stream-clone, the client can now control the repository format variants to use as long as the stream content does not restrict that variant.

== Default Format Change ==

These changes affect newly created repositories (or new clones) done with Mercurial 6.1.

 * The `share-safe` format variant is now enabled by default. It makes configuration and requirements more consistent across repositories and their shares. This introduces a behavior change as shares from a repository using the new format will also use their main repository's configuration. See `hg help config.format.use-share-safe` for details about the feature and the available options for auto-upgrading existing shares.


== New Experimental Features ==
 * The pure Rust version of Mercurial called `rhg` added support for the most common invocations of `hg status`. See `hg help rust.rhg` for details on how to try it out.
 * `rhg` supports narrow clones and sparse checkouts.

== Bug Fixes ==

 * '''Obsolete revisions are skipped while computing heads. In conjunction with the `evolve` extension >= 10.5.0, this leads to massive exchange (push/pull) speedups in repositories with a lot of heads and/or obsolete revisions.'''
 * Stream-clones now properly advertise all requirements needed. This can result in the stream-clone feature being disabled for some clients using < 6.0.2. A small bugfix patch for these older clients is available if necessary.
 * The `--no-check` and `--no-merge` flags now properly override the behavior from `commands.update.check`
 * `rhg`'s fallback detection is more robust in general in the presence of more advanced configs
 * `rhg`'s `blackbox` now supports milliseconds by default and uses the same ISO 8601 format as the Python implementation
 * Fix `rhg` crash on non-generaldelta revlogs
 * The `lfs`, `largefiles` and `sparse` extensions now correctly take the appropriate lock before writing requirements
 * The `notify` extension no longer produces errors if a revision is not found
 * Remove an unnecessary and overly strict check for divergence in `hg fix`
 * Windows compatibility improvements
 * Miscellaneous Python 3 and typing improvements
 * Many other small or internal fixes

== Backwards Compatibility Changes ==

 * The use of `share-safe` means shares (of new repositories) will also use their main repository's configuration; see the `Default Format Change` section for details.
 * The fix to stream-clone requirements advertising means some requirements previously (wrongly) omitted are now sent. This can confuse clients using a Mercurial version < 6.0.2: such a client would consider these requirements unsupported even though it actually knows them. If you encounter this, either upgrade your client or apply the necessary patch.
 * The labels passed to merge tools have changed slightly. Merge tools can get labels passed to them if you include `$labellocal`, `$labelbase`, and/or `$labelother` in the `merge-tool.<tool name>.args` configuration. These labels used to have some space-padding, and truncation to fit within 72 columns. Both the padding and the truncation have been removed.
 * Some of the text in labels passed to merge tools has changed. For example, in conflicts while running `hg histedit`, the labels used to be "local", "base", and "histedit". They are now "already edited", "parent of current change", and "current change", respectively.
 * The computation of namedbranch heads now ignores obsolete revisions. Users of the Changeset Evolution feature may see a change in the location and number of heads of each named branch (and topic).
 * The remotefilelog extension now requires an appropriate excludepattern for subrepositories.
 * `debugsparse`'s interface has been reworked to be more flexible. Since it's a debug command this is not actually a breaking change, but it is worth noting here.
 * Temporary files for merges are now all inside a `hgmerge` directory instead of at the root of `TMPDIR`. The corresponding experimental `mergetempdirprefix` config option has been removed.

== Internal API Changes ==

 * The dirstate API received minor modifications.

== Miscellaneous ==

 * Removed exchange-v2. It was a prototype that had never been in a working state and had been left untouched since 2017.
@@ -1,1313 +1,1325 @@
# This file is automatically @generated by Cargo.
# It is not intended for manual editing.
version = 3

[[package]]
name = "Inflector"
version = "0.11.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fe438c63458706e03479442743baae6c88256498e6431708f6dfc520a26515d3"

[[package]]
name = "adler"
version = "0.2.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ee2a4ec343196209d6594e19543ae87a39f96d5534d7174822a3ad825dd6ed7e"

[[package]]
name = "ahash"
version = "0.4.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "739f4a8db6605981345c5654f3a85b056ce52f37a39d34da03f25bf2151ea16e"

[[package]]
name = "aho-corasick"
version = "0.7.18"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1e37cfd5e7657ada45f742d6e99ca5788580b5c529dc78faf11ece6dc702656f"
dependencies = [
 "memchr",
]

[[package]]
name = "aliasable"
version = "0.1.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "250f629c0161ad8107cf89319e990051fae62832fd343083bea452d93e2205fd"

[[package]]
name = "ansi_term"
version = "0.12.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d52a9bb7ec0cf484c551830a7ce27bd20d67eac647e1befb56b0be4ee39a55d2"
dependencies = [
 "winapi",
]

[[package]]
name = "atty"
version = "0.2.14"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8"
dependencies = [
 "hermit-abi",
 "libc",
 "winapi",
]

[[package]]
name = "autocfg"
version = "1.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cdb031dd78e28731d87d56cc8ffef4a8f36ca26c38fe2de700543e627f8a464a"

[[package]]
name = "bitflags"
version = "1.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a"

[[package]]
name = "bitmaps"
version = "2.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "031043d04099746d8db04daf1fa424b2bc8bd69d92b25962dcde24da39ab64a2"
dependencies = [
 "typenum",
]

[[package]]
name = "block-buffer"
version = "0.9.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4152116fd6e9dadb291ae18fc1ec3575ed6d84c29642d97890f4b4a3417297e4"
dependencies = [
 "generic-array",
]

[[package]]
name = "block-buffer"
version = "0.10.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0bf7fe51849ea569fd452f37822f606a5cabb684dc918707a0193fd4664ff324"
dependencies = [
 "generic-array",
]

[[package]]
name = "byteorder"
version = "1.4.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "14c189c53d098945499cdfa7ecc63567cf3886b3332b312a5b4585d8d3a6a610"

[[package]]
name = "bytes-cast"
version = "0.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0d434f9a4ecbe987e7ccfda7274b6f82ea52c9b63742565a65cb5e8ba0f2c452"
dependencies = [
 "bytes-cast-derive",
]

[[package]]
name = "bytes-cast-derive"
version = "0.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cb936af9de38476664d6b58e529aff30d482e4ce1c5e150293d00730b0d81fdb"
dependencies = [
 "proc-macro2",
 "quote",
 "syn",
]

[[package]]
name = "cc"
version = "1.0.66"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4c0496836a84f8d0495758516b8621a622beb77c0fed418570e50764093ced48"
dependencies = [
 "jobserver",
]

[[package]]
name = "cfg-if"
version = "0.1.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822"

[[package]]
name = "cfg-if"
version = "1.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"

[[package]]
name = "chrono"
version = "0.4.19"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "670ad68c9088c2a963aaa298cb369688cf3f9465ce5e2d4ca10e6e0098a1ce73"
dependencies = [
 "libc",
 "num-integer",
 "num-traits",
 "time",
 "winapi",
]

[[package]]
name = "clap"
version = "2.34.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a0610544180c38b88101fecf2dd634b174a62eef6946f84dfc6a7127512b381c"
dependencies = [
 "ansi_term",
 "atty",
 "bitflags",
 "strsim",
 "textwrap",
 "unicode-width",
 "vec_map",
]

[[package]]
name = "const_fn"
version = "0.4.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cd51eab21ab4fd6a3bf889e2d0958c0a6e3a61ad04260325e919e652a2a62826"

[[package]]
name = "convert_case"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6245d59a3e82a7fc217c5828a6692dbc6dfb63a0c8c90495621f7b9d79704a0e"

[[package]]
name = "cpufeatures"
version = "0.1.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ed00c67cb5d0a7d64a44f6ad2668db7e7530311dd53ea79bcd4fb022c64911c8"
dependencies = [
 "libc",
]

[[package]]
name = "cpufeatures"
version = "0.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "95059428f66df56b63431fdb4e1947ed2190586af5c5a8a8b71122bdf5a7f469"
dependencies = [
 "libc",
]

[[package]]
name = "cpython"
version = "0.7.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b7d46ba8ace7f3a1d204ac5060a706d0a68de6b42eafb6a586cc08bebcffe664"
dependencies = [
 "libc",
 "num-traits",
 "paste",
 "python3-sys",
]

[[package]]
name = "crc32fast"
version = "1.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "81156fece84ab6a9f2afdb109ce3ae577e42b1228441eded99bd77f627953b1a"
dependencies = [
 "cfg-if 1.0.0",
]

[[package]]
name = "crossbeam-channel"
version = "0.4.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b153fe7cbef478c567df0f972e02e6d736db11affe43dfc9c56a9374d1adfb87"
dependencies = [
 "crossbeam-utils 0.7.2",
 "maybe-uninit",
]

[[package]]
name = "crossbeam-channel"
version = "0.5.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e54ea8bc3fb1ee042f5aace6e3c6e025d3874866da222930f70ce62aceba0bfa"
dependencies = [
 "cfg-if 1.0.0",
 "crossbeam-utils 0.8.1",
]

[[package]]
name = "crossbeam-deque"
version = "0.8.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "94af6efb46fef72616855b036a624cf27ba656ffc9be1b9a3c931cfc7749a9a9"
dependencies = [
 "cfg-if 1.0.0",
 "crossbeam-epoch",
 "crossbeam-utils 0.8.1",
]

[[package]]
name = "crossbeam-epoch"
version = "0.9.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a1aaa739f95311c2c7887a76863f500026092fb1dce0161dab577e559ef3569d"
dependencies = [
 "cfg-if 1.0.0",
 "const_fn",
 "crossbeam-utils 0.8.1",
 "lazy_static",
 "memoffset",
 "scopeguard",
]

[[package]]
name = "crossbeam-utils"
version = "0.7.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c3c7c73a2d1e9fc0886a08b93e98eb643461230d5f1925e4036204d5f2e261a8"
dependencies = [
 "autocfg",
 "cfg-if 0.1.10",
 "lazy_static",
]

[[package]]
name = "crossbeam-utils"
version = "0.8.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "02d96d1e189ef58269ebe5b97953da3274d83a93af647c2ddd6f9dab28cedb8d"
dependencies = [
 "autocfg",
 "cfg-if 1.0.0",
 "lazy_static",
]

[[package]]
name = "crypto-common"
version = "0.1.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a4600d695eb3f6ce1cd44e6e291adceb2cc3ab12f20a33777ecd0bf6eba34e06"
dependencies = [
 "generic-array",
]

[[package]]
name = "ctor"
version = "0.1.16"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7fbaabec2c953050352311293be5c6aba8e141ba19d6811862b232d6fd020484"
dependencies = [
 "quote",
 "syn",
]

[[package]]
name = "derive_more"
version = "0.99.17"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4fb810d30a7c1953f91334de7244731fc3f3c10d7fe163338a35b9f640960321"
dependencies = [
 "convert_case",
 "proc-macro2",
 "quote",
 "rustc_version",
 "syn",
]

[[package]]
name = "diff"
version = "0.1.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0e25ea47919b1560c4e3b7fe0aaab9becf5b84a10325ddf7db0f0ba5e1026499"

[[package]]
name = "digest"
version = "0.9.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d3dd60d1080a57a05ab032377049e0591415d2b31afd7028356dbf3cc6dcb066"
dependencies = [
 "generic-array",
]

[[package]]
name = "digest"
version = "0.10.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8cb780dce4f9a8f5c087362b3a4595936b2019e7c8b30f2c3e9a7e94e6ae9837"
dependencies = [
 "block-buffer 0.10.2",
 "crypto-common",
]

[[package]]
name = "either"
version = "1.6.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e78d4f1cc4ae33bbfc157ed5d5a5ef3bc29227303d595861deb238fcec4e9457"

[[package]]
name = "env_logger"
version = "0.9.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0b2cf0344971ee6c64c31be0d530793fba457d322dfec2810c453d0ef228f9c3"
dependencies = [
 "atty",
 "humantime",
 "log",
 "regex",
 "termcolor",
]

[[package]]
name = "fastrand"
version = "1.7.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c3fcf0cee53519c866c09b5de1f6c56ff9d647101f81c1964fa632e148896cdf"
dependencies = [
 "instant",
]

[[package]]
name = "flate2"
version = "1.0.22"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1e6988e897c1c9c485f43b47a529cef42fde0547f9d8d41a7062518f1d8fc53f"
dependencies = [
 "cfg-if 1.0.0",
 "crc32fast",
 "libc",
 "libz-sys",
 "miniz_oxide",
]

[[package]]
name = "format-bytes"
version = "0.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "48942366ef93975da38e175ac9e10068c6fc08ca9e85930d4f098f4d5b14c2fd"
dependencies = [
 "format-bytes-macros",
]

[[package]]
name = "format-bytes-macros"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "203aadebefcc73d12038296c228eabf830f99cba991b0032adf20e9fa6ce7e4f"
dependencies = [
 "proc-macro2",
 "quote",
 "syn",
]

[[package]]
name = "generic-array"
version = "0.14.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "501466ecc8a30d1d3b7fc9229b122b2ce8ed6e9d9223f1138d4babb253e51817"
dependencies = [
 "typenum",
 "version_check",
]

[[package]]
name = "getrandom"
version = "0.1.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fc587bc0ec293155d5bfa6b9891ec18a1e330c234f896ea47fbada4cadbe47e6"
dependencies = [
 "cfg-if 0.1.10",
 "libc",
 "wasi 0.9.0+wasi-snapshot-preview1",
]

[[package]]
name = "getrandom"
version = "0.2.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "418d37c8b1d42553c93648be529cb70f920d3baf8ef469b74b9638df426e0b4c"
dependencies = [
 "cfg-if 1.0.0",
 "libc",
 "wasi 0.10.0+wasi-snapshot-preview1",
]

[[package]]
name = "glob"
version = "0.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9b919933a397b79c37e33b77bb2aa3dc8eb6e165ad809e58ff75bc7db2e34574"

[[package]]
name = "hashbrown"
version = "0.9.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d7afe4a420e3fe79967a00898cc1f4db7c8a49a9333a29f8a4bd76a253d5cd04"
dependencies = [
 "ahash",
 "rayon",
]

[[package]]
name = "hermit-abi"
version = "0.1.17"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5aca5565f760fb5b220e499d72710ed156fdb74e631659e99377d9ebfbd13ae8"
dependencies = [
 "libc",
]

[[package]]
name = "hex"
version = "0.4.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70"

[[package]]
name = "hg-core"
version = "0.1.0"
dependencies = [
 "bitflags",
 "byteorder",
 "bytes-cast",
 "clap",
 "crossbeam-channel 0.4.4",
 "derive_more",
 "flate2",
 "format-bytes",
 "hashbrown",
 "home",
 "im-rc",
 "itertools 0.10.3",
 "lazy_static",
 "libc",
 "log",
 "memmap2",
 "micro-timer 0.3.1",
 "ouroboros",
 "pretty_assertions",
 "rand 0.8.5",
 "rand_distr",
 "rand_pcg",
 "rayon",
 "regex",
 "same-file",
 "sha-1 0.10.0",
 "tempfile",
 "twox-hash",
 "zstd",
]

[[package]]
name = "hg-cpython"
version = "0.1.0"
dependencies = [
 "cpython",
 "crossbeam-channel 0.5.2",
 "env_logger",
 "hg-core",
 "libc",
 "log",
 "stable_deref_trait",
 "vcsgraph",
]

[[package]]
name = "home"
version = "0.5.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2456aef2e6b6a9784192ae780c0f15bc57df0e918585282325e8c8ac27737654"
dependencies = [
 "winapi",
]

[[package]]
name = "humantime"
version = "2.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4"

[[package]]
name = "im-rc"
version = "15.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3ca8957e71f04a205cb162508f9326aea04676c8dfd0711220190d6b83664f3f"
dependencies = [
 "bitmaps",
 "rand_core 0.5.1",
 "rand_xoshiro",
 "sized-chunks",
 "typenum",
 "version_check",
]

[[package]]
name = "instant"
version = "0.1.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7a5bbe824c507c5da5956355e86a746d82e0e1464f65d862cc5e71da70e94b2c"
dependencies = [
 "cfg-if 1.0.0",
]

[[package]]
name = "itertools"
version = "0.9.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "284f18f85651fe11e8a991b2adb42cb078325c996ed026d994719efcfca1d54b"
dependencies = [
 "either",
]

[[package]]
name = "itertools"
version = "0.10.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a9a9d19fa1e79b6215ff29b9d6880b706147f16e9b1dbb1e4e5947b5b02bc5e3"
dependencies = [
 "either",
]

[[package]]
name = "jobserver"
version = "0.1.21"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5c71313ebb9439f74b00d9d2dcec36440beaf57a6aa0623068441dd7cd81a7f2"
dependencies = [
 "libc",
]

[[package]]
name = "lazy_static"
version = "1.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646"

[[package]]
name = "libc"
-version = "0.2.119"
+version = "0.2.124"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1bf2e165bb3457c8e098ea76f3e3bc9db55f87aa90d52d0e6be741470916aaa4"
+checksum = "21a41fed9d98f27ab1c6d161da622a4fa35e8a54a8adc24bbf3ddd0ef70b0e50"

[[package]]
name = "libm"
version = "0.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c7d73b3f436185384286bd8098d17ec07c9a7d2388a6599f824d8502b529702a"

[[package]]
name = "libz-sys"
version = "1.1.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "602113192b08db8f38796c4e85c39e960c145965140e918018bcde1952429655"
dependencies = [
 "cc",
 "pkg-config",
 "vcpkg",
]

[[package]]
name = "log"
version = "0.4.14"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "51b9bbe6c47d51fc3e1a9b945965946b4c44142ab8792c50835a980d362c2710"
dependencies = [
 "cfg-if 1.0.0",
]

[[package]]
name = "maybe-uninit"
version = "2.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "60302e4db3a61da70c0cb7991976248362f30319e88850c487b9b95bbf059e00"

[[package]]
name = "memchr"
version = "2.4.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "308cc39be01b73d0d18f82a0e7b2a3df85245f84af96fdddc5d202d27e47b86a"

[[package]]
name = "memmap2"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "de5d3112c080d58ce560081baeaab7e1e864ca21795ddbf533d5b1842bb1ecf8"
dependencies = [
 "libc",
 "stable_deref_trait",
]

[[package]]
name = "memoffset"
version = "0.6.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "157b4208e3059a8f9e78d559edc658e13df41410cb3ae03979c83130067fdd87"
dependencies = [
 "autocfg",
]

[[package]]
name = "micro-timer"
version = "0.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2620153e1d903d26b72b89f0e9c48d8c4756cba941c185461dddc234980c298c"
dependencies = [
 "micro-timer-macros 0.3.1",
 "scopeguard",
]

[[package]]
name = "micro-timer"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5de32cb59a062672560d6f0842c4aa7714727457b9fe2daf8987d995a176a405"
dependencies = [
 "micro-timer-macros 0.4.0",
 "scopeguard",
]

[[package]]
name = "micro-timer-macros"
version = "0.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e28a3473e6abd6e9aab36aaeef32ad22ae0bd34e79f376643594c2b152ec1c5d"
dependencies = [
 "proc-macro2",
 "quote",
 "scopeguard",
 "syn",
]

[[package]]
name = "micro-timer-macros"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cee948b94700125b52dfb68dd17c19f6326696c1df57f92c05ee857463c93ba1"
dependencies = [
 "proc-macro2",
 "quote",
 "scopeguard",
 "syn",
]

[[package]]
name = "miniz_oxide"
version = "0.4.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0f2d26ec3309788e423cfbf68ad1800f061638098d76a83681af979dc4eda19d"
dependencies = [
704 "adler",
704 "adler",
705 "autocfg",
705 "autocfg",
706 ]
706 ]
707
707
708 [[package]]
708 [[package]]
709 name = "num-integer"
709 name = "num-integer"
710 version = "0.1.44"
710 version = "0.1.44"
711 source = "registry+https://github.com/rust-lang/crates.io-index"
711 source = "registry+https://github.com/rust-lang/crates.io-index"
712 checksum = "d2cc698a63b549a70bc047073d2949cce27cd1c7b0a4a862d08a8031bc2801db"
712 checksum = "d2cc698a63b549a70bc047073d2949cce27cd1c7b0a4a862d08a8031bc2801db"
713 dependencies = [
713 dependencies = [
714 "autocfg",
714 "autocfg",
715 "num-traits",
715 "num-traits",
716 ]
716 ]
717
717
718 [[package]]
718 [[package]]
719 name = "num-traits"
719 name = "num-traits"
720 version = "0.2.14"
720 version = "0.2.14"
721 source = "registry+https://github.com/rust-lang/crates.io-index"
721 source = "registry+https://github.com/rust-lang/crates.io-index"
722 checksum = "9a64b1ec5cda2586e284722486d802acf1f7dbdc623e2bfc57e65ca1cd099290"
722 checksum = "9a64b1ec5cda2586e284722486d802acf1f7dbdc623e2bfc57e65ca1cd099290"
723 dependencies = [
723 dependencies = [
724 "autocfg",
724 "autocfg",
725 "libm",
725 "libm",
726 ]
726 ]
727
727
728 [[package]]
728 [[package]]
729 name = "num_cpus"
729 name = "num_cpus"
730 version = "1.13.0"
730 version = "1.13.0"
731 source = "registry+https://github.com/rust-lang/crates.io-index"
731 source = "registry+https://github.com/rust-lang/crates.io-index"
732 checksum = "05499f3756671c15885fee9034446956fff3f243d6077b91e5767df161f766b3"
732 checksum = "05499f3756671c15885fee9034446956fff3f243d6077b91e5767df161f766b3"
733 dependencies = [
733 dependencies = [
734 "hermit-abi",
734 "hermit-abi",
735 "libc",
735 "libc",
736 ]
736 ]
737
737
738 [[package]]
738 [[package]]
739 name = "opaque-debug"
739 name = "opaque-debug"
740 version = "0.3.0"
740 version = "0.3.0"
741 source = "registry+https://github.com/rust-lang/crates.io-index"
741 source = "registry+https://github.com/rust-lang/crates.io-index"
742 checksum = "624a8340c38c1b80fd549087862da4ba43e08858af025b236e509b6649fc13d5"
742 checksum = "624a8340c38c1b80fd549087862da4ba43e08858af025b236e509b6649fc13d5"
743
743
744 [[package]]
744 [[package]]
745 name = "ouroboros"
745 name = "ouroboros"
746 version = "0.15.0"
746 version = "0.15.0"
747 source = "registry+https://github.com/rust-lang/crates.io-index"
747 source = "registry+https://github.com/rust-lang/crates.io-index"
748 checksum = "9f31a3b678685b150cba82b702dcdc5e155893f63610cf388d30cd988d4ca2bf"
748 checksum = "9f31a3b678685b150cba82b702dcdc5e155893f63610cf388d30cd988d4ca2bf"
749 dependencies = [
749 dependencies = [
750 "aliasable",
750 "aliasable",
751 "ouroboros_macro",
751 "ouroboros_macro",
752 "stable_deref_trait",
752 "stable_deref_trait",
753 ]
753 ]
754
754
755 [[package]]
755 [[package]]
756 name = "ouroboros_macro"
756 name = "ouroboros_macro"
757 version = "0.15.0"
757 version = "0.15.0"
758 source = "registry+https://github.com/rust-lang/crates.io-index"
758 source = "registry+https://github.com/rust-lang/crates.io-index"
759 checksum = "084fd65d5dd8b3772edccb5ffd1e4b7eba43897ecd0f9401e330e8c542959408"
759 checksum = "084fd65d5dd8b3772edccb5ffd1e4b7eba43897ecd0f9401e330e8c542959408"
760 dependencies = [
760 dependencies = [
761 "Inflector",
761 "Inflector",
762 "proc-macro-error",
762 "proc-macro-error",
763 "proc-macro2",
763 "proc-macro2",
764 "quote",
764 "quote",
765 "syn",
765 "syn",
766 ]
766 ]
767
767
768 [[package]]
768 [[package]]
769 name = "output_vt100"
769 name = "output_vt100"
770 version = "0.1.2"
770 version = "0.1.2"
771 source = "registry+https://github.com/rust-lang/crates.io-index"
771 source = "registry+https://github.com/rust-lang/crates.io-index"
772 checksum = "53cdc5b785b7a58c5aad8216b3dfa114df64b0b06ae6e1501cef91df2fbdf8f9"
772 checksum = "53cdc5b785b7a58c5aad8216b3dfa114df64b0b06ae6e1501cef91df2fbdf8f9"
773 dependencies = [
773 dependencies = [
774 "winapi",
774 "winapi",
775 ]
775 ]
776
776
777 [[package]]
777 [[package]]
778 name = "paste"
778 name = "paste"
779 version = "1.0.5"
779 version = "1.0.5"
780 source = "registry+https://github.com/rust-lang/crates.io-index"
780 source = "registry+https://github.com/rust-lang/crates.io-index"
781 checksum = "acbf547ad0c65e31259204bd90935776d1c693cec2f4ff7abb7a1bbbd40dfe58"
781 checksum = "acbf547ad0c65e31259204bd90935776d1c693cec2f4ff7abb7a1bbbd40dfe58"
782
782
783 [[package]]
783 [[package]]
784 name = "pkg-config"
784 name = "pkg-config"
785 version = "0.3.19"
785 version = "0.3.19"
786 source = "registry+https://github.com/rust-lang/crates.io-index"
786 source = "registry+https://github.com/rust-lang/crates.io-index"
787 checksum = "3831453b3449ceb48b6d9c7ad7c96d5ea673e9b470a1dc578c2ce6521230884c"
787 checksum = "3831453b3449ceb48b6d9c7ad7c96d5ea673e9b470a1dc578c2ce6521230884c"
788
788
789 [[package]]
789 [[package]]
790 name = "ppv-lite86"
790 name = "ppv-lite86"
791 version = "0.2.10"
791 version = "0.2.10"
792 source = "registry+https://github.com/rust-lang/crates.io-index"
792 source = "registry+https://github.com/rust-lang/crates.io-index"
793 checksum = "ac74c624d6b2d21f425f752262f42188365d7b8ff1aff74c82e45136510a4857"
793 checksum = "ac74c624d6b2d21f425f752262f42188365d7b8ff1aff74c82e45136510a4857"
794
794
795 [[package]]
795 [[package]]
796 name = "pretty_assertions"
796 name = "pretty_assertions"
797 version = "1.1.0"
797 version = "1.1.0"
798 source = "registry+https://github.com/rust-lang/crates.io-index"
798 source = "registry+https://github.com/rust-lang/crates.io-index"
799 checksum = "76d5b548b725018ab5496482b45cb8bef21e9fed1858a6d674e3a8a0f0bb5d50"
799 checksum = "76d5b548b725018ab5496482b45cb8bef21e9fed1858a6d674e3a8a0f0bb5d50"
800 dependencies = [
800 dependencies = [
801 "ansi_term",
801 "ansi_term",
802 "ctor",
802 "ctor",
803 "diff",
803 "diff",
804 "output_vt100",
804 "output_vt100",
805 ]
805 ]
806
806
807 [[package]]
807 [[package]]
808 name = "proc-macro-error"
808 name = "proc-macro-error"
809 version = "1.0.4"
809 version = "1.0.4"
810 source = "registry+https://github.com/rust-lang/crates.io-index"
810 source = "registry+https://github.com/rust-lang/crates.io-index"
811 checksum = "da25490ff9892aab3fcf7c36f08cfb902dd3e71ca0f9f9517bea02a73a5ce38c"
811 checksum = "da25490ff9892aab3fcf7c36f08cfb902dd3e71ca0f9f9517bea02a73a5ce38c"
812 dependencies = [
812 dependencies = [
813 "proc-macro-error-attr",
813 "proc-macro-error-attr",
814 "proc-macro2",
814 "proc-macro2",
815 "quote",
815 "quote",
816 "syn",
816 "syn",
817 "version_check",
817 "version_check",
818 ]
818 ]
819
819
820 [[package]]
820 [[package]]
821 name = "proc-macro-error-attr"
821 name = "proc-macro-error-attr"
822 version = "1.0.4"
822 version = "1.0.4"
823 source = "registry+https://github.com/rust-lang/crates.io-index"
823 source = "registry+https://github.com/rust-lang/crates.io-index"
824 checksum = "a1be40180e52ecc98ad80b184934baf3d0d29f979574e439af5a55274b35f869"
824 checksum = "a1be40180e52ecc98ad80b184934baf3d0d29f979574e439af5a55274b35f869"
825 dependencies = [
825 dependencies = [
826 "proc-macro2",
826 "proc-macro2",
827 "quote",
827 "quote",
828 "version_check",
828 "version_check",
829 ]
829 ]
830
830
831 [[package]]
831 [[package]]
832 name = "proc-macro2"
832 name = "proc-macro2"
833 version = "1.0.24"
833 version = "1.0.24"
834 source = "registry+https://github.com/rust-lang/crates.io-index"
834 source = "registry+https://github.com/rust-lang/crates.io-index"
835 checksum = "1e0704ee1a7e00d7bb417d0770ea303c1bccbabf0ef1667dae92b5967f5f8a71"
835 checksum = "1e0704ee1a7e00d7bb417d0770ea303c1bccbabf0ef1667dae92b5967f5f8a71"
836 dependencies = [
836 dependencies = [
837 "unicode-xid",
837 "unicode-xid",
838 ]
838 ]
839
839
840 [[package]]
840 [[package]]
841 name = "python3-sys"
841 name = "python3-sys"
842 version = "0.7.0"
842 version = "0.7.0"
843 source = "registry+https://github.com/rust-lang/crates.io-index"
843 source = "registry+https://github.com/rust-lang/crates.io-index"
844 checksum = "b18b32e64c103d5045f44644d7ddddd65336f7a0521f6fde673240a9ecceb77e"
844 checksum = "b18b32e64c103d5045f44644d7ddddd65336f7a0521f6fde673240a9ecceb77e"
845 dependencies = [
845 dependencies = [
846 "libc",
846 "libc",
847 "regex",
847 "regex",
848 ]
848 ]
849
849
850 [[package]]
850 [[package]]
851 name = "quote"
851 name = "quote"
852 version = "1.0.7"
852 version = "1.0.7"
853 source = "registry+https://github.com/rust-lang/crates.io-index"
853 source = "registry+https://github.com/rust-lang/crates.io-index"
854 checksum = "aa563d17ecb180e500da1cfd2b028310ac758de548efdd203e18f283af693f37"
854 checksum = "aa563d17ecb180e500da1cfd2b028310ac758de548efdd203e18f283af693f37"
855 dependencies = [
855 dependencies = [
856 "proc-macro2",
856 "proc-macro2",
857 ]
857 ]
858
858
859 [[package]]
859 [[package]]
860 name = "rand"
860 name = "rand"
861 version = "0.7.3"
861 version = "0.7.3"
862 source = "registry+https://github.com/rust-lang/crates.io-index"
862 source = "registry+https://github.com/rust-lang/crates.io-index"
863 checksum = "6a6b1679d49b24bbfe0c803429aa1874472f50d9b363131f0e89fc356b544d03"
863 checksum = "6a6b1679d49b24bbfe0c803429aa1874472f50d9b363131f0e89fc356b544d03"
864 dependencies = [
864 dependencies = [
865 "getrandom 0.1.15",
865 "getrandom 0.1.15",
866 "libc",
866 "libc",
867 "rand_chacha 0.2.2",
867 "rand_chacha 0.2.2",
868 "rand_core 0.5.1",
868 "rand_core 0.5.1",
869 "rand_hc",
869 "rand_hc",
870 ]
870 ]
871
871
872 [[package]]
872 [[package]]
873 name = "rand"
873 name = "rand"
874 version = "0.8.5"
874 version = "0.8.5"
875 source = "registry+https://github.com/rust-lang/crates.io-index"
875 source = "registry+https://github.com/rust-lang/crates.io-index"
876 checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404"
876 checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404"
877 dependencies = [
877 dependencies = [
878 "libc",
878 "libc",
879 "rand_chacha 0.3.1",
879 "rand_chacha 0.3.1",
880 "rand_core 0.6.3",
880 "rand_core 0.6.3",
881 ]
881 ]
882
882
883 [[package]]
883 [[package]]
884 name = "rand_chacha"
884 name = "rand_chacha"
885 version = "0.2.2"
885 version = "0.2.2"
886 source = "registry+https://github.com/rust-lang/crates.io-index"
886 source = "registry+https://github.com/rust-lang/crates.io-index"
887 checksum = "f4c8ed856279c9737206bf725bf36935d8666ead7aa69b52be55af369d193402"
887 checksum = "f4c8ed856279c9737206bf725bf36935d8666ead7aa69b52be55af369d193402"
888 dependencies = [
888 dependencies = [
889 "ppv-lite86",
889 "ppv-lite86",
890 "rand_core 0.5.1",
890 "rand_core 0.5.1",
891 ]
891 ]
892
892
893 [[package]]
893 [[package]]
894 name = "rand_chacha"
894 name = "rand_chacha"
895 version = "0.3.1"
895 version = "0.3.1"
896 source = "registry+https://github.com/rust-lang/crates.io-index"
896 source = "registry+https://github.com/rust-lang/crates.io-index"
897 checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88"
897 checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88"
898 dependencies = [
898 dependencies = [
899 "ppv-lite86",
899 "ppv-lite86",
900 "rand_core 0.6.3",
900 "rand_core 0.6.3",
901 ]
901 ]
902
902
903 [[package]]
903 [[package]]
904 name = "rand_core"
904 name = "rand_core"
905 version = "0.5.1"
905 version = "0.5.1"
906 source = "registry+https://github.com/rust-lang/crates.io-index"
906 source = "registry+https://github.com/rust-lang/crates.io-index"
907 checksum = "90bde5296fc891b0cef12a6d03ddccc162ce7b2aff54160af9338f8d40df6d19"
907 checksum = "90bde5296fc891b0cef12a6d03ddccc162ce7b2aff54160af9338f8d40df6d19"
908 dependencies = [
908 dependencies = [
909 "getrandom 0.1.15",
909 "getrandom 0.1.15",
910 ]
910 ]
911
911
912 [[package]]
912 [[package]]
913 name = "rand_core"
913 name = "rand_core"
914 version = "0.6.3"
914 version = "0.6.3"
915 source = "registry+https://github.com/rust-lang/crates.io-index"
915 source = "registry+https://github.com/rust-lang/crates.io-index"
916 checksum = "d34f1408f55294453790c48b2f1ebbb1c5b4b7563eb1f418bcfcfdbb06ebb4e7"
916 checksum = "d34f1408f55294453790c48b2f1ebbb1c5b4b7563eb1f418bcfcfdbb06ebb4e7"
917 dependencies = [
917 dependencies = [
918 "getrandom 0.2.4",
918 "getrandom 0.2.4",
919 ]
919 ]
920
920
921 [[package]]
921 [[package]]
922 name = "rand_distr"
922 name = "rand_distr"
923 version = "0.4.3"
923 version = "0.4.3"
924 source = "registry+https://github.com/rust-lang/crates.io-index"
924 source = "registry+https://github.com/rust-lang/crates.io-index"
925 checksum = "32cb0b9bc82b0a0876c2dd994a7e7a2683d3e7390ca40e6886785ef0c7e3ee31"
925 checksum = "32cb0b9bc82b0a0876c2dd994a7e7a2683d3e7390ca40e6886785ef0c7e3ee31"
926 dependencies = [
926 dependencies = [
927 "num-traits",
927 "num-traits",
928 "rand 0.8.5",
928 "rand 0.8.5",
929 ]
929 ]
930
930
931 [[package]]
931 [[package]]
932 name = "rand_hc"
932 name = "rand_hc"
933 version = "0.2.0"
933 version = "0.2.0"
934 source = "registry+https://github.com/rust-lang/crates.io-index"
934 source = "registry+https://github.com/rust-lang/crates.io-index"
935 checksum = "ca3129af7b92a17112d59ad498c6f81eaf463253766b90396d39ea7a39d6613c"
935 checksum = "ca3129af7b92a17112d59ad498c6f81eaf463253766b90396d39ea7a39d6613c"
936 dependencies = [
936 dependencies = [
937 "rand_core 0.5.1",
937 "rand_core 0.5.1",
938 ]
938 ]
939
939
940 [[package]]
940 [[package]]
941 name = "rand_pcg"
941 name = "rand_pcg"
942 version = "0.3.1"
942 version = "0.3.1"
943 source = "registry+https://github.com/rust-lang/crates.io-index"
943 source = "registry+https://github.com/rust-lang/crates.io-index"
944 checksum = "59cad018caf63deb318e5a4586d99a24424a364f40f1e5778c29aca23f4fc73e"
944 checksum = "59cad018caf63deb318e5a4586d99a24424a364f40f1e5778c29aca23f4fc73e"
945 dependencies = [
945 dependencies = [
946 "rand_core 0.6.3",
946 "rand_core 0.6.3",
947 ]
947 ]
948
948
949 [[package]]
949 [[package]]
950 name = "rand_xoshiro"
950 name = "rand_xoshiro"
951 version = "0.4.0"
951 version = "0.4.0"
952 source = "registry+https://github.com/rust-lang/crates.io-index"
952 source = "registry+https://github.com/rust-lang/crates.io-index"
953 checksum = "a9fcdd2e881d02f1d9390ae47ad8e5696a9e4be7b547a1da2afbc61973217004"
953 checksum = "a9fcdd2e881d02f1d9390ae47ad8e5696a9e4be7b547a1da2afbc61973217004"
954 dependencies = [
954 dependencies = [
955 "rand_core 0.5.1",
955 "rand_core 0.5.1",
956 ]
956 ]
957
957
958 [[package]]
958 [[package]]
959 name = "rayon"
959 name = "rayon"
960 version = "1.5.1"
960 version = "1.5.1"
961 source = "registry+https://github.com/rust-lang/crates.io-index"
961 source = "registry+https://github.com/rust-lang/crates.io-index"
962 checksum = "c06aca804d41dbc8ba42dfd964f0d01334eceb64314b9ecf7c5fad5188a06d90"
962 checksum = "c06aca804d41dbc8ba42dfd964f0d01334eceb64314b9ecf7c5fad5188a06d90"
963 dependencies = [
963 dependencies = [
964 "autocfg",
964 "autocfg",
965 "crossbeam-deque",
965 "crossbeam-deque",
966 "either",
966 "either",
967 "rayon-core",
967 "rayon-core",
968 ]
968 ]
969
969
970 [[package]]
970 [[package]]
971 name = "rayon-core"
971 name = "rayon-core"
972 version = "1.9.1"
972 version = "1.9.1"
973 source = "registry+https://github.com/rust-lang/crates.io-index"
973 source = "registry+https://github.com/rust-lang/crates.io-index"
974 checksum = "d78120e2c850279833f1dd3582f730c4ab53ed95aeaaaa862a2a5c71b1656d8e"
974 checksum = "d78120e2c850279833f1dd3582f730c4ab53ed95aeaaaa862a2a5c71b1656d8e"
975 dependencies = [
975 dependencies = [
976 "crossbeam-channel 0.5.2",
976 "crossbeam-channel 0.5.2",
977 "crossbeam-deque",
977 "crossbeam-deque",
978 "crossbeam-utils 0.8.1",
978 "crossbeam-utils 0.8.1",
979 "lazy_static",
979 "lazy_static",
980 "num_cpus",
980 "num_cpus",
981 ]
981 ]
982
982
983 [[package]]
983 [[package]]
984 name = "redox_syscall"
984 name = "redox_syscall"
985 version = "0.2.11"
985 version = "0.2.11"
986 source = "registry+https://github.com/rust-lang/crates.io-index"
986 source = "registry+https://github.com/rust-lang/crates.io-index"
987 checksum = "8380fe0152551244f0747b1bf41737e0f8a74f97a14ccefd1148187271634f3c"
987 checksum = "8380fe0152551244f0747b1bf41737e0f8a74f97a14ccefd1148187271634f3c"
988 dependencies = [
988 dependencies = [
989 "bitflags",
989 "bitflags",
990 ]
990 ]
991
991
992 [[package]]
992 [[package]]
993 name = "regex"
993 name = "regex"
994 version = "1.5.5"
994 version = "1.5.5"
995 source = "registry+https://github.com/rust-lang/crates.io-index"
995 source = "registry+https://github.com/rust-lang/crates.io-index"
996 checksum = "1a11647b6b25ff05a515cb92c365cec08801e83423a235b51e231e1808747286"
996 checksum = "1a11647b6b25ff05a515cb92c365cec08801e83423a235b51e231e1808747286"
997 dependencies = [
997 dependencies = [
998 "aho-corasick",
998 "aho-corasick",
999 "memchr",
999 "memchr",
1000 "regex-syntax",
1000 "regex-syntax",
1001 ]
1001 ]
1002
1002
1003 [[package]]
1003 [[package]]
1004 name = "regex-syntax"
1004 name = "regex-syntax"
1005 version = "0.6.25"
1005 version = "0.6.25"
1006 source = "registry+https://github.com/rust-lang/crates.io-index"
1006 source = "registry+https://github.com/rust-lang/crates.io-index"
1007 checksum = "f497285884f3fcff424ffc933e56d7cbca511def0c9831a7f9b5f6153e3cc89b"
1007 checksum = "f497285884f3fcff424ffc933e56d7cbca511def0c9831a7f9b5f6153e3cc89b"
1008
1008
1009 [[package]]
1009 [[package]]
1010 name = "remove_dir_all"
1010 name = "remove_dir_all"
1011 version = "0.5.3"
1011 version = "0.5.3"
1012 source = "registry+https://github.com/rust-lang/crates.io-index"
1012 source = "registry+https://github.com/rust-lang/crates.io-index"
1013 checksum = "3acd125665422973a33ac9d3dd2df85edad0f4ae9b00dafb1a05e43a9f5ef8e7"
1013 checksum = "3acd125665422973a33ac9d3dd2df85edad0f4ae9b00dafb1a05e43a9f5ef8e7"
1014 dependencies = [
1014 dependencies = [
1015 "winapi",
1015 "winapi",
1016 ]
1016 ]
1017
1017
1018 [[package]]
1018 [[package]]
1019 name = "rhg"
1019 name = "rhg"
1020 version = "0.1.0"
1020 version = "0.1.0"
1021 dependencies = [
1021 dependencies = [
1022 "atty",
1022 "atty",
1023 "chrono",
1023 "chrono",
1024 "clap",
1024 "clap",
1025 "derive_more",
1025 "derive_more",
1026 "env_logger",
1026 "env_logger",
1027 "format-bytes",
1027 "format-bytes",
1028 "hg-core",
1028 "hg-core",
1029 "home",
1029 "home",
1030 "lazy_static",
1030 "lazy_static",
1031 "log",
1031 "log",
1032 "micro-timer 0.4.0",
1032 "micro-timer 0.4.0",
1033 "regex",
1033 "regex",
1034 "users",
1034 "users",
1035 "which",
1035 ]
1036 ]
1036
1037
1037 [[package]]
1038 [[package]]
1038 name = "rustc_version"
1039 name = "rustc_version"
1039 version = "0.4.0"
1040 version = "0.4.0"
1040 source = "registry+https://github.com/rust-lang/crates.io-index"
1041 source = "registry+https://github.com/rust-lang/crates.io-index"
1041 checksum = "bfa0f585226d2e68097d4f95d113b15b83a82e819ab25717ec0590d9584ef366"
1042 checksum = "bfa0f585226d2e68097d4f95d113b15b83a82e819ab25717ec0590d9584ef366"
1042 dependencies = [
1043 dependencies = [
1043 "semver",
1044 "semver",
1044 ]
1045 ]
1045
1046
1046 [[package]]
1047 [[package]]
1047 name = "same-file"
1048 name = "same-file"
1048 version = "1.0.6"
1049 version = "1.0.6"
1049 source = "registry+https://github.com/rust-lang/crates.io-index"
1050 source = "registry+https://github.com/rust-lang/crates.io-index"
1050 checksum = "93fc1dc3aaa9bfed95e02e6eadabb4baf7e3078b0bd1b4d7b6b0b68378900502"
1051 checksum = "93fc1dc3aaa9bfed95e02e6eadabb4baf7e3078b0bd1b4d7b6b0b68378900502"
1051 dependencies = [
1052 dependencies = [
1052 "winapi-util",
1053 "winapi-util",
1053 ]
1054 ]
1054
1055
1055 [[package]]
1056 [[package]]
1056 name = "scopeguard"
1057 name = "scopeguard"
1057 version = "1.1.0"
1058 version = "1.1.0"
1058 source = "registry+https://github.com/rust-lang/crates.io-index"
1059 source = "registry+https://github.com/rust-lang/crates.io-index"
1059 checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd"
1060 checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd"
1060
1061
1061 [[package]]
1062 [[package]]
1062 name = "semver"
1063 name = "semver"
1063 version = "1.0.6"
1064 version = "1.0.6"
1064 source = "registry+https://github.com/rust-lang/crates.io-index"
1065 source = "registry+https://github.com/rust-lang/crates.io-index"
1065 checksum = "a4a3381e03edd24287172047536f20cabde766e2cd3e65e6b00fb3af51c4f38d"
1066 checksum = "a4a3381e03edd24287172047536f20cabde766e2cd3e65e6b00fb3af51c4f38d"
1066
1067
1067 [[package]]
1068 [[package]]
1068 name = "sha-1"
1069 name = "sha-1"
1069 version = "0.9.6"
1070 version = "0.9.6"
1070 source = "registry+https://github.com/rust-lang/crates.io-index"
1071 source = "registry+https://github.com/rust-lang/crates.io-index"
1071 checksum = "8c4cfa741c5832d0ef7fab46cabed29c2aae926db0b11bb2069edd8db5e64e16"
1072 checksum = "8c4cfa741c5832d0ef7fab46cabed29c2aae926db0b11bb2069edd8db5e64e16"
1072 dependencies = [
1073 dependencies = [
1073 "block-buffer 0.9.0",
1074 "block-buffer 0.9.0",
1074 "cfg-if 1.0.0",
1075 "cfg-if 1.0.0",
1075 "cpufeatures 0.1.4",
1076 "cpufeatures 0.1.4",
1076 "digest 0.9.0",
1077 "digest 0.9.0",
1077 "opaque-debug",
1078 "opaque-debug",
1078 ]
1079 ]
1079
1080
1080 [[package]]
1081 [[package]]
1081 name = "sha-1"
1082 name = "sha-1"
1082 version = "0.10.0"
1083 version = "0.10.0"
1083 source = "registry+https://github.com/rust-lang/crates.io-index"
1084 source = "registry+https://github.com/rust-lang/crates.io-index"
1084 checksum = "028f48d513f9678cda28f6e4064755b3fbb2af6acd672f2c209b62323f7aea0f"
1085 checksum = "028f48d513f9678cda28f6e4064755b3fbb2af6acd672f2c209b62323f7aea0f"
1085 dependencies = [
1086 dependencies = [
1086 "cfg-if 1.0.0",
1087 "cfg-if 1.0.0",
1087 "cpufeatures 0.2.1",
1088 "cpufeatures 0.2.1",
1088 "digest 0.10.2",
1089 "digest 0.10.2",
1089 ]
1090 ]
1090
1091
1091 [[package]]
1092 [[package]]
1092 name = "sized-chunks"
1093 name = "sized-chunks"
1093 version = "0.6.2"
1094 version = "0.6.2"
1094 source = "registry+https://github.com/rust-lang/crates.io-index"
1095 source = "registry+https://github.com/rust-lang/crates.io-index"
1095 checksum = "1ec31ceca5644fa6d444cc77548b88b67f46db6f7c71683b0f9336e671830d2f"
1096 checksum = "1ec31ceca5644fa6d444cc77548b88b67f46db6f7c71683b0f9336e671830d2f"
1096 dependencies = [
1097 dependencies = [
1097 "bitmaps",
1098 "bitmaps",
1098 "typenum",
1099 "typenum",
1099 ]
1100 ]
1100
1101
1101 [[package]]
1102 [[package]]
1102 name = "stable_deref_trait"
1103 name = "stable_deref_trait"
1103 version = "1.2.0"
1104 version = "1.2.0"
1104 source = "registry+https://github.com/rust-lang/crates.io-index"
1105 source = "registry+https://github.com/rust-lang/crates.io-index"
1105 checksum = "a8f112729512f8e442d81f95a8a7ddf2b7c6b8a1a6f509a95864142b30cab2d3"
1106 checksum = "a8f112729512f8e442d81f95a8a7ddf2b7c6b8a1a6f509a95864142b30cab2d3"
1106
1107
1107 [[package]]
1108 [[package]]
1108 name = "static_assertions"
1109 name = "static_assertions"
1109 version = "1.1.0"
1110 version = "1.1.0"
1110 source = "registry+https://github.com/rust-lang/crates.io-index"
1111 source = "registry+https://github.com/rust-lang/crates.io-index"
1111 checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f"
1112 checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f"
1112
1113
1113 [[package]]
1114 [[package]]
1114 name = "strsim"
1115 name = "strsim"
1115 version = "0.8.0"
1116 version = "0.8.0"
1116 source = "registry+https://github.com/rust-lang/crates.io-index"
1117 source = "registry+https://github.com/rust-lang/crates.io-index"
1117 checksum = "8ea5119cdb4c55b55d432abb513a0429384878c15dde60cc77b1c99de1a95a6a"
1118 checksum = "8ea5119cdb4c55b55d432abb513a0429384878c15dde60cc77b1c99de1a95a6a"
1118
1119
1119 [[package]]
1120 [[package]]
1120 name = "syn"
1121 name = "syn"
1121 version = "1.0.54"
1122 version = "1.0.54"
1122 source = "registry+https://github.com/rust-lang/crates.io-index"
1123 source = "registry+https://github.com/rust-lang/crates.io-index"
1123 checksum = "9a2af957a63d6bd42255c359c93d9bfdb97076bd3b820897ce55ffbfbf107f44"
1124 checksum = "9a2af957a63d6bd42255c359c93d9bfdb97076bd3b820897ce55ffbfbf107f44"
1124 dependencies = [
1125 dependencies = [
1125 "proc-macro2",
1126 "proc-macro2",
1126 "quote",
1127 "quote",
1127 "unicode-xid",
1128 "unicode-xid",
1128 ]
1129 ]
1129
1130
1130 [[package]]
1131 [[package]]
1131 name = "tempfile"
1132 name = "tempfile"
1132 version = "3.3.0"
1133 version = "3.3.0"
1133 source = "registry+https://github.com/rust-lang/crates.io-index"
1134 source = "registry+https://github.com/rust-lang/crates.io-index"
1134 checksum = "5cdb1ef4eaeeaddc8fbd371e5017057064af0911902ef36b39801f67cc6d79e4"
1135 checksum = "5cdb1ef4eaeeaddc8fbd371e5017057064af0911902ef36b39801f67cc6d79e4"
1135 dependencies = [
1136 dependencies = [
1136 "cfg-if 1.0.0",
1137 "cfg-if 1.0.0",
1137 "fastrand",
1138 "fastrand",
1138 "libc",
1139 "libc",
1139 "redox_syscall",
1140 "redox_syscall",
1140 "remove_dir_all",
1141 "remove_dir_all",
1141 "winapi",
1142 "winapi",
1142 ]
1143 ]
1143
1144
1144 [[package]]
1145 [[package]]
1145 name = "termcolor"
1146 name = "termcolor"
1146 version = "1.1.2"
1147 version = "1.1.2"
1147 source = "registry+https://github.com/rust-lang/crates.io-index"
1148 source = "registry+https://github.com/rust-lang/crates.io-index"
1148 checksum = "2dfed899f0eb03f32ee8c6a0aabdb8a7949659e3466561fc0adf54e26d88c5f4"
1149 checksum = "2dfed899f0eb03f32ee8c6a0aabdb8a7949659e3466561fc0adf54e26d88c5f4"
1149 dependencies = [
1150 dependencies = [
1150 "winapi-util",
1151 "winapi-util",
1151 ]
1152 ]
1152
1153
1153 [[package]]
1154 [[package]]
1154 name = "textwrap"
1155 name = "textwrap"
1155 version = "0.11.0"
1156 version = "0.11.0"
1156 source = "registry+https://github.com/rust-lang/crates.io-index"
1157 source = "registry+https://github.com/rust-lang/crates.io-index"
1157 checksum = "d326610f408c7a4eb6f51c37c330e496b08506c9457c9d34287ecc38809fb060"
1158 checksum = "d326610f408c7a4eb6f51c37c330e496b08506c9457c9d34287ecc38809fb060"
1158 dependencies = [
1159 dependencies = [
1159 "unicode-width",
1160 "unicode-width",
1160 ]
1161 ]
1161
1162
1162 [[package]]
1163 [[package]]
1163 name = "time"
1164 name = "time"
1164 version = "0.1.44"
1165 version = "0.1.44"
1165 source = "registry+https://github.com/rust-lang/crates.io-index"
1166 source = "registry+https://github.com/rust-lang/crates.io-index"
1166 checksum = "6db9e6914ab8b1ae1c260a4ae7a49b6c5611b40328a735b21862567685e73255"
1167 checksum = "6db9e6914ab8b1ae1c260a4ae7a49b6c5611b40328a735b21862567685e73255"
1167 dependencies = [
1168 dependencies = [
1168 "libc",
1169 "libc",
1169 "wasi 0.10.0+wasi-snapshot-preview1",
1170 "wasi 0.10.0+wasi-snapshot-preview1",
1170 "winapi",
1171 "winapi",
1171 ]
1172 ]
1172
1173
1173 [[package]]
1174 [[package]]
1174 name = "twox-hash"
1175 name = "twox-hash"
1175 version = "1.6.2"
1176 version = "1.6.2"
1176 source = "registry+https://github.com/rust-lang/crates.io-index"
1177 source = "registry+https://github.com/rust-lang/crates.io-index"
1177 checksum = "4ee73e6e4924fe940354b8d4d98cad5231175d615cd855b758adc658c0aac6a0"
1178 checksum = "4ee73e6e4924fe940354b8d4d98cad5231175d615cd855b758adc658c0aac6a0"
1178 dependencies = [
1179 dependencies = [
1179 "cfg-if 1.0.0",
1180 "cfg-if 1.0.0",
1180 "rand 0.8.5",
1181 "rand 0.8.5",
1181 "static_assertions",
1182 "static_assertions",
1182 ]
1183 ]
1183
1184
1184 [[package]]
1185 [[package]]
1185 name = "typenum"
1186 name = "typenum"
1186 version = "1.12.0"
1187 version = "1.12.0"
1187 source = "registry+https://github.com/rust-lang/crates.io-index"
1188 source = "registry+https://github.com/rust-lang/crates.io-index"
1188 checksum = "373c8a200f9e67a0c95e62a4f52fbf80c23b4381c05a17845531982fa99e6b33"
1189 checksum = "373c8a200f9e67a0c95e62a4f52fbf80c23b4381c05a17845531982fa99e6b33"
1189
1190
1190 [[package]]
1191 [[package]]
1191 name = "unicode-width"
1192 name = "unicode-width"
1192 version = "0.1.9"
1193 version = "0.1.9"
1193 source = "registry+https://github.com/rust-lang/crates.io-index"
1194 source = "registry+https://github.com/rust-lang/crates.io-index"
1194 checksum = "3ed742d4ea2bd1176e236172c8429aaf54486e7ac098db29ffe6529e0ce50973"
1195 checksum = "3ed742d4ea2bd1176e236172c8429aaf54486e7ac098db29ffe6529e0ce50973"
1195
1196
1196 [[package]]
1197 [[package]]
1197 name = "unicode-xid"
1198 name = "unicode-xid"
1198 version = "0.2.1"
1199 version = "0.2.1"
1199 source = "registry+https://github.com/rust-lang/crates.io-index"
1200 source = "registry+https://github.com/rust-lang/crates.io-index"
1200 checksum = "f7fe0bb3479651439c9112f72b6c505038574c9fbb575ed1bf3b797fa39dd564"
1201 checksum = "f7fe0bb3479651439c9112f72b6c505038574c9fbb575ed1bf3b797fa39dd564"
1201
1202
1202 [[package]]
1203 [[package]]
1203 name = "users"
1204 name = "users"
1204 version = "0.11.0"
1205 version = "0.11.0"
1205 source = "registry+https://github.com/rust-lang/crates.io-index"
1206 source = "registry+https://github.com/rust-lang/crates.io-index"
1206 checksum = "24cc0f6d6f267b73e5a2cadf007ba8f9bc39c6a6f9666f8cf25ea809a153b032"
1207 checksum = "24cc0f6d6f267b73e5a2cadf007ba8f9bc39c6a6f9666f8cf25ea809a153b032"
1207 dependencies = [
1208 dependencies = [
1208 "libc",
1209 "libc",
1209 "log",
1210 "log",
1210 ]
1211 ]
1211
1212
1212 [[package]]
1213 [[package]]
1213 name = "vcpkg"
1214 name = "vcpkg"
1214 version = "0.2.11"
1215 version = "0.2.11"
1215 source = "registry+https://github.com/rust-lang/crates.io-index"
1216 source = "registry+https://github.com/rust-lang/crates.io-index"
1216 checksum = "b00bca6106a5e23f3eee943593759b7fcddb00554332e856d990c893966879fb"
1217 checksum = "b00bca6106a5e23f3eee943593759b7fcddb00554332e856d990c893966879fb"
1217
1218
1218 [[package]]
1219 [[package]]
1219 name = "vcsgraph"
1220 name = "vcsgraph"
1220 version = "0.2.0"
1221 version = "0.2.0"
1221 source = "registry+https://github.com/rust-lang/crates.io-index"
1222 source = "registry+https://github.com/rust-lang/crates.io-index"
1222 checksum = "4cb68c231e2575f7503a7c19213875f9d4ec2e84e963a56ce3de4b6bee351ef7"
1223 checksum = "4cb68c231e2575f7503a7c19213875f9d4ec2e84e963a56ce3de4b6bee351ef7"
1223 dependencies = [
1224 dependencies = [
1224 "hex",
1225 "hex",
1225 "rand 0.7.3",
1226 "rand 0.7.3",
1226 "sha-1 0.9.6",
1227 "sha-1 0.9.6",
1227 ]
1228 ]
1228
1229
1229 [[package]]
1230 [[package]]
1230 name = "vec_map"
1231 name = "vec_map"
1231 version = "0.8.2"
1232 version = "0.8.2"
1232 source = "registry+https://github.com/rust-lang/crates.io-index"
1233 source = "registry+https://github.com/rust-lang/crates.io-index"
1233 checksum = "f1bddf1187be692e79c5ffeab891132dfb0f236ed36a43c7ed39f1165ee20191"
1234 checksum = "f1bddf1187be692e79c5ffeab891132dfb0f236ed36a43c7ed39f1165ee20191"
1234
1235
1235 [[package]]
1236 [[package]]
1236 name = "version_check"
1237 name = "version_check"
1237 version = "0.9.2"
1238 version = "0.9.2"
1238 source = "registry+https://github.com/rust-lang/crates.io-index"
1239 source = "registry+https://github.com/rust-lang/crates.io-index"
1239 checksum = "b5a972e5669d67ba988ce3dc826706fb0a8b01471c088cb0b6110b805cc36aed"
1240 checksum = "b5a972e5669d67ba988ce3dc826706fb0a8b01471c088cb0b6110b805cc36aed"
1240
1241
1241 [[package]]
1242 [[package]]
1242 name = "wasi"
1243 name = "wasi"
1243 version = "0.9.0+wasi-snapshot-preview1"
1244 version = "0.9.0+wasi-snapshot-preview1"
1244 source = "registry+https://github.com/rust-lang/crates.io-index"
1245 source = "registry+https://github.com/rust-lang/crates.io-index"
1245 checksum = "cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519"
1246 checksum = "cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519"
1246
1247
1247 [[package]]
1248 [[package]]
1248 name = "wasi"
1249 name = "wasi"
1249 version = "0.10.0+wasi-snapshot-preview1"
1250 version = "0.10.0+wasi-snapshot-preview1"
1250 source = "registry+https://github.com/rust-lang/crates.io-index"
1251 source = "registry+https://github.com/rust-lang/crates.io-index"
1251 checksum = "1a143597ca7c7793eff794def352d41792a93c481eb1042423ff7ff72ba2c31f"
1252 checksum = "1a143597ca7c7793eff794def352d41792a93c481eb1042423ff7ff72ba2c31f"
1252
1253
1253 [[package]]
1254 [[package]]
1255 name = "which"
1256 version = "4.2.5"
1257 source = "registry+https://github.com/rust-lang/crates.io-index"
1258 checksum = "5c4fb54e6113b6a8772ee41c3404fb0301ac79604489467e0a9ce1f3e97c24ae"
1259 dependencies = [
1260 "either",
1261 "lazy_static",
1262 "libc",
1263 ]
1264
1265 [[package]]
1254 name = "winapi"
1266 name = "winapi"
1255 version = "0.3.9"
1267 version = "0.3.9"
1256 source = "registry+https://github.com/rust-lang/crates.io-index"
1268 source = "registry+https://github.com/rust-lang/crates.io-index"
1257 checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419"
1269 checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419"
1258 dependencies = [
1270 dependencies = [
1259 "winapi-i686-pc-windows-gnu",
1271 "winapi-i686-pc-windows-gnu",
1260 "winapi-x86_64-pc-windows-gnu",
1272 "winapi-x86_64-pc-windows-gnu",
1261 ]
1273 ]
1262
1274
1263 [[package]]
1275 [[package]]
1264 name = "winapi-i686-pc-windows-gnu"
1276 name = "winapi-i686-pc-windows-gnu"
1265 version = "0.4.0"
1277 version = "0.4.0"
1266 source = "registry+https://github.com/rust-lang/crates.io-index"
1278 source = "registry+https://github.com/rust-lang/crates.io-index"
1267 checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6"
1279 checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6"
1268
1280
1269 [[package]]
1281 [[package]]
1270 name = "winapi-util"
1282 name = "winapi-util"
1271 version = "0.1.5"
1283 version = "0.1.5"
1272 source = "registry+https://github.com/rust-lang/crates.io-index"
1284 source = "registry+https://github.com/rust-lang/crates.io-index"
1273 checksum = "70ec6ce85bb158151cae5e5c87f95a8e97d2c0c4b001223f33a334e3ce5de178"
1285 checksum = "70ec6ce85bb158151cae5e5c87f95a8e97d2c0c4b001223f33a334e3ce5de178"
1274 dependencies = [
1286 dependencies = [
1275 "winapi",
1287 "winapi",
1276 ]
1288 ]
1277
1289
1278 [[package]]
1290 [[package]]
1279 name = "winapi-x86_64-pc-windows-gnu"
1291 name = "winapi-x86_64-pc-windows-gnu"
1280 version = "0.4.0"
1292 version = "0.4.0"
1281 source = "registry+https://github.com/rust-lang/crates.io-index"
1293 source = "registry+https://github.com/rust-lang/crates.io-index"
1282 checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f"
1294 checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f"
1283
1295
1284 [[package]]
1296 [[package]]
1285 name = "zstd"
1297 name = "zstd"
1286 version = "0.5.4+zstd.1.4.7"
1298 version = "0.5.4+zstd.1.4.7"
1287 source = "registry+https://github.com/rust-lang/crates.io-index"
1299 source = "registry+https://github.com/rust-lang/crates.io-index"
1288 checksum = "69996ebdb1ba8b1517f61387a883857818a66c8a295f487b1ffd8fd9d2c82910"
1300 checksum = "69996ebdb1ba8b1517f61387a883857818a66c8a295f487b1ffd8fd9d2c82910"
1289 dependencies = [
1301 dependencies = [
1290 "zstd-safe",
1302 "zstd-safe",
1291 ]
1303 ]
1292
1304
1293 [[package]]
1305 [[package]]
1294 name = "zstd-safe"
1306 name = "zstd-safe"
1295 version = "2.0.6+zstd.1.4.7"
1307 version = "2.0.6+zstd.1.4.7"
1296 source = "registry+https://github.com/rust-lang/crates.io-index"
1308 source = "registry+https://github.com/rust-lang/crates.io-index"
1297 checksum = "98aa931fb69ecee256d44589d19754e61851ae4769bf963b385119b1cc37a49e"
1309 checksum = "98aa931fb69ecee256d44589d19754e61851ae4769bf963b385119b1cc37a49e"
1298 dependencies = [
1310 dependencies = [
1299 "libc",
1311 "libc",
1300 "zstd-sys",
1312 "zstd-sys",
1301 ]
1313 ]
1302
1314
1303 [[package]]
1315 [[package]]
1304 name = "zstd-sys"
1316 name = "zstd-sys"
1305 version = "1.4.18+zstd.1.4.7"
1317 version = "1.4.18+zstd.1.4.7"
1306 source = "registry+https://github.com/rust-lang/crates.io-index"
1318 source = "registry+https://github.com/rust-lang/crates.io-index"
1307 checksum = "a1e6e8778706838f43f771d80d37787cb2fe06dafe89dd3aebaf6721b9eaec81"
1319 checksum = "a1e6e8778706838f43f771d80d37787cb2fe06dafe89dd3aebaf6721b9eaec81"
1308 dependencies = [
1320 dependencies = [
1309 "cc",
1321 "cc",
1310 "glob",
1322 "glob",
1311 "itertools 0.9.0",
1323 "itertools 0.9.0",
1312 "libc",
1324 "libc",
1313 ]
1325 ]
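
The hunk above adds `which` 4.2.5 to the lockfile and to rhg's dependency
list. A minimal sketch of what the crate provides (the `which::which` call
is real `which` 4.x API; using it to locate a fallback `hg` executable is
an assumption for illustration, not something this diff shows):

use std::path::PathBuf;

// Resolve a binary name against the search path ($PATH on Unix);
// None if no matching executable is found.
fn find_fallback_hg() -> Option<PathBuf> {
    which::which("hg").ok()
}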
@@ -1,844 +1,849 b''
1 //! The "version 2" disk representation of the dirstate
1 //! The "version 2" disk representation of the dirstate
2 //!
2 //!
3 //! See `mercurial/helptext/internals/dirstate-v2.txt`
3 //! See `mercurial/helptext/internals/dirstate-v2.txt`
4
4
5 use crate::dirstate::{DirstateV2Data, TruncatedTimestamp};
5 use crate::dirstate::{DirstateV2Data, TruncatedTimestamp};
6 use crate::dirstate_tree::dirstate_map::{self, DirstateMap, NodeRef};
6 use crate::dirstate_tree::dirstate_map::{self, DirstateMap, NodeRef};
7 use crate::dirstate_tree::path_with_basename::WithBasename;
7 use crate::dirstate_tree::path_with_basename::WithBasename;
8 use crate::errors::HgError;
8 use crate::errors::HgError;
9 use crate::utils::hg_path::HgPath;
9 use crate::utils::hg_path::HgPath;
10 use crate::DirstateEntry;
10 use crate::DirstateEntry;
11 use crate::DirstateError;
11 use crate::DirstateError;
12 use crate::DirstateParents;
12 use crate::DirstateParents;
13 use bitflags::bitflags;
13 use bitflags::bitflags;
14 use bytes_cast::unaligned::{U16Be, U32Be};
14 use bytes_cast::unaligned::{U16Be, U32Be};
15 use bytes_cast::BytesCast;
15 use bytes_cast::BytesCast;
16 use format_bytes::format_bytes;
16 use format_bytes::format_bytes;
17 use rand::Rng;
17 use rand::Rng;
18 use std::borrow::Cow;
18 use std::borrow::Cow;
19 use std::convert::{TryFrom, TryInto};
19 use std::convert::{TryFrom, TryInto};
20 use std::fmt::Write;
20 use std::fmt::Write;
21
21
22 /// Added at the start of `.hg/dirstate` when the "v2" format is used.
22 /// Added at the start of `.hg/dirstate` when the "v2" format is used.
23 /// This is a redundant sanity check more than an actual "magic number" since
23 /// This is a redundant sanity check more than an actual "magic number" since
24 /// `.hg/requires` already governs which format should be used.
24 /// `.hg/requires` already governs which format should be used.
25 pub const V2_FORMAT_MARKER: &[u8; 12] = b"dirstate-v2\n";
25 pub const V2_FORMAT_MARKER: &[u8; 12] = b"dirstate-v2\n";
26
26
27 /// Keep space for 256-bit hashes
27 /// Keep space for 256-bit hashes
28 const STORED_NODE_ID_BYTES: usize = 32;
28 const STORED_NODE_ID_BYTES: usize = 32;
29
29
30 /// … even though only 160 bits are used for now, with SHA-1
30 /// … even though only 160 bits are used for now, with SHA-1
31 const USED_NODE_ID_BYTES: usize = 20;
31 const USED_NODE_ID_BYTES: usize = 20;
32
32
33 pub(super) const IGNORE_PATTERNS_HASH_LEN: usize = 20;
33 pub(super) const IGNORE_PATTERNS_HASH_LEN: usize = 20;
34 pub(super) type IgnorePatternsHash = [u8; IGNORE_PATTERNS_HASH_LEN];
34 pub(super) type IgnorePatternsHash = [u8; IGNORE_PATTERNS_HASH_LEN];
35
35
36 /// Must match constants of the same names in `mercurial/dirstateutils/v2.py`
36 /// Must match constants of the same names in `mercurial/dirstateutils/v2.py`
37 const TREE_METADATA_SIZE: usize = 44;
37 const TREE_METADATA_SIZE: usize = 44;
38 const NODE_SIZE: usize = 44;
38 const NODE_SIZE: usize = 44;
39
39
40 /// Make sure that size-affecting changes are made knowingly
40 /// Make sure that size-affecting changes are made knowingly
41 #[allow(unused)]
41 #[allow(unused)]
42 fn static_assert_size_of() {
42 fn static_assert_size_of() {
43 let _ = std::mem::transmute::<TreeMetadata, [u8; TREE_METADATA_SIZE]>;
43 let _ = std::mem::transmute::<TreeMetadata, [u8; TREE_METADATA_SIZE]>;
44 let _ = std::mem::transmute::<DocketHeader, [u8; TREE_METADATA_SIZE + 81]>;
44 let _ = std::mem::transmute::<DocketHeader, [u8; TREE_METADATA_SIZE + 81]>;
45 let _ = std::mem::transmute::<Node, [u8; NODE_SIZE]>;
45 let _ = std::mem::transmute::<Node, [u8; NODE_SIZE]>;
46 }
46 }
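// How the assertion above works: `std::mem::transmute::<A, B>` only
// compiles when `size_of::<A>() == size_of::<B>()`, so merely naming the
// function (without ever calling it) turns any accidental size change of
// `TreeMetadata`, `DocketHeader` or `Node` into a compile-time error.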
47
47
48 // Must match `HEADER` in `mercurial/dirstateutils/docket.py`
48 // Must match `HEADER` in `mercurial/dirstateutils/docket.py`
49 #[derive(BytesCast)]
49 #[derive(BytesCast)]
50 #[repr(C)]
50 #[repr(C)]
51 struct DocketHeader {
51 struct DocketHeader {
52 marker: [u8; V2_FORMAT_MARKER.len()],
52 marker: [u8; V2_FORMAT_MARKER.len()],
53 parent_1: [u8; STORED_NODE_ID_BYTES],
53 parent_1: [u8; STORED_NODE_ID_BYTES],
54 parent_2: [u8; STORED_NODE_ID_BYTES],
54 parent_2: [u8; STORED_NODE_ID_BYTES],
55
55
56 metadata: TreeMetadata,
56 metadata: TreeMetadata,
57
57
58 /// Counted in bytes
58 /// Counted in bytes
59 data_size: Size,
59 data_size: Size,
60
60
61 uuid_size: u8,
61 uuid_size: u8,
62 }
62 }
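// Byte layout of the header above, derivable from its fields:
// 12 (marker) + 32 (parent_1) + 32 (parent_2) + 44 (TreeMetadata)
// + 4 (data_size, a U32Be) + 1 (uuid_size) = 125 bytes,
// i.e. TREE_METADATA_SIZE + 81, matching `static_assert_size_of`.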
63
63
64 pub struct Docket<'on_disk> {
64 pub struct Docket<'on_disk> {
65 header: &'on_disk DocketHeader,
65 header: &'on_disk DocketHeader,
66 pub uuid: &'on_disk [u8],
66 pub uuid: &'on_disk [u8],
67 }
67 }
68
68
69 /// Fields are documented in the *Tree metadata in the docket file*
69 /// Fields are documented in the *Tree metadata in the docket file*
70 /// section of `mercurial/helptext/internals/dirstate-v2.txt`
70 /// section of `mercurial/helptext/internals/dirstate-v2.txt`
71 #[derive(BytesCast)]
71 #[derive(BytesCast)]
72 #[repr(C)]
72 #[repr(C)]
73 pub struct TreeMetadata {
73 pub struct TreeMetadata {
74 root_nodes: ChildNodes,
74 root_nodes: ChildNodes,
75 nodes_with_entry_count: Size,
75 nodes_with_entry_count: Size,
76 nodes_with_copy_source_count: Size,
76 nodes_with_copy_source_count: Size,
77 unreachable_bytes: Size,
77 unreachable_bytes: Size,
78 unused: [u8; 4],
78 unused: [u8; 4],
79
79
80 /// See *Optional hash of ignore patterns* section of
80 /// See *Optional hash of ignore patterns* section of
81 /// `mercurial/helptext/internals/dirstate-v2.txt`
81 /// `mercurial/helptext/internals/dirstate-v2.txt`
82 ignore_patterns_hash: IgnorePatternsHash,
82 ignore_patterns_hash: IgnorePatternsHash,
83 }
83 }
84
84
85 /// Fields are documented in the *The data file format*
85 /// Fields are documented in the *The data file format*
86 /// section of `mercurial/helptext/internals/dirstate-v2.txt`
86 /// section of `mercurial/helptext/internals/dirstate-v2.txt`
87 #[derive(BytesCast, Debug)]
87 #[derive(BytesCast, Debug)]
88 #[repr(C)]
88 #[repr(C)]
89 pub(super) struct Node {
89 pub(super) struct Node {
90 full_path: PathSlice,
90 full_path: PathSlice,
91
91
92 /// In bytes from `self.full_path.start`
92 /// In bytes from `self.full_path.start`
93 base_name_start: PathSize,
93 base_name_start: PathSize,
94
94
95 copy_source: OptPathSlice,
95 copy_source: OptPathSlice,
96 children: ChildNodes,
96 children: ChildNodes,
97 pub(super) descendants_with_entry_count: Size,
97 pub(super) descendants_with_entry_count: Size,
98 pub(super) tracked_descendants_count: Size,
98 pub(super) tracked_descendants_count: Size,
99 flags: U16Be,
99 flags: U16Be,
100 size: U32Be,
100 size: U32Be,
101 mtime: PackedTruncatedTimestamp,
101 mtime: PackedTruncatedTimestamp,
102 }
102 }
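// Size check, derivable from the field types defined further down
// (all are unaligned byte-based types, so #[repr(C)] adds no padding):
// full_path (4 + 2) + base_name_start (2) + copy_source (4 + 2)
// + children (4 + 4) + the two Size counters (4 + 4) + flags (2)
// + size (4) + mtime (4 + 4) = 44 = NODE_SIZE.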
103
103
104 bitflags! {
104 bitflags! {
105 #[repr(C)]
105 #[repr(C)]
106 struct Flags: u16 {
106 struct Flags: u16 {
107 const WDIR_TRACKED = 1 << 0;
107 const WDIR_TRACKED = 1 << 0;
108 const P1_TRACKED = 1 << 1;
108 const P1_TRACKED = 1 << 1;
109 const P2_INFO = 1 << 2;
109 const P2_INFO = 1 << 2;
110 const MODE_EXEC_PERM = 1 << 3;
110 const MODE_EXEC_PERM = 1 << 3;
111 const MODE_IS_SYMLINK = 1 << 4;
111 const MODE_IS_SYMLINK = 1 << 4;
112 const HAS_FALLBACK_EXEC = 1 << 5;
112 const HAS_FALLBACK_EXEC = 1 << 5;
113 const FALLBACK_EXEC = 1 << 6;
113 const FALLBACK_EXEC = 1 << 6;
114 const HAS_FALLBACK_SYMLINK = 1 << 7;
114 const HAS_FALLBACK_SYMLINK = 1 << 7;
115 const FALLBACK_SYMLINK = 1 << 8;
115 const FALLBACK_SYMLINK = 1 << 8;
116 const EXPECTED_STATE_IS_MODIFIED = 1 << 9;
116 const EXPECTED_STATE_IS_MODIFIED = 1 << 9;
117 const HAS_MODE_AND_SIZE = 1 << 10;
117 const HAS_MODE_AND_SIZE = 1 << 10;
118 const HAS_MTIME = 1 << 11;
118 const HAS_MTIME = 1 << 11;
119 const MTIME_SECOND_AMBIGUOUS = 1 << 12;
119 const MTIME_SECOND_AMBIGUOUS = 1 << 12;
120 const DIRECTORY = 1 << 13;
120 const DIRECTORY = 1 << 13;
121 const ALL_UNKNOWN_RECORDED = 1 << 14;
121 const ALL_UNKNOWN_RECORDED = 1 << 14;
122 const ALL_IGNORED_RECORDED = 1 << 15;
122 const ALL_IGNORED_RECORDED = 1 << 15;
123 }
123 }
124 }
124 }
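// Illustrative sketch (not part of this file): each HAS_*/value pair
// above packs an Option<bool> into two bits. Decoding, for example:
fn fallback_exec_sketch(flags: Flags) -> Option<bool> {
    if flags.contains(Flags::HAS_FALLBACK_EXEC) {
        Some(flags.contains(Flags::FALLBACK_EXEC))
    } else {
        None // no fallback recorded for this node
    }
}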
125
125
126 /// Duration since the Unix epoch
126 /// Duration since the Unix epoch
127 #[derive(BytesCast, Copy, Clone, Debug)]
127 #[derive(BytesCast, Copy, Clone, Debug)]
128 #[repr(C)]
128 #[repr(C)]
129 struct PackedTruncatedTimestamp {
129 struct PackedTruncatedTimestamp {
130 truncated_seconds: U32Be,
130 truncated_seconds: U32Be,
131 nanoseconds: U32Be,
131 nanoseconds: U32Be,
132 }
132 }
133
133
134 /// Counted in bytes from the start of the file
134 /// Counted in bytes from the start of the file
135 ///
135 ///
136 /// NOTE: not supporting `.hg/dirstate` files larger than 4 GiB.
136 /// NOTE: not supporting `.hg/dirstate` files larger than 4 GiB.
137 type Offset = U32Be;
137 type Offset = U32Be;
138
138
139 /// Counted in number of items
139 /// Counted in number of items
140 ///
140 ///
141 /// NOTE: we choose not to support counting more than 4 billion nodes anywhere.
141 /// NOTE: we choose not to support counting more than 4 billion nodes anywhere.
142 type Size = U32Be;
142 type Size = U32Be;
143
143
144 /// Counted in bytes
144 /// Counted in bytes
145 ///
145 ///
146 /// NOTE: we choose not to support file names/paths longer than 64 KiB.
146 /// NOTE: we choose not to support file names/paths longer than 64 KiB.
147 type PathSize = U16Be;
147 type PathSize = U16Be;
148
148
149 /// A contiguous sequence of `len` times `Node`, representing the child nodes
149 /// A contiguous sequence of `len` times `Node`, representing the child nodes
150 /// of either some other node or of the repository root.
150 /// of either some other node or of the repository root.
151 ///
151 ///
152 /// Always sorted by ascending `full_path`, to allow binary search.
152 /// Always sorted by ascending `full_path`, to allow binary search.
153 /// Since nodes with the same parent also have the same parent path,
153 /// Since nodes with the same parent also have the same parent path,
154 /// only the `base_name`s need to be compared during binary search.
154 /// only the `base_name`s need to be compared during binary search.
155 #[derive(BytesCast, Copy, Clone, Debug)]
155 #[derive(BytesCast, Copy, Clone, Debug)]
156 #[repr(C)]
156 #[repr(C)]
157 struct ChildNodes {
157 struct ChildNodes {
158 start: Offset,
158 start: Offset,
159 len: Size,
159 len: Size,
160 }
160 }
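// Sketch of the lookup this invariant enables (hypothetical helper, not
// from this file): with children materialized as a sorted `&[Node]`,
// one path component resolves via binary search on `base_name` alone.
fn find_child_sketch<'a>(
    children: &'a [Node],
    on_disk: &[u8],
    name: &HgPath,
) -> Option<&'a Node> {
    children
        .binary_search_by(|node| {
            // a corrupted path would already have failed parsing
            node.base_name(on_disk).expect("valid dirstate-v2").cmp(name)
        })
        .ok()
        .map(|i| &children[i])
}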
161
161
162 /// A `HgPath` of `len` bytes
162 /// A `HgPath` of `len` bytes
163 #[derive(BytesCast, Copy, Clone, Debug)]
163 #[derive(BytesCast, Copy, Clone, Debug)]
164 #[repr(C)]
164 #[repr(C)]
165 struct PathSlice {
165 struct PathSlice {
166 start: Offset,
166 start: Offset,
167 len: PathSize,
167 len: PathSize,
168 }
168 }
169
169
170 /// Either nothing if `start == 0`, or a `HgPath` of `len` bytes
170 /// Either nothing if `start == 0`, or a `HgPath` of `len` bytes
171 type OptPathSlice = PathSlice;
171 type OptPathSlice = PathSlice;
172
172
173 /// Unexpected file format found in `.hg/dirstate` with the "v2" format.
173 /// Unexpected file format found in `.hg/dirstate` with the "v2" format.
174 ///
174 ///
175 /// This should only happen if Mercurial is buggy or a repository is corrupted.
175 /// This should only happen if Mercurial is buggy or a repository is corrupted.
176 #[derive(Debug)]
176 #[derive(Debug)]
177 pub struct DirstateV2ParseError;
177 pub struct DirstateV2ParseError;
178
178
179 impl From<DirstateV2ParseError> for HgError {
179 impl From<DirstateV2ParseError> for HgError {
180 fn from(_: DirstateV2ParseError) -> Self {
180 fn from(_: DirstateV2ParseError) -> Self {
181 HgError::corrupted("dirstate-v2 parse error")
181 HgError::corrupted("dirstate-v2 parse error")
182 }
182 }
183 }
183 }
184
184
185 impl From<DirstateV2ParseError> for crate::DirstateError {
185 impl From<DirstateV2ParseError> for crate::DirstateError {
186 fn from(error: DirstateV2ParseError) -> Self {
186 fn from(error: DirstateV2ParseError) -> Self {
187 HgError::from(error).into()
187 HgError::from(error).into()
188 }
188 }
189 }
189 }
190
190
191 impl TreeMetadata {
191 impl TreeMetadata {
192 pub fn as_bytes(&self) -> &[u8] {
192 pub fn as_bytes(&self) -> &[u8] {
193 BytesCast::as_bytes(self)
193 BytesCast::as_bytes(self)
194 }
194 }
195 }
195 }
196
196
197 impl<'on_disk> Docket<'on_disk> {
197 impl<'on_disk> Docket<'on_disk> {
198 /// Generate the identifier for a new data file
198 /// Generate the identifier for a new data file
199 ///
199 ///
200 /// TODO: support the `HGTEST_UUIDFILE` environment variable.
200 /// TODO: support the `HGTEST_UUIDFILE` environment variable.
201 /// See `mercurial/revlogutils/docket.py`
201 /// See `mercurial/revlogutils/docket.py`
202 pub fn new_uid() -> String {
202 pub fn new_uid() -> String {
203 const ID_LENGTH: usize = 8;
203 const ID_LENGTH: usize = 8;
204 let mut id = String::with_capacity(ID_LENGTH);
204 let mut id = String::with_capacity(ID_LENGTH);
205 let mut rng = rand::thread_rng();
205 let mut rng = rand::thread_rng();
206 for _ in 0..ID_LENGTH {
206 for _ in 0..ID_LENGTH {
207 // One random hexadecimal digit.
207 // One random hexadecimal digit.
208 // `unwrap` never panics because `impl Write for String`
208 // `unwrap` never panics because `impl Write for String`
209 // never returns an error.
209 // never returns an error.
210 write!(&mut id, "{:x}", rng.gen_range(0..16)).unwrap();
210 write!(&mut id, "{:x}", rng.gen_range(0..16)).unwrap();
211 }
211 }
212 id
212 id
213 }
213 }
214
214
215 pub fn serialize(
215 pub fn serialize(
216 parents: DirstateParents,
216 parents: DirstateParents,
217 tree_metadata: TreeMetadata,
217 tree_metadata: TreeMetadata,
218 data_size: u64,
218 data_size: u64,
219 uuid: &[u8],
219 uuid: &[u8],
220 ) -> Result<Vec<u8>, std::num::TryFromIntError> {
220 ) -> Result<Vec<u8>, std::num::TryFromIntError> {
221 let header = DocketHeader {
221 let header = DocketHeader {
222 marker: *V2_FORMAT_MARKER,
222 marker: *V2_FORMAT_MARKER,
223 parent_1: parents.p1.pad_to_256_bits(),
223 parent_1: parents.p1.pad_to_256_bits(),
224 parent_2: parents.p2.pad_to_256_bits(),
224 parent_2: parents.p2.pad_to_256_bits(),
225 metadata: tree_metadata,
225 metadata: tree_metadata,
226 data_size: u32::try_from(data_size)?.into(),
226 data_size: u32::try_from(data_size)?.into(),
227 uuid_size: uuid.len().try_into()?,
227 uuid_size: uuid.len().try_into()?,
228 };
228 };
229 let header = header.as_bytes();
229 let header = header.as_bytes();
230 let mut docket = Vec::with_capacity(header.len() + uuid.len());
230 let mut docket = Vec::with_capacity(header.len() + uuid.len());
231 docket.extend_from_slice(header);
231 docket.extend_from_slice(header);
232 docket.extend_from_slice(uuid);
232 docket.extend_from_slice(uuid);
233 Ok(docket)
233 Ok(docket)
234 }
234 }
235
235
236 pub fn parents(&self) -> DirstateParents {
236 pub fn parents(&self) -> DirstateParents {
237 use crate::Node;
237 use crate::Node;
238 let p1 = Node::try_from(&self.header.parent_1[..USED_NODE_ID_BYTES])
238 let p1 = Node::try_from(&self.header.parent_1[..USED_NODE_ID_BYTES])
239 .unwrap()
239 .unwrap()
240 .clone();
240 .clone();
241 let p2 = Node::try_from(&self.header.parent_2[..USED_NODE_ID_BYTES])
241 let p2 = Node::try_from(&self.header.parent_2[..USED_NODE_ID_BYTES])
242 .unwrap()
242 .unwrap()
243 .clone();
243 .clone();
244 DirstateParents { p1, p2 }
244 DirstateParents { p1, p2 }
245 }
245 }
246
246
247 pub fn tree_metadata(&self) -> &[u8] {
247 pub fn tree_metadata(&self) -> &[u8] {
248 self.header.metadata.as_bytes()
248 self.header.metadata.as_bytes()
249 }
249 }
250
250
251 pub fn data_size(&self) -> usize {
251 pub fn data_size(&self) -> usize {
252 // This `unwrap` could only panic on a 16-bit CPU
252 // This `unwrap` could only panic on a 16-bit CPU
253 self.header.data_size.get().try_into().unwrap()
253 self.header.data_size.get().try_into().unwrap()
254 }
254 }
255
255
256 pub fn data_filename(&self) -> String {
256 pub fn data_filename(&self) -> String {
257 String::from_utf8(format_bytes!(b"dirstate.{}", self.uuid)).unwrap()
257 String::from_utf8(format_bytes!(b"dirstate.{}", self.uuid)).unwrap()
258 }
258 }
259 }
259 }
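// Putting the helpers above together (the eight hex digits are random;
// "3f2a9c41" is purely illustrative): if `new_uid()` returns "3f2a9c41",
// then `data_filename()` yields "dirstate.3f2a9c41", the file in `.hg/`
// holding the tree data described by `tree_metadata()`.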
260
260
261 pub fn read_docket(
261 pub fn read_docket(
262 on_disk: &[u8],
262 on_disk: &[u8],
263 ) -> Result<Docket<'_>, DirstateV2ParseError> {
263 ) -> Result<Docket<'_>, DirstateV2ParseError> {
264 let (header, uuid) =
264 let (header, uuid) =
265 DocketHeader::from_bytes(on_disk).map_err(|_| DirstateV2ParseError)?;
265 DocketHeader::from_bytes(on_disk).map_err(|_| DirstateV2ParseError)?;
266 let uuid_size = header.uuid_size as usize;
266 let uuid_size = header.uuid_size as usize;
267 if header.marker == *V2_FORMAT_MARKER && uuid.len() == uuid_size {
267 if header.marker == *V2_FORMAT_MARKER && uuid.len() == uuid_size {
268 Ok(Docket { header, uuid })
268 Ok(Docket { header, uuid })
269 } else {
269 } else {
270 Err(DirstateV2ParseError)
270 Err(DirstateV2ParseError)
271 }
271 }
272 }
272 }
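// How the pieces compose (a sketch under assumptions: `dirstate_bytes`
// holds `.hg/dirstate`, `data_bytes` the docket's data file; `read`
// below is pub(super), so this only works from within the module tree):
fn load_sketch<'a>(
    dirstate_bytes: &[u8],
    data_bytes: &'a [u8],
) -> Result<DirstateMap<'a>, DirstateError> {
    let docket = read_docket(dirstate_bytes)?;
    // Only the first `data_size` bytes are meaningful; anything past
    // that point is not yet part of the current dirstate.
    let valid = &data_bytes[..docket.data_size()];
    Ok(read(valid, docket.tree_metadata())?)
}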
273
273
274 pub(super) fn read<'on_disk>(
274 pub(super) fn read<'on_disk>(
275 on_disk: &'on_disk [u8],
275 on_disk: &'on_disk [u8],
276 metadata: &[u8],
276 metadata: &[u8],
277 ) -> Result<DirstateMap<'on_disk>, DirstateV2ParseError> {
277 ) -> Result<DirstateMap<'on_disk>, DirstateV2ParseError> {
278 if on_disk.is_empty() {
278 if on_disk.is_empty() {
279 return Ok(DirstateMap::empty(on_disk));
279 return Ok(DirstateMap::empty(on_disk));
280 }
280 }
281 let (meta, _) = TreeMetadata::from_bytes(metadata)
281 let (meta, _) = TreeMetadata::from_bytes(metadata)
282 .map_err(|_| DirstateV2ParseError)?;
282 .map_err(|_| DirstateV2ParseError)?;
283 let dirstate_map = DirstateMap {
283 let dirstate_map = DirstateMap {
284 on_disk,
284 on_disk,
285 root: dirstate_map::ChildNodes::OnDisk(read_nodes(
285 root: dirstate_map::ChildNodes::OnDisk(read_nodes(
286 on_disk,
286 on_disk,
287 meta.root_nodes,
287 meta.root_nodes,
288 )?),
288 )?),
289 nodes_with_entry_count: meta.nodes_with_entry_count.get(),
289 nodes_with_entry_count: meta.nodes_with_entry_count.get(),
290 nodes_with_copy_source_count: meta.nodes_with_copy_source_count.get(),
290 nodes_with_copy_source_count: meta.nodes_with_copy_source_count.get(),
291 ignore_patterns_hash: meta.ignore_patterns_hash,
291 ignore_patterns_hash: meta.ignore_patterns_hash,
292 unreachable_bytes: meta.unreachable_bytes.get(),
292 unreachable_bytes: meta.unreachable_bytes.get(),
293 old_data_size: on_disk.len(),
293 old_data_size: on_disk.len(),
294 };
294 };
295 Ok(dirstate_map)
295 Ok(dirstate_map)
296 }
296 }
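Note the empty-input short-circuit above: a repository whose dirstate-v2 data file is missing or empty parses to an empty map before the metadata bytes are even consulted. A crate-internal sketch of that edge case (assuming the counters default to zero in `DirstateMap::empty`):

// Sketch only: empty data means DirstateMap::empty, metadata unused.
fn read_empty_case() -> Result<(), DirstateV2ParseError> {
    let map = read(b"", b"")?;
    assert_eq!(map.nodes_with_entry_count, 0);
    assert_eq!(map.unreachable_bytes, 0);
    Ok(())
}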
297
297
298 impl Node {
298 impl Node {
299 pub(super) fn full_path<'on_disk>(
299 pub(super) fn full_path<'on_disk>(
300 &self,
300 &self,
301 on_disk: &'on_disk [u8],
301 on_disk: &'on_disk [u8],
302 ) -> Result<&'on_disk HgPath, DirstateV2ParseError> {
302 ) -> Result<&'on_disk HgPath, DirstateV2ParseError> {
303 read_hg_path(on_disk, self.full_path)
303 read_hg_path(on_disk, self.full_path)
304 }
304 }
305
305
306 pub(super) fn base_name_start<'on_disk>(
306 pub(super) fn base_name_start<'on_disk>(
307 &self,
307 &self,
308 ) -> Result<usize, DirstateV2ParseError> {
308 ) -> Result<usize, DirstateV2ParseError> {
309 let start = self.base_name_start.get();
309 let start = self.base_name_start.get();
310 if start < self.full_path.len.get() {
310 if start < self.full_path.len.get() {
311 let start = usize::try_from(start)
311 let start = usize::try_from(start)
312 // u32 -> usize, could only panic on a 16-bit CPU
312 // u32 -> usize, could only panic on a 16-bit CPU
313 .expect("dirstate-v2 base_name_start out of bounds");
313 .expect("dirstate-v2 base_name_start out of bounds");
314 Ok(start)
314 Ok(start)
315 } else {
315 } else {
316 Err(DirstateV2ParseError)
316 Err(DirstateV2ParseError)
317 }
317 }
318 }
318 }
319
319
320 pub(super) fn base_name<'on_disk>(
320 pub(super) fn base_name<'on_disk>(
321 &self,
321 &self,
322 on_disk: &'on_disk [u8],
322 on_disk: &'on_disk [u8],
323 ) -> Result<&'on_disk HgPath, DirstateV2ParseError> {
323 ) -> Result<&'on_disk HgPath, DirstateV2ParseError> {
324 let full_path = self.full_path(on_disk)?;
324 let full_path = self.full_path(on_disk)?;
325 let base_name_start = self.base_name_start()?;
325 let base_name_start = self.base_name_start()?;
326 Ok(HgPath::new(&full_path.as_bytes()[base_name_start..]))
326 Ok(HgPath::new(&full_path.as_bytes()[base_name_start..]))
327 }
327 }
328
328
329 pub(super) fn path<'on_disk>(
329 pub(super) fn path<'on_disk>(
330 &self,
330 &self,
331 on_disk: &'on_disk [u8],
331 on_disk: &'on_disk [u8],
332 ) -> Result<dirstate_map::NodeKey<'on_disk>, DirstateV2ParseError> {
332 ) -> Result<dirstate_map::NodeKey<'on_disk>, DirstateV2ParseError> {
333 Ok(WithBasename::from_raw_parts(
333 Ok(WithBasename::from_raw_parts(
334 Cow::Borrowed(self.full_path(on_disk)?),
334 Cow::Borrowed(self.full_path(on_disk)?),
335 self.base_name_start()?,
335 self.base_name_start()?,
336 ))
336 ))
337 }
337 }
338
338
339 pub(super) fn has_copy_source<'on_disk>(&self) -> bool {
339 pub(super) fn has_copy_source<'on_disk>(&self) -> bool {
340 self.copy_source.start.get() != 0
340 self.copy_source.start.get() != 0
341 }
341 }
342
342
343 pub(super) fn copy_source<'on_disk>(
343 pub(super) fn copy_source<'on_disk>(
344 &self,
344 &self,
345 on_disk: &'on_disk [u8],
345 on_disk: &'on_disk [u8],
346 ) -> Result<Option<&'on_disk HgPath>, DirstateV2ParseError> {
346 ) -> Result<Option<&'on_disk HgPath>, DirstateV2ParseError> {
347 Ok(if self.has_copy_source() {
347 Ok(if self.has_copy_source() {
348 Some(read_hg_path(on_disk, self.copy_source)?)
348 Some(read_hg_path(on_disk, self.copy_source)?)
349 } else {
349 } else {
350 None
350 None
351 })
351 })
352 }
352 }
353
353
354 fn flags(&self) -> Flags {
354 fn flags(&self) -> Flags {
355 Flags::from_bits_truncate(self.flags.get())
355 Flags::from_bits_truncate(self.flags.get())
356 }
356 }
357
357
358 fn has_entry(&self) -> bool {
358 fn has_entry(&self) -> bool {
359 self.flags().intersects(
359 self.flags().intersects(
360 Flags::WDIR_TRACKED | Flags::P1_TRACKED | Flags::P2_INFO,
360 Flags::WDIR_TRACKED | Flags::P1_TRACKED | Flags::P2_INFO,
361 )
361 )
362 }
362 }
363
363
364 pub(super) fn node_data(
364 pub(super) fn node_data(
365 &self,
365 &self,
366 ) -> Result<dirstate_map::NodeData, DirstateV2ParseError> {
366 ) -> Result<dirstate_map::NodeData, DirstateV2ParseError> {
367 if self.has_entry() {
367 if self.has_entry() {
368 Ok(dirstate_map::NodeData::Entry(self.assume_entry()?))
368 Ok(dirstate_map::NodeData::Entry(self.assume_entry()?))
369 } else if let Some(mtime) = self.cached_directory_mtime()? {
369 } else if let Some(mtime) = self.cached_directory_mtime()? {
370 Ok(dirstate_map::NodeData::CachedDirectory { mtime })
370 Ok(dirstate_map::NodeData::CachedDirectory { mtime })
371 } else {
371 } else {
372 Ok(dirstate_map::NodeData::None)
372 Ok(dirstate_map::NodeData::None)
373 }
373 }
374 }
374 }
375
375
376 pub(super) fn cached_directory_mtime(
376 pub(super) fn cached_directory_mtime(
377 &self,
377 &self,
378 ) -> Result<Option<TruncatedTimestamp>, DirstateV2ParseError> {
378 ) -> Result<Option<TruncatedTimestamp>, DirstateV2ParseError> {
379 // For now we do not have code to handle the absence of
379 // For now we do not have code to handle the absence of
380 // ALL_UNKNOWN_RECORDED, so we ignore the mtime if the flag is
380 // ALL_UNKNOWN_RECORDED, so we ignore the mtime if the flag is
381 // unset.
381 // unset.
382 if self.flags().contains(Flags::DIRECTORY)
382 if self.flags().contains(Flags::DIRECTORY)
383 && self.flags().contains(Flags::HAS_MTIME)
383 && self.flags().contains(Flags::HAS_MTIME)
384 && self.flags().contains(Flags::ALL_UNKNOWN_RECORDED)
384 && self.flags().contains(Flags::ALL_UNKNOWN_RECORDED)
385 {
385 {
386 Ok(Some(self.mtime()?))
386 Ok(Some(self.mtime()?))
387 } else {
387 } else {
388 Ok(None)
388 Ok(None)
389 }
389 }
390 }
390 }
391
391
392 fn synthesize_unix_mode(&self) -> u32 {
392 fn synthesize_unix_mode(&self) -> u32 {
393 let file_type = if self.flags().contains(Flags::MODE_IS_SYMLINK) {
393 let file_type = if self.flags().contains(Flags::MODE_IS_SYMLINK) {
394 libc::S_IFLNK
394 libc::S_IFLNK
395 } else {
395 } else {
396 libc::S_IFREG
396 libc::S_IFREG
397 };
397 };
398 let permissions = if self.flags().contains(Flags::MODE_EXEC_PERM) {
398 let permissions = if self.flags().contains(Flags::MODE_EXEC_PERM) {
399 0o755
399 0o755
400 } else {
400 } else {
401 0o644
401 0o644
402 };
402 };
403 (file_type | permissions).into()
403 (file_type | permissions).into()
404 }
404 }
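The synthesized mode is simply the file-type bits OR'd with one of two canonical permission sets. A standalone illustration using the usual Unix octal values (spelled out here rather than taken from `libc`):

// Standalone illustration of composing a Unix mode word.
fn main() {
    const S_IFREG: u32 = 0o100000; // regular file
    const S_IFMT: u32 = 0o170000; // file-type mask
    let exec_mode = S_IFREG | 0o755;
    let plain_mode = S_IFREG | 0o644;
    assert_eq!(exec_mode, 0o100755);
    assert_eq!(plain_mode, 0o100644);
    // Masking with S_IFMT recovers the file type, which is what the
    // symlink comparison in from_dirstate_entry relies on.
    assert_eq!(exec_mode & S_IFMT, S_IFREG);
}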
405
405
406 fn mtime(&self) -> Result<TruncatedTimestamp, DirstateV2ParseError> {
406 fn mtime(&self) -> Result<TruncatedTimestamp, DirstateV2ParseError> {
407 let mut m: TruncatedTimestamp = self.mtime.try_into()?;
407 let mut m: TruncatedTimestamp = self.mtime.try_into()?;
408 if self.flags().contains(Flags::MTIME_SECOND_AMBIGUOUS) {
408 if self.flags().contains(Flags::MTIME_SECOND_AMBIGUOUS) {
409 m.second_ambiguous = true;
409 m.second_ambiguous = true;
410 }
410 }
411 Ok(m)
411 Ok(m)
412 }
412 }
413
413
414 fn assume_entry(&self) -> Result<DirstateEntry, DirstateV2ParseError> {
414 fn assume_entry(&self) -> Result<DirstateEntry, DirstateV2ParseError> {
415 // TODO: convert through raw bits instead?
415 // TODO: convert through raw bits instead?
416 let wc_tracked = self.flags().contains(Flags::WDIR_TRACKED);
416 let wc_tracked = self.flags().contains(Flags::WDIR_TRACKED);
417 let p1_tracked = self.flags().contains(Flags::P1_TRACKED);
417 let p1_tracked = self.flags().contains(Flags::P1_TRACKED);
418 let p2_info = self.flags().contains(Flags::P2_INFO);
418 let p2_info = self.flags().contains(Flags::P2_INFO);
419 let mode_size = if self.flags().contains(Flags::HAS_MODE_AND_SIZE)
419 let mode_size = if self.flags().contains(Flags::HAS_MODE_AND_SIZE)
420 && !self.flags().contains(Flags::EXPECTED_STATE_IS_MODIFIED)
420 && !self.flags().contains(Flags::EXPECTED_STATE_IS_MODIFIED)
421 {
421 {
422 Some((self.synthesize_unix_mode(), self.size.into()))
422 Some((self.synthesize_unix_mode(), self.size.into()))
423 } else {
423 } else {
424 None
424 None
425 };
425 };
426 let mtime = if self.flags().contains(Flags::HAS_MTIME)
426 let mtime = if self.flags().contains(Flags::HAS_MTIME)
427 && !self.flags().contains(Flags::DIRECTORY)
427 && !self.flags().contains(Flags::DIRECTORY)
428 && !self.flags().contains(Flags::EXPECTED_STATE_IS_MODIFIED)
428 && !self.flags().contains(Flags::EXPECTED_STATE_IS_MODIFIED)
429 {
429 {
430 Some(self.mtime()?)
430 Some(self.mtime()?)
431 } else {
431 } else {
432 None
432 None
433 };
433 };
434 let fallback_exec = if self.flags().contains(Flags::HAS_FALLBACK_EXEC)
434 let fallback_exec = if self.flags().contains(Flags::HAS_FALLBACK_EXEC)
435 {
435 {
436 Some(self.flags().contains(Flags::FALLBACK_EXEC))
436 Some(self.flags().contains(Flags::FALLBACK_EXEC))
437 } else {
437 } else {
438 None
438 None
439 };
439 };
440 let fallback_symlink =
440 let fallback_symlink =
441 if self.flags().contains(Flags::HAS_FALLBACK_SYMLINK) {
441 if self.flags().contains(Flags::HAS_FALLBACK_SYMLINK) {
442 Some(self.flags().contains(Flags::FALLBACK_SYMLINK))
442 Some(self.flags().contains(Flags::FALLBACK_SYMLINK))
443 } else {
443 } else {
444 None
444 None
445 };
445 };
446 Ok(DirstateEntry::from_v2_data(DirstateV2Data {
446 Ok(DirstateEntry::from_v2_data(DirstateV2Data {
447 wc_tracked,
447 wc_tracked,
448 p1_tracked,
448 p1_tracked,
449 p2_info,
449 p2_info,
450 mode_size,
450 mode_size,
451 mtime,
451 mtime,
452 fallback_exec,
452 fallback_exec,
453 fallback_symlink,
453 fallback_symlink,
454 }))
454 }))
455 }
455 }
456
456
457 pub(super) fn entry(
457 pub(super) fn entry(
458 &self,
458 &self,
459 ) -> Result<Option<DirstateEntry>, DirstateV2ParseError> {
459 ) -> Result<Option<DirstateEntry>, DirstateV2ParseError> {
460 if self.has_entry() {
460 if self.has_entry() {
461 Ok(Some(self.assume_entry()?))
461 Ok(Some(self.assume_entry()?))
462 } else {
462 } else {
463 Ok(None)
463 Ok(None)
464 }
464 }
465 }
465 }
466
466
467 pub(super) fn children<'on_disk>(
467 pub(super) fn children<'on_disk>(
468 &self,
468 &self,
469 on_disk: &'on_disk [u8],
469 on_disk: &'on_disk [u8],
470 ) -> Result<&'on_disk [Node], DirstateV2ParseError> {
470 ) -> Result<&'on_disk [Node], DirstateV2ParseError> {
471 read_nodes(on_disk, self.children)
471 read_nodes(on_disk, self.children)
472 }
472 }
473
473
474 pub(super) fn to_in_memory_node<'on_disk>(
474 pub(super) fn to_in_memory_node<'on_disk>(
475 &self,
475 &self,
476 on_disk: &'on_disk [u8],
476 on_disk: &'on_disk [u8],
477 ) -> Result<dirstate_map::Node<'on_disk>, DirstateV2ParseError> {
477 ) -> Result<dirstate_map::Node<'on_disk>, DirstateV2ParseError> {
478 Ok(dirstate_map::Node {
478 Ok(dirstate_map::Node {
479 children: dirstate_map::ChildNodes::OnDisk(
479 children: dirstate_map::ChildNodes::OnDisk(
480 self.children(on_disk)?,
480 self.children(on_disk)?,
481 ),
481 ),
482 copy_source: self.copy_source(on_disk)?.map(Cow::Borrowed),
482 copy_source: self.copy_source(on_disk)?.map(Cow::Borrowed),
483 data: self.node_data()?,
483 data: self.node_data()?,
484 descendants_with_entry_count: self
484 descendants_with_entry_count: self
485 .descendants_with_entry_count
485 .descendants_with_entry_count
486 .get(),
486 .get(),
487 tracked_descendants_count: self.tracked_descendants_count.get(),
487 tracked_descendants_count: self.tracked_descendants_count.get(),
488 })
488 })
489 }
489 }
490
490
491 fn from_dirstate_entry(
491 fn from_dirstate_entry(
492 entry: &DirstateEntry,
492 entry: &DirstateEntry,
493 ) -> (Flags, U32Be, PackedTruncatedTimestamp) {
493 ) -> (Flags, U32Be, PackedTruncatedTimestamp) {
494 let DirstateV2Data {
494 let DirstateV2Data {
495 wc_tracked,
495 wc_tracked,
496 p1_tracked,
496 p1_tracked,
497 p2_info,
497 p2_info,
498 mode_size: mode_size_opt,
498 mode_size: mode_size_opt,
499 mtime: mtime_opt,
499 mtime: mtime_opt,
500 fallback_exec,
500 fallback_exec,
501 fallback_symlink,
501 fallback_symlink,
502 } = entry.v2_data();
502 } = entry.v2_data();
503 // TODO: convert through raw flag bits instead?
503 // TODO: convert through raw flag bits instead?
504 let mut flags = Flags::empty();
504 let mut flags = Flags::empty();
505 flags.set(Flags::WDIR_TRACKED, wc_tracked);
505 flags.set(Flags::WDIR_TRACKED, wc_tracked);
506 flags.set(Flags::P1_TRACKED, p1_tracked);
506 flags.set(Flags::P1_TRACKED, p1_tracked);
507 flags.set(Flags::P2_INFO, p2_info);
507 flags.set(Flags::P2_INFO, p2_info);
508 let size = if let Some((m, s)) = mode_size_opt {
508 let size = if let Some((m, s)) = mode_size_opt {
509 let exec_perm = m & (libc::S_IXUSR as u32) != 0;
509 let exec_perm = m & (libc::S_IXUSR as u32) != 0;
510 let is_symlink = m & (libc::S_IFMT as u32) == libc::S_IFLNK as u32;
510 let is_symlink = m & (libc::S_IFMT as u32) == libc::S_IFLNK as u32;
511 flags.set(Flags::MODE_EXEC_PERM, exec_perm);
511 flags.set(Flags::MODE_EXEC_PERM, exec_perm);
512 flags.set(Flags::MODE_IS_SYMLINK, is_symlink);
512 flags.set(Flags::MODE_IS_SYMLINK, is_symlink);
513 flags.insert(Flags::HAS_MODE_AND_SIZE);
513 flags.insert(Flags::HAS_MODE_AND_SIZE);
514 s.into()
514 s.into()
515 } else {
515 } else {
516 0.into()
516 0.into()
517 };
517 };
518 let mtime = if let Some(m) = mtime_opt {
518 let mtime = if let Some(m) = mtime_opt {
519 flags.insert(Flags::HAS_MTIME);
519 flags.insert(Flags::HAS_MTIME);
520 if m.second_ambiguous {
520 if m.second_ambiguous {
521 flags.insert(Flags::MTIME_SECOND_AMBIGUOUS);
521 flags.insert(Flags::MTIME_SECOND_AMBIGUOUS);
522 };
522 };
523 m.into()
523 m.into()
524 } else {
524 } else {
525 PackedTruncatedTimestamp::null()
525 PackedTruncatedTimestamp::null()
526 };
526 };
527 if let Some(f_exec) = fallback_exec {
527 if let Some(f_exec) = fallback_exec {
528 flags.insert(Flags::HAS_FALLBACK_EXEC);
528 flags.insert(Flags::HAS_FALLBACK_EXEC);
529 if f_exec {
529 if f_exec {
530 flags.insert(Flags::FALLBACK_EXEC);
530 flags.insert(Flags::FALLBACK_EXEC);
531 }
531 }
532 }
532 }
533 if let Some(f_symlink) = fallback_symlink {
533 if let Some(f_symlink) = fallback_symlink {
534 flags.insert(Flags::HAS_FALLBACK_SYMLINK);
534 flags.insert(Flags::HAS_FALLBACK_SYMLINK);
535 if f_symlink {
535 if f_symlink {
536 flags.insert(Flags::FALLBACK_SYMLINK);
536 flags.insert(Flags::FALLBACK_SYMLINK);
537 }
537 }
538 }
538 }
539 (flags, size, mtime)
539 (flags, size, mtime)
540 }
540 }
541 }
541 }
542
542
543 fn read_hg_path(
543 fn read_hg_path(
544 on_disk: &[u8],
544 on_disk: &[u8],
545 slice: PathSlice,
545 slice: PathSlice,
546 ) -> Result<&HgPath, DirstateV2ParseError> {
546 ) -> Result<&HgPath, DirstateV2ParseError> {
547 read_slice(on_disk, slice.start, slice.len.get()).map(HgPath::new)
547 read_slice(on_disk, slice.start, slice.len.get()).map(HgPath::new)
548 }
548 }
549
549
550 fn read_nodes(
550 fn read_nodes(
551 on_disk: &[u8],
551 on_disk: &[u8],
552 slice: ChildNodes,
552 slice: ChildNodes,
553 ) -> Result<&[Node], DirstateV2ParseError> {
553 ) -> Result<&[Node], DirstateV2ParseError> {
554 read_slice(on_disk, slice.start, slice.len.get())
554 read_slice(on_disk, slice.start, slice.len.get())
555 }
555 }
556
556
557 fn read_slice<T, Len>(
557 fn read_slice<T, Len>(
558 on_disk: &[u8],
558 on_disk: &[u8],
559 start: Offset,
559 start: Offset,
560 len: Len,
560 len: Len,
561 ) -> Result<&[T], DirstateV2ParseError>
561 ) -> Result<&[T], DirstateV2ParseError>
562 where
562 where
563 T: BytesCast,
563 T: BytesCast,
564 Len: TryInto<usize>,
564 Len: TryInto<usize>,
565 {
565 {
566 // Either way, `usize::MAX` would result in an "out of bounds" error since
566 // Either way, `usize::MAX` would result in an "out of bounds" error since
567 // a single `&[u8]` cannot occupy the entire address space.
567 // a single `&[u8]` cannot occupy the entire address space.
568 let start = start.get().try_into().unwrap_or(std::usize::MAX);
568 let start = start.get().try_into().unwrap_or(std::usize::MAX);
569 let len = len.try_into().unwrap_or(std::usize::MAX);
569 let len = len.try_into().unwrap_or(std::usize::MAX);
570 on_disk
570 on_disk
571 .get(start..)
571 .get(start..)
572 .and_then(|bytes| T::slice_from_bytes(bytes, len).ok())
572 .and_then(|bytes| T::slice_from_bytes(bytes, len).ok())
573 .map(|(slice, _rest)| slice)
573 .map(|(slice, _rest)| slice)
574 .ok_or_else(|| DirstateV2ParseError)
574 .ok_or_else(|| DirstateV2ParseError)
575 }
575 }
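The saturating conversions above are sound because an out-of-range start or length degrades into the same failure path as any other bad offset: `get` returns `None` and the caller sees `DirstateV2ParseError`. A standalone illustration of the slice behavior being relied on:

// Standalone: out-of-range starts simply yield None.
fn main() {
    let bytes = [1u8, 2, 3];
    assert!(bytes.get(usize::MAX..).is_none());
    assert!(bytes.get(10..).is_none());
    // In-range starts give a tail whose length can then be checked.
    assert_eq!(bytes.get(1..), Some(&[2u8, 3][..]));
}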
576
576
577 pub(crate) fn for_each_tracked_path<'on_disk>(
577 pub(crate) fn for_each_tracked_path<'on_disk>(
578 on_disk: &'on_disk [u8],
578 on_disk: &'on_disk [u8],
579 metadata: &[u8],
579 metadata: &[u8],
580 mut f: impl FnMut(&'on_disk HgPath),
580 mut f: impl FnMut(&'on_disk HgPath),
581 ) -> Result<(), DirstateV2ParseError> {
581 ) -> Result<(), DirstateV2ParseError> {
582 let (meta, _) = TreeMetadata::from_bytes(metadata)
582 let (meta, _) = TreeMetadata::from_bytes(metadata)
583 .map_err(|_| DirstateV2ParseError)?;
583 .map_err(|_| DirstateV2ParseError)?;
584 fn recur<'on_disk>(
584 fn recur<'on_disk>(
585 on_disk: &'on_disk [u8],
585 on_disk: &'on_disk [u8],
586 nodes: ChildNodes,
586 nodes: ChildNodes,
587 f: &mut impl FnMut(&'on_disk HgPath),
587 f: &mut impl FnMut(&'on_disk HgPath),
588 ) -> Result<(), DirstateV2ParseError> {
588 ) -> Result<(), DirstateV2ParseError> {
589 for node in read_nodes(on_disk, nodes)? {
589 for node in read_nodes(on_disk, nodes)? {
590 if let Some(entry) = node.entry()? {
590 if let Some(entry) = node.entry()? {
591 if entry.tracked() {
591 if entry.tracked() {
592 f(node.full_path(on_disk)?)
592 f(node.full_path(on_disk)?)
593 }
593 }
594 }
594 }
595 recur(on_disk, node.children, f)?
595 recur(on_disk, node.children, f)?
596 }
596 }
597 Ok(())
597 Ok(())
598 }
598 }
599 recur(on_disk, meta.root_nodes, &mut f)
599 recur(on_disk, meta.root_nodes, &mut f)
600 }
600 }
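For callers that only need the set of tracked files, this walks the on-disk tree directly instead of materializing a full `DirstateMap`. A hedged usage sketch, assuming the docket has already been read and its data file loaded (crate-internal):

// Sketch only: `on_disk` is the docket's data file contents and
// `metadata` comes from docket.tree_metadata().
fn print_tracked(
    on_disk: &[u8],
    metadata: &[u8],
) -> Result<(), DirstateV2ParseError> {
    for_each_tracked_path(on_disk, metadata, |path| {
        println!("{}", String::from_utf8_lossy(path.as_bytes()));
    })
}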
601
601
602 /// Returns new data and metadata, together with whether that data should be
602 /// Returns new data and metadata, together with whether that data should be
603 /// appended to the existing data file whose content is at
603 /// appended to the existing data file whose content is at
604 /// `dirstate_map.on_disk` (true), instead of written to a new data file
604 /// `dirstate_map.on_disk` (true), instead of written to a new data file
605 /// (false), and the previous size of data on disk.
605 /// (false), and the previous size of data on disk.
606 pub(super) fn write(
606 pub(super) fn write(
607 dirstate_map: &DirstateMap,
607 dirstate_map: &DirstateMap,
608 can_append: bool,
608 can_append: bool,
609 ) -> Result<(Vec<u8>, TreeMetadata, bool, usize), DirstateError> {
609 ) -> Result<(Vec<u8>, TreeMetadata, bool, usize), DirstateError> {
610 let append = can_append && dirstate_map.write_should_append();
610 let append = can_append && dirstate_map.write_should_append();
611
611
612 // This ignores the space for paths, and for nodes without an entry.
612 // This ignores the space for paths, and for nodes without an entry.
613 // TODO: better estimate? Skip the `Vec` and write to a file directly?
613 // TODO: better estimate? Skip the `Vec` and write to a file directly?
614 let size_guess = std::mem::size_of::<Node>()
614 let size_guess = std::mem::size_of::<Node>()
615 * dirstate_map.nodes_with_entry_count as usize;
615 * dirstate_map.nodes_with_entry_count as usize;
616
616
617 let mut writer = Writer {
617 let mut writer = Writer {
618 dirstate_map,
618 dirstate_map,
619 append,
619 append,
620 out: Vec::with_capacity(size_guess),
620 out: Vec::with_capacity(size_guess),
621 };
621 };
622
622
623 let root_nodes = writer.write_nodes(dirstate_map.root.as_ref())?;
623 let root_nodes = writer.write_nodes(dirstate_map.root.as_ref())?;
624
624
625 let unreachable_bytes = if append {
626 dirstate_map.unreachable_bytes
627 } else {
628 0
629 };
625 let meta = TreeMetadata {
630 let meta = TreeMetadata {
626 root_nodes,
631 root_nodes,
627 nodes_with_entry_count: dirstate_map.nodes_with_entry_count.into(),
632 nodes_with_entry_count: dirstate_map.nodes_with_entry_count.into(),
628 nodes_with_copy_source_count: dirstate_map
633 nodes_with_copy_source_count: dirstate_map
629 .nodes_with_copy_source_count
634 .nodes_with_copy_source_count
630 .into(),
635 .into(),
631 unreachable_bytes: dirstate_map.unreachable_bytes.into(),
636 unreachable_bytes: unreachable_bytes.into(),
632 unused: [0; 4],
637 unused: [0; 4],
633 ignore_patterns_hash: dirstate_map.ignore_patterns_hash,
638 ignore_patterns_hash: dirstate_map.ignore_patterns_hash,
634 };
639 };
635 Ok((writer.out, meta, append, dirstate_map.old_data_size))
640 Ok((writer.out, meta, append, dirstate_map.old_data_size))
636 }
641 }
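The caller is left to act on that tuple: append in place at the previous size, or start a fresh data file and emit a new docket. A rough sketch of that decision using plain `std::fs` as a stand-in for hg-core's `Vfs` layer (the real rhg code differs in its error and locking handling):

use std::fs::OpenOptions;
use std::io::{Seek, SeekFrom, Write};

// Sketch only: pretend `data`, `append` and `old_data_size` came from
// on_disk::write(); std::fs stands in for the repository Vfs.
fn apply_write_result(
    data_path: &str,
    data: &[u8],
    append: bool,
    old_data_size: u64,
) -> std::io::Result<()> {
    if append {
        // Reuse the existing data file: cut it back to the size recorded
        // in the docket (anything past that is garbage from an
        // interrupted write), then append the new nodes and paths.
        let mut file = OpenOptions::new().write(true).open(data_path)?;
        file.set_len(old_data_size)?;
        file.seek(SeekFrom::End(0))?;
        file.write_all(data)?;
    } else {
        // Fresh start: write a new data file; a new docket pointing at
        // it must then replace .hg/dirstate.
        std::fs::write(data_path, data)?;
    }
    Ok(())
}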
637
642
638 struct Writer<'dmap, 'on_disk> {
643 struct Writer<'dmap, 'on_disk> {
639 dirstate_map: &'dmap DirstateMap<'on_disk>,
644 dirstate_map: &'dmap DirstateMap<'on_disk>,
640 append: bool,
645 append: bool,
641 out: Vec<u8>,
646 out: Vec<u8>,
642 }
647 }
643
648
644 impl Writer<'_, '_> {
649 impl Writer<'_, '_> {
645 fn write_nodes(
650 fn write_nodes(
646 &mut self,
651 &mut self,
647 nodes: dirstate_map::ChildNodesRef,
652 nodes: dirstate_map::ChildNodesRef,
648 ) -> Result<ChildNodes, DirstateError> {
653 ) -> Result<ChildNodes, DirstateError> {
649 // Reuse already-written nodes if possible
654 // Reuse already-written nodes if possible
650 if self.append {
655 if self.append {
651 if let dirstate_map::ChildNodesRef::OnDisk(nodes_slice) = nodes {
656 if let dirstate_map::ChildNodesRef::OnDisk(nodes_slice) = nodes {
652 let start = self.on_disk_offset_of(nodes_slice).expect(
657 let start = self.on_disk_offset_of(nodes_slice).expect(
653 "dirstate-v2 OnDisk nodes not found within on_disk",
658 "dirstate-v2 OnDisk nodes not found within on_disk",
654 );
659 );
655 let len = child_nodes_len_from_usize(nodes_slice.len());
660 let len = child_nodes_len_from_usize(nodes_slice.len());
656 return Ok(ChildNodes { start, len });
661 return Ok(ChildNodes { start, len });
657 }
662 }
658 }
663 }
659
664
660 // `dirstate_map::ChildNodes::InMemory` contains a `HashMap` which has
665 // `dirstate_map::ChildNodes::InMemory` contains a `HashMap` which has
661 // undefined iteration order. Sort to enable binary search in the
666 // undefined iteration order. Sort to enable binary search in the
662 // written file.
667 // written file.
663 let nodes = nodes.sorted();
668 let nodes = nodes.sorted();
664 let nodes_len = nodes.len();
669 let nodes_len = nodes.len();
665
670
666 // First accumulate serialized nodes in a `Vec`
671 // First accumulate serialized nodes in a `Vec`
667 let mut on_disk_nodes = Vec::with_capacity(nodes_len);
672 let mut on_disk_nodes = Vec::with_capacity(nodes_len);
668 for node in nodes {
673 for node in nodes {
669 let children =
674 let children =
670 self.write_nodes(node.children(self.dirstate_map.on_disk)?)?;
675 self.write_nodes(node.children(self.dirstate_map.on_disk)?)?;
671 let full_path = node.full_path(self.dirstate_map.on_disk)?;
676 let full_path = node.full_path(self.dirstate_map.on_disk)?;
672 let full_path = self.write_path(full_path.as_bytes());
677 let full_path = self.write_path(full_path.as_bytes());
673 let copy_source = if let Some(source) =
678 let copy_source = if let Some(source) =
674 node.copy_source(self.dirstate_map.on_disk)?
679 node.copy_source(self.dirstate_map.on_disk)?
675 {
680 {
676 self.write_path(source.as_bytes())
681 self.write_path(source.as_bytes())
677 } else {
682 } else {
678 PathSlice {
683 PathSlice {
679 start: 0.into(),
684 start: 0.into(),
680 len: 0.into(),
685 len: 0.into(),
681 }
686 }
682 };
687 };
683 on_disk_nodes.push(match node {
688 on_disk_nodes.push(match node {
684 NodeRef::InMemory(path, node) => {
689 NodeRef::InMemory(path, node) => {
685 let (flags, size, mtime) = match &node.data {
690 let (flags, size, mtime) = match &node.data {
686 dirstate_map::NodeData::Entry(entry) => {
691 dirstate_map::NodeData::Entry(entry) => {
687 Node::from_dirstate_entry(entry)
692 Node::from_dirstate_entry(entry)
688 }
693 }
689 dirstate_map::NodeData::CachedDirectory { mtime } => {
694 dirstate_map::NodeData::CachedDirectory { mtime } => {
690 // we currently never set an mtime if unknown
695 // we currently never set an mtime if unknown
691 // files are present.
696 // files are present.
692 // So if we have an mtime for a directory, we know
697 // So if we have an mtime for a directory, we know
693 // there are no unknown
698 // there are no unknown
694 // files and we
699 // files and we
695 // blindly set ALL_UNKNOWN_RECORDED.
700 // blindly set ALL_UNKNOWN_RECORDED.
696 //
701 //
697 // We never set ALL_IGNORED_RECORDED since we
702 // We never set ALL_IGNORED_RECORDED since we
698 // don't track that case
703 // don't track that case
699 // currently.
704 // currently.
700 let mut flags = Flags::DIRECTORY
705 let mut flags = Flags::DIRECTORY
701 | Flags::HAS_MTIME
706 | Flags::HAS_MTIME
702 | Flags::ALL_UNKNOWN_RECORDED;
707 | Flags::ALL_UNKNOWN_RECORDED;
703 if mtime.second_ambiguous {
708 if mtime.second_ambiguous {
704 flags.insert(Flags::MTIME_SECOND_AMBIGUOUS)
709 flags.insert(Flags::MTIME_SECOND_AMBIGUOUS)
705 }
710 }
706 (flags, 0.into(), (*mtime).into())
711 (flags, 0.into(), (*mtime).into())
707 }
712 }
708 dirstate_map::NodeData::None => (
713 dirstate_map::NodeData::None => (
709 Flags::DIRECTORY,
714 Flags::DIRECTORY,
710 0.into(),
715 0.into(),
711 PackedTruncatedTimestamp::null(),
716 PackedTruncatedTimestamp::null(),
712 ),
717 ),
713 };
718 };
714 Node {
719 Node {
715 children,
720 children,
716 copy_source,
721 copy_source,
717 full_path,
722 full_path,
718 base_name_start: u16::try_from(path.base_name_start())
723 base_name_start: u16::try_from(path.base_name_start())
719 // Could only panic for paths over 64 KiB
724 // Could only panic for paths over 64 KiB
720 .expect("dirstate-v2 path length overflow")
725 .expect("dirstate-v2 path length overflow")
721 .into(),
726 .into(),
722 descendants_with_entry_count: node
727 descendants_with_entry_count: node
723 .descendants_with_entry_count
728 .descendants_with_entry_count
724 .into(),
729 .into(),
725 tracked_descendants_count: node
730 tracked_descendants_count: node
726 .tracked_descendants_count
731 .tracked_descendants_count
727 .into(),
732 .into(),
728 flags: flags.bits().into(),
733 flags: flags.bits().into(),
729 size,
734 size,
730 mtime,
735 mtime,
731 }
736 }
732 }
737 }
733 NodeRef::OnDisk(node) => Node {
738 NodeRef::OnDisk(node) => Node {
734 children,
739 children,
735 copy_source,
740 copy_source,
736 full_path,
741 full_path,
737 ..*node
742 ..*node
738 },
743 },
739 })
744 })
740 }
745 }
741 // … so we can write them contiguously, after writing everything else
746 // … so we can write them contiguously, after writing everything else
742 // they refer to.
747 // they refer to.
743 let start = self.current_offset();
748 let start = self.current_offset();
744 let len = child_nodes_len_from_usize(nodes_len);
749 let len = child_nodes_len_from_usize(nodes_len);
745 self.out.extend(on_disk_nodes.as_bytes());
750 self.out.extend(on_disk_nodes.as_bytes());
746 Ok(ChildNodes { start, len })
751 Ok(ChildNodes { start, len })
747 }
752 }
748
753
749 /// If the given slice of items is within `on_disk`, returns its offset
754 /// If the given slice of items is within `on_disk`, returns its offset
750 /// from the start of `on_disk`.
755 /// from the start of `on_disk`.
751 fn on_disk_offset_of<T>(&self, slice: &[T]) -> Option<Offset>
756 fn on_disk_offset_of<T>(&self, slice: &[T]) -> Option<Offset>
752 where
757 where
753 T: BytesCast,
758 T: BytesCast,
754 {
759 {
755 fn address_range(slice: &[u8]) -> std::ops::RangeInclusive<usize> {
760 fn address_range(slice: &[u8]) -> std::ops::RangeInclusive<usize> {
756 let start = slice.as_ptr() as usize;
761 let start = slice.as_ptr() as usize;
757 let end = start + slice.len();
762 let end = start + slice.len();
758 start..=end
763 start..=end
759 }
764 }
760 let slice_addresses = address_range(slice.as_bytes());
765 let slice_addresses = address_range(slice.as_bytes());
761 let on_disk_addresses = address_range(self.dirstate_map.on_disk);
766 let on_disk_addresses = address_range(self.dirstate_map.on_disk);
762 if on_disk_addresses.contains(slice_addresses.start())
767 if on_disk_addresses.contains(slice_addresses.start())
763 && on_disk_addresses.contains(slice_addresses.end())
768 && on_disk_addresses.contains(slice_addresses.end())
764 {
769 {
765 let offset = slice_addresses.start() - on_disk_addresses.start();
770 let offset = slice_addresses.start() - on_disk_addresses.start();
766 Some(offset_from_usize(offset))
771 Some(offset_from_usize(offset))
767 } else {
772 } else {
768 None
773 None
769 }
774 }
770 }
775 }
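The containment test works purely on addresses: a slice lies within `on_disk` exactly when both its start and (inclusive) end addresses fall inside the buffer's own range, the inclusive end making zero-length slices at the very end count as inside. The same technique, standalone:

// Standalone illustration of the address-range containment test.
fn is_within(outer: &[u8], inner: &[u8]) -> bool {
    let outer_start = outer.as_ptr() as usize;
    let outer_end = outer_start + outer.len();
    let inner_start = inner.as_ptr() as usize;
    let inner_end = inner_start + inner.len();
    outer_start <= inner_start && inner_end <= outer_end
}

fn main() {
    let buf = [0u8; 16];
    assert!(is_within(&buf, &buf[4..8]));
    assert!(is_within(&buf, &buf[16..])); // empty tail still counts
    assert!(!is_within(&buf, &[1, 2, 3]));
}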
771
776
772 fn current_offset(&mut self) -> Offset {
777 fn current_offset(&mut self) -> Offset {
773 let mut offset = self.out.len();
778 let mut offset = self.out.len();
774 if self.append {
779 if self.append {
775 offset += self.dirstate_map.on_disk.len()
780 offset += self.dirstate_map.on_disk.len()
776 }
781 }
777 offset_from_usize(offset)
782 offset_from_usize(offset)
778 }
783 }
779
784
780 fn write_path(&mut self, slice: &[u8]) -> PathSlice {
785 fn write_path(&mut self, slice: &[u8]) -> PathSlice {
781 let len = path_len_from_usize(slice.len());
786 let len = path_len_from_usize(slice.len());
782 // Reuse an already-written path if possible
787 // Reuse an already-written path if possible
783 if self.append {
788 if self.append {
784 if let Some(start) = self.on_disk_offset_of(slice) {
789 if let Some(start) = self.on_disk_offset_of(slice) {
785 return PathSlice { start, len };
790 return PathSlice { start, len };
786 }
791 }
787 }
792 }
788 let start = self.current_offset();
793 let start = self.current_offset();
789 self.out.extend(slice.as_bytes());
794 self.out.extend(slice.as_bytes());
790 PathSlice { start, len }
795 PathSlice { start, len }
791 }
796 }
792 }
797 }
793
798
794 fn offset_from_usize(x: usize) -> Offset {
799 fn offset_from_usize(x: usize) -> Offset {
795 u32::try_from(x)
800 u32::try_from(x)
796 // Could only panic for a dirstate file larger than 4 GiB
801 // Could only panic for a dirstate file larger than 4 GiB
797 .expect("dirstate-v2 offset overflow")
802 .expect("dirstate-v2 offset overflow")
798 .into()
803 .into()
799 }
804 }
800
805
801 fn child_nodes_len_from_usize(x: usize) -> Size {
806 fn child_nodes_len_from_usize(x: usize) -> Size {
802 u32::try_from(x)
807 u32::try_from(x)
803 // Could only panic with over 4 billion nodes
808 // Could only panic with over 4 billion nodes
804 .expect("dirstate-v2 slice length overflow")
809 .expect("dirstate-v2 slice length overflow")
805 .into()
810 .into()
806 }
811 }
807
812
808 fn path_len_from_usize(x: usize) -> PathSize {
813 fn path_len_from_usize(x: usize) -> PathSize {
809 u16::try_from(x)
814 u16::try_from(x)
810 // Could only panic for paths over 64 KiB
815 // Could only panic for paths over 64 KiB
811 .expect("dirstate-v2 path length overflow")
816 .expect("dirstate-v2 path length overflow")
812 .into()
817 .into()
813 }
818 }
814
819
815 impl From<TruncatedTimestamp> for PackedTruncatedTimestamp {
820 impl From<TruncatedTimestamp> for PackedTruncatedTimestamp {
816 fn from(timestamp: TruncatedTimestamp) -> Self {
821 fn from(timestamp: TruncatedTimestamp) -> Self {
817 Self {
822 Self {
818 truncated_seconds: timestamp.truncated_seconds().into(),
823 truncated_seconds: timestamp.truncated_seconds().into(),
819 nanoseconds: timestamp.nanoseconds().into(),
824 nanoseconds: timestamp.nanoseconds().into(),
820 }
825 }
821 }
826 }
822 }
827 }
823
828
824 impl TryFrom<PackedTruncatedTimestamp> for TruncatedTimestamp {
829 impl TryFrom<PackedTruncatedTimestamp> for TruncatedTimestamp {
825 type Error = DirstateV2ParseError;
830 type Error = DirstateV2ParseError;
826
831
827 fn try_from(
832 fn try_from(
828 timestamp: PackedTruncatedTimestamp,
833 timestamp: PackedTruncatedTimestamp,
829 ) -> Result<Self, Self::Error> {
834 ) -> Result<Self, Self::Error> {
830 Self::from_already_truncated(
835 Self::from_already_truncated(
831 timestamp.truncated_seconds.get(),
836 timestamp.truncated_seconds.get(),
832 timestamp.nanoseconds.get(),
837 timestamp.nanoseconds.get(),
833 false,
838 false,
834 )
839 )
835 }
840 }
836 }
841 }
837 impl PackedTruncatedTimestamp {
842 impl PackedTruncatedTimestamp {
838 fn null() -> Self {
843 fn null() -> Self {
839 Self {
844 Self {
840 truncated_seconds: 0.into(),
845 truncated_seconds: 0.into(),
841 nanoseconds: 0.into(),
846 nanoseconds: 0.into(),
842 }
847 }
843 }
848 }
844 }
849 }
@@ -1,19 +1,22 @@
1 pub type ExitCode = i32;
1 pub type ExitCode = i32;
2
2
3 /// Successful exit
3 /// Successful exit
4 pub const OK: ExitCode = 0;
4 pub const OK: ExitCode = 0;
5
5
6 /// Generic abort
6 /// Generic abort
7 pub const ABORT: ExitCode = 255;
7 pub const ABORT: ExitCode = 255;
8
8
9 /// Abort when there is a config related error
9 /// Abort when there is a config related error
10 pub const CONFIG_ERROR_ABORT: ExitCode = 30;
10 pub const CONFIG_ERROR_ABORT: ExitCode = 30;
11
11
12 /// Abort when there is an error while parsing config
12 /// Abort when there is an error while parsing config
13 pub const CONFIG_PARSE_ERROR_ABORT: ExitCode = 10;
13 pub const CONFIG_PARSE_ERROR_ABORT: ExitCode = 10;
14
14
15 /// Generic: something completed but did not succeed
15 /// Generic: something completed but did not succeed
16 pub const UNSUCCESSFUL: ExitCode = 1;
16 pub const UNSUCCESSFUL: ExitCode = 1;
17
17
18 /// Command or feature not implemented by rhg
18 /// Command or feature not implemented by rhg
19 pub const UNIMPLEMENTED: ExitCode = 252;
19 pub const UNIMPLEMENTED: ExitCode = 252;
20
21 /// The fallback path is not valid
22 pub const INVALID_FALLBACK: ExitCode = 253;
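These constants follow Mercurial's detailed exit-code conventions. A small illustration of mapping a command result onto them; `CommandError` here is a made-up stand-in, not rhg's actual error type:

// Illustrative only: rhg's real error type is richer than this.
enum CommandError {
    Abort,
    ConfigParse,
    Unimplemented,
}

fn exit_code(result: Result<(), CommandError>) -> ExitCode {
    match result {
        Ok(()) => OK,
        Err(CommandError::Abort) => ABORT,
        Err(CommandError::ConfigParse) => CONFIG_PARSE_ERROR_ABORT,
        Err(CommandError::Unimplemented) => UNIMPLEMENTED,
    }
}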
@@ -1,544 +1,551 @@
1 use crate::changelog::Changelog;
1 use crate::changelog::Changelog;
2 use crate::config::{Config, ConfigError, ConfigParseError};
2 use crate::config::{Config, ConfigError, ConfigParseError};
3 use crate::dirstate::DirstateParents;
3 use crate::dirstate::DirstateParents;
4 use crate::dirstate_tree::on_disk::Docket as DirstateDocket;
4 use crate::dirstate_tree::on_disk::Docket as DirstateDocket;
5 use crate::dirstate_tree::owning::OwningDirstateMap;
5 use crate::dirstate_tree::owning::OwningDirstateMap;
6 use crate::errors::HgResultExt;
6 use crate::errors::HgResultExt;
7 use crate::errors::{HgError, IoResultExt};
7 use crate::errors::{HgError, IoResultExt};
8 use crate::lock::{try_with_lock_no_wait, LockError};
8 use crate::lock::{try_with_lock_no_wait, LockError};
9 use crate::manifest::{Manifest, Manifestlog};
9 use crate::manifest::{Manifest, Manifestlog};
10 use crate::revlog::filelog::Filelog;
10 use crate::revlog::filelog::Filelog;
11 use crate::revlog::revlog::RevlogError;
11 use crate::revlog::revlog::RevlogError;
12 use crate::utils::files::get_path_from_bytes;
12 use crate::utils::files::get_path_from_bytes;
13 use crate::utils::hg_path::HgPath;
13 use crate::utils::hg_path::HgPath;
14 use crate::utils::SliceExt;
14 use crate::utils::SliceExt;
15 use crate::vfs::{is_dir, is_file, Vfs};
15 use crate::vfs::{is_dir, is_file, Vfs};
16 use crate::{requirements, NodePrefix};
16 use crate::{requirements, NodePrefix};
17 use crate::{DirstateError, Revision};
17 use crate::{DirstateError, Revision};
18 use std::cell::{Ref, RefCell, RefMut};
18 use std::cell::{Ref, RefCell, RefMut};
19 use std::collections::HashSet;
19 use std::collections::HashSet;
20 use std::io::Seek;
20 use std::io::Seek;
21 use std::io::SeekFrom;
21 use std::io::SeekFrom;
22 use std::io::Write as IoWrite;
22 use std::io::Write as IoWrite;
23 use std::path::{Path, PathBuf};
23 use std::path::{Path, PathBuf};
24
24
25 /// A repository on disk
25 /// A repository on disk
26 pub struct Repo {
26 pub struct Repo {
27 working_directory: PathBuf,
27 working_directory: PathBuf,
28 dot_hg: PathBuf,
28 dot_hg: PathBuf,
29 store: PathBuf,
29 store: PathBuf,
30 requirements: HashSet<String>,
30 requirements: HashSet<String>,
31 config: Config,
31 config: Config,
32 dirstate_parents: LazyCell<DirstateParents, HgError>,
32 dirstate_parents: LazyCell<DirstateParents, HgError>,
33 dirstate_data_file_uuid: LazyCell<Option<Vec<u8>>, HgError>,
33 dirstate_data_file_uuid: LazyCell<Option<Vec<u8>>, HgError>,
34 dirstate_map: LazyCell<OwningDirstateMap, DirstateError>,
34 dirstate_map: LazyCell<OwningDirstateMap, DirstateError>,
35 changelog: LazyCell<Changelog, HgError>,
35 changelog: LazyCell<Changelog, HgError>,
36 manifestlog: LazyCell<Manifestlog, HgError>,
36 manifestlog: LazyCell<Manifestlog, HgError>,
37 }
37 }
38
38
39 #[derive(Debug, derive_more::From)]
39 #[derive(Debug, derive_more::From)]
40 pub enum RepoError {
40 pub enum RepoError {
41 NotFound {
41 NotFound {
42 at: PathBuf,
42 at: PathBuf,
43 },
43 },
44 #[from]
44 #[from]
45 ConfigParseError(ConfigParseError),
45 ConfigParseError(ConfigParseError),
46 #[from]
46 #[from]
47 Other(HgError),
47 Other(HgError),
48 }
48 }
49
49
50 impl From<ConfigError> for RepoError {
50 impl From<ConfigError> for RepoError {
51 fn from(error: ConfigError) -> Self {
51 fn from(error: ConfigError) -> Self {
52 match error {
52 match error {
53 ConfigError::Parse(error) => error.into(),
53 ConfigError::Parse(error) => error.into(),
54 ConfigError::Other(error) => error.into(),
54 ConfigError::Other(error) => error.into(),
55 }
55 }
56 }
56 }
57 }
57 }
58
58
59 impl Repo {
59 impl Repo {
60 /// Tries to find the nearest repository root in the current working
60 /// Tries to find the nearest repository root in the current working
61 /// directory or its ancestors.
61 /// directory or its ancestors.
62 pub fn find_repo_root() -> Result<PathBuf, RepoError> {
62 pub fn find_repo_root() -> Result<PathBuf, RepoError> {
63 let current_directory = crate::utils::current_dir()?;
63 let current_directory = crate::utils::current_dir()?;
64 // ancestors() is inclusive: it first yields `current_directory`
64 // ancestors() is inclusive: it first yields `current_directory`
65 // as-is.
65 // as-is.
66 for ancestor in current_directory.ancestors() {
66 for ancestor in current_directory.ancestors() {
67 if is_dir(ancestor.join(".hg"))? {
67 if is_dir(ancestor.join(".hg"))? {
68 return Ok(ancestor.to_path_buf());
68 return Ok(ancestor.to_path_buf());
69 }
69 }
70 }
70 }
71 Err(RepoError::NotFound {
71 Err(RepoError::NotFound {
72 at: current_directory,
72 at: current_directory,
73 })
73 })
74 }
74 }
75
75
76 /// Find a repository, either at the given path (which must contain a `.hg`
76 /// Find a repository, either at the given path (which must contain a `.hg`
77 /// sub-directory) or by searching the current directory and its
77 /// sub-directory) or by searching the current directory and its
78 /// ancestors.
78 /// ancestors.
79 ///
79 ///
80 /// A method with two very different "modes" like this is usually a code
80 /// A method with two very different "modes" like this is usually a code
81 /// smell: make two methods instead. But in this case an `Option` is what rhg
81 /// smell: make two methods instead. But in this case an `Option` is what rhg
82 /// sub-commands get from Clap for the `-R` / `--repository` CLI argument.
82 /// sub-commands get from Clap for the `-R` / `--repository` CLI argument.
83 /// Having two methods would just move that `if` to almost all callers.
83 /// Having two methods would just move that `if` to almost all callers.
84 pub fn find(
84 pub fn find(
85 config: &Config,
85 config: &Config,
86 explicit_path: Option<PathBuf>,
86 explicit_path: Option<PathBuf>,
87 ) -> Result<Self, RepoError> {
87 ) -> Result<Self, RepoError> {
88 if let Some(root) = explicit_path {
88 if let Some(root) = explicit_path {
89 if is_dir(root.join(".hg"))? {
89 if is_dir(root.join(".hg"))? {
90 Self::new_at_path(root.to_owned(), config)
90 Self::new_at_path(root.to_owned(), config)
91 } else if is_file(&root)? {
91 } else if is_file(&root)? {
92 Err(HgError::unsupported("bundle repository").into())
92 Err(HgError::unsupported("bundle repository").into())
93 } else {
93 } else {
94 Err(RepoError::NotFound {
94 Err(RepoError::NotFound {
95 at: root.to_owned(),
95 at: root.to_owned(),
96 })
96 })
97 }
97 }
98 } else {
98 } else {
99 let root = Self::find_repo_root()?;
99 let root = Self::find_repo_root()?;
100 Self::new_at_path(root, config)
100 Self::new_at_path(root, config)
101 }
101 }
102 }
102 }
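So `find` folds the `-R`/`--repository` argument and the cwd search into a single entry point. A hedged usage sketch (crate-internal; `config` comes from however the caller loaded it, and the `Option<PathBuf>` is what Clap parsed):

// Sketch only: not a real rhg entry point.
fn open_repo(
    config: &Config,
    explicit_path: Option<PathBuf>,
) -> Result<Repo, RepoError> {
    let repo = Repo::find(config, explicit_path)?;
    println!("root: {}", repo.working_directory_path().display());
    Ok(repo)
}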
103
103
104 /// To be called after checking that `.hg` is a sub-directory
104 /// To be called after checking that `.hg` is a sub-directory
105 fn new_at_path(
105 fn new_at_path(
106 working_directory: PathBuf,
106 working_directory: PathBuf,
107 config: &Config,
107 config: &Config,
108 ) -> Result<Self, RepoError> {
108 ) -> Result<Self, RepoError> {
109 let dot_hg = working_directory.join(".hg");
109 let dot_hg = working_directory.join(".hg");
110
110
111 let mut repo_config_files = Vec::new();
111 let mut repo_config_files = Vec::new();
112 repo_config_files.push(dot_hg.join("hgrc"));
112 repo_config_files.push(dot_hg.join("hgrc"));
113 repo_config_files.push(dot_hg.join("hgrc-not-shared"));
113 repo_config_files.push(dot_hg.join("hgrc-not-shared"));
114
114
115 let hg_vfs = Vfs { base: &dot_hg };
115 let hg_vfs = Vfs { base: &dot_hg };
116 let mut reqs = requirements::load_if_exists(hg_vfs)?;
116 let mut reqs = requirements::load_if_exists(hg_vfs)?;
117 let relative =
117 let relative =
118 reqs.contains(requirements::RELATIVE_SHARED_REQUIREMENT);
118 reqs.contains(requirements::RELATIVE_SHARED_REQUIREMENT);
119 let shared =
119 let shared =
120 reqs.contains(requirements::SHARED_REQUIREMENT) || relative;
120 reqs.contains(requirements::SHARED_REQUIREMENT) || relative;
121
121
122 // From `mercurial/localrepo.py`:
122 // From `mercurial/localrepo.py`:
123 //
123 //
124 // if .hg/requires contains the sharesafe requirement, it means
124 // if .hg/requires contains the sharesafe requirement, it means
125 // there exists a `.hg/store/requires` too and we should read it
125 // there exists a `.hg/store/requires` too and we should read it
126 // NOTE: presence of SHARESAFE_REQUIREMENT implies that the store
126 // NOTE: presence of SHARESAFE_REQUIREMENT implies that the store
127 // requirement is present. We never write SHARESAFE_REQUIREMENT for a repo
127 // requirement is present. We never write SHARESAFE_REQUIREMENT for a repo
128 // if store is not present; refer to checkrequirementscompat() for that.
128 // if store is not present; refer to checkrequirementscompat() for that.
129 //
129 //
130 // However, if SHARESAFE_REQUIREMENT is not present, it means that the
130 // However, if SHARESAFE_REQUIREMENT is not present, it means that the
131 // repository was shared the old way. We check the share source
131 // repository was shared the old way. We check the share source
132 // .hg/requires for SHARESAFE_REQUIREMENT to detect whether the
132 // .hg/requires for SHARESAFE_REQUIREMENT to detect whether the
133 // current repository needs to be reshared
133 // current repository needs to be reshared
134 let share_safe = reqs.contains(requirements::SHARESAFE_REQUIREMENT);
134 let share_safe = reqs.contains(requirements::SHARESAFE_REQUIREMENT);
135
135
136 let store_path;
136 let store_path;
137 if !shared {
137 if !shared {
138 store_path = dot_hg.join("store");
138 store_path = dot_hg.join("store");
139 } else {
139 } else {
140 let bytes = hg_vfs.read("sharedpath")?;
140 let bytes = hg_vfs.read("sharedpath")?;
141 let mut shared_path =
141 let mut shared_path =
142 get_path_from_bytes(bytes.trim_end_matches(|b| b == b'\n'))
142 get_path_from_bytes(bytes.trim_end_matches(|b| b == b'\n'))
143 .to_owned();
143 .to_owned();
144 if relative {
144 if relative {
145 shared_path = dot_hg.join(shared_path)
145 shared_path = dot_hg.join(shared_path)
146 }
146 }
147 if !is_dir(&shared_path)? {
147 if !is_dir(&shared_path)? {
148 return Err(HgError::corrupted(format!(
148 return Err(HgError::corrupted(format!(
149 ".hg/sharedpath points to nonexistent directory {}",
149 ".hg/sharedpath points to nonexistent directory {}",
150 shared_path.display()
150 shared_path.display()
151 ))
151 ))
152 .into());
152 .into());
153 }
153 }
154
154
155 store_path = shared_path.join("store");
155 store_path = shared_path.join("store");
156
156
157 let source_is_share_safe =
157 let source_is_share_safe =
158 requirements::load(Vfs { base: &shared_path })?
158 requirements::load(Vfs { base: &shared_path })?
159 .contains(requirements::SHARESAFE_REQUIREMENT);
159 .contains(requirements::SHARESAFE_REQUIREMENT);
160
160
161 if share_safe != source_is_share_safe {
161 if share_safe != source_is_share_safe {
162 return Err(HgError::unsupported("share-safe mismatch").into());
162 return Err(HgError::unsupported("share-safe mismatch").into());
163 }
163 }
164
164
165 if share_safe {
165 if share_safe {
166 repo_config_files.insert(0, shared_path.join("hgrc"))
166 repo_config_files.insert(0, shared_path.join("hgrc"))
167 }
167 }
168 }
168 }
169 if share_safe {
169 if share_safe {
170 reqs.extend(requirements::load(Vfs { base: &store_path })?);
170 reqs.extend(requirements::load(Vfs { base: &store_path })?);
171 }
171 }
172
172
173 let repo_config = if std::env::var_os("HGRCSKIPREPO").is_none() {
173 let repo_config = if std::env::var_os("HGRCSKIPREPO").is_none() {
174 config.combine_with_repo(&repo_config_files)?
174 config.combine_with_repo(&repo_config_files)?
175 } else {
175 } else {
176 config.clone()
176 config.clone()
177 };
177 };
178
178
179 let repo = Self {
179 let repo = Self {
180 requirements: reqs,
180 requirements: reqs,
181 working_directory,
181 working_directory,
182 store: store_path,
182 store: store_path,
183 dot_hg,
183 dot_hg,
184 config: repo_config,
184 config: repo_config,
185 dirstate_parents: LazyCell::new(Self::read_dirstate_parents),
185 dirstate_parents: LazyCell::new(Self::read_dirstate_parents),
186 dirstate_data_file_uuid: LazyCell::new(
186 dirstate_data_file_uuid: LazyCell::new(
187 Self::read_dirstate_data_file_uuid,
187 Self::read_dirstate_data_file_uuid,
188 ),
188 ),
189 dirstate_map: LazyCell::new(Self::new_dirstate_map),
189 dirstate_map: LazyCell::new(Self::new_dirstate_map),
190 changelog: LazyCell::new(Self::new_changelog),
190 changelog: LazyCell::new(Self::new_changelog),
191 manifestlog: LazyCell::new(Self::new_manifestlog),
191 manifestlog: LazyCell::new(Self::new_manifestlog),
192 };
192 };
193
193
194 requirements::check(&repo)?;
194 requirements::check(&repo)?;
195
195
196 Ok(repo)
196 Ok(repo)
197 }
197 }
198
198
199 pub fn working_directory_path(&self) -> &Path {
199 pub fn working_directory_path(&self) -> &Path {
200 &self.working_directory
200 &self.working_directory
201 }
201 }
202
202
203 pub fn requirements(&self) -> &HashSet<String> {
203 pub fn requirements(&self) -> &HashSet<String> {
204 &self.requirements
204 &self.requirements
205 }
205 }
206
206
207 pub fn config(&self) -> &Config {
207 pub fn config(&self) -> &Config {
208 &self.config
208 &self.config
209 }
209 }
210
210
211 /// For accessing repository files (in `.hg`), except for the store
211 /// For accessing repository files (in `.hg`), except for the store
212 /// (`.hg/store`).
212 /// (`.hg/store`).
213 pub fn hg_vfs(&self) -> Vfs<'_> {
213 pub fn hg_vfs(&self) -> Vfs<'_> {
214 Vfs { base: &self.dot_hg }
214 Vfs { base: &self.dot_hg }
215 }
215 }
216
216
217 /// For accessing repository store files (in `.hg/store`)
217 /// For accessing repository store files (in `.hg/store`)
218 pub fn store_vfs(&self) -> Vfs<'_> {
218 pub fn store_vfs(&self) -> Vfs<'_> {
219 Vfs { base: &self.store }
219 Vfs { base: &self.store }
220 }
220 }
221
221
222 /// For accessing the working copy
222 /// For accessing the working copy
223 pub fn working_directory_vfs(&self) -> Vfs<'_> {
223 pub fn working_directory_vfs(&self) -> Vfs<'_> {
224 Vfs {
224 Vfs {
225 base: &self.working_directory,
225 base: &self.working_directory,
226 }
226 }
227 }
227 }
228
228
229 pub fn try_with_wlock_no_wait<R>(
229 pub fn try_with_wlock_no_wait<R>(
230 &self,
230 &self,
231 f: impl FnOnce() -> R,
231 f: impl FnOnce() -> R,
232 ) -> Result<R, LockError> {
232 ) -> Result<R, LockError> {
233 try_with_lock_no_wait(self.hg_vfs(), "wlock", f)
233 try_with_lock_no_wait(self.hg_vfs(), "wlock", f)
234 }
234 }
235
235
236 pub fn has_dirstate_v2(&self) -> bool {
236 pub fn has_dirstate_v2(&self) -> bool {
237 self.requirements
237 self.requirements
238 .contains(requirements::DIRSTATE_V2_REQUIREMENT)
238 .contains(requirements::DIRSTATE_V2_REQUIREMENT)
239 }
239 }
240
240
241 pub fn has_sparse(&self) -> bool {
241 pub fn has_sparse(&self) -> bool {
242 self.requirements.contains(requirements::SPARSE_REQUIREMENT)
242 self.requirements.contains(requirements::SPARSE_REQUIREMENT)
243 }
243 }
244
244
245 pub fn has_narrow(&self) -> bool {
245 pub fn has_narrow(&self) -> bool {
246 self.requirements.contains(requirements::NARROW_REQUIREMENT)
246 self.requirements.contains(requirements::NARROW_REQUIREMENT)
247 }
247 }
248
248
249 pub fn has_nodemap(&self) -> bool {
249 pub fn has_nodemap(&self) -> bool {
250 self.requirements
250 self.requirements
251 .contains(requirements::NODEMAP_REQUIREMENT)
251 .contains(requirements::NODEMAP_REQUIREMENT)
252 }
252 }
253
253
254 fn dirstate_file_contents(&self) -> Result<Vec<u8>, HgError> {
254 fn dirstate_file_contents(&self) -> Result<Vec<u8>, HgError> {
255 Ok(self
255 Ok(self
256 .hg_vfs()
256 .hg_vfs()
257 .read("dirstate")
257 .read("dirstate")
258 .io_not_found_as_none()?
258 .io_not_found_as_none()?
259 .unwrap_or(Vec::new()))
259 .unwrap_or(Vec::new()))
260 }
260 }
261
261
262 pub fn dirstate_parents(&self) -> Result<DirstateParents, HgError> {
262 pub fn dirstate_parents(&self) -> Result<DirstateParents, HgError> {
263 Ok(*self.dirstate_parents.get_or_init(self)?)
263 Ok(*self.dirstate_parents.get_or_init(self)?)
264 }
264 }
265
265
266 fn read_dirstate_parents(&self) -> Result<DirstateParents, HgError> {
266 fn read_dirstate_parents(&self) -> Result<DirstateParents, HgError> {
267 let dirstate = self.dirstate_file_contents()?;
267 let dirstate = self.dirstate_file_contents()?;
268 let parents = if dirstate.is_empty() {
268 let parents = if dirstate.is_empty() {
269 if self.has_dirstate_v2() {
269 if self.has_dirstate_v2() {
270 self.dirstate_data_file_uuid.set(None);
270 self.dirstate_data_file_uuid.set(None);
271 }
271 }
272 DirstateParents::NULL
272 DirstateParents::NULL
273 } else if self.has_dirstate_v2() {
273 } else if self.has_dirstate_v2() {
274 let docket =
274 let docket =
275 crate::dirstate_tree::on_disk::read_docket(&dirstate)?;
275 crate::dirstate_tree::on_disk::read_docket(&dirstate)?;
276 self.dirstate_data_file_uuid
276 self.dirstate_data_file_uuid
277 .set(Some(docket.uuid.to_owned()));
277 .set(Some(docket.uuid.to_owned()));
278 docket.parents()
278 docket.parents()
279 } else {
279 } else {
280 crate::dirstate::parsers::parse_dirstate_parents(&dirstate)?
280 crate::dirstate::parsers::parse_dirstate_parents(&dirstate)?
281 .clone()
281 .clone()
282 };
282 };
283 self.dirstate_parents.set(parents);
283 self.dirstate_parents.set(parents);
284 Ok(parents)
284 Ok(parents)
285 }
285 }
286
286
287 fn read_dirstate_data_file_uuid(
287 fn read_dirstate_data_file_uuid(
288 &self,
288 &self,
289 ) -> Result<Option<Vec<u8>>, HgError> {
289 ) -> Result<Option<Vec<u8>>, HgError> {
290 assert!(
290 assert!(
291 self.has_dirstate_v2(),
291 self.has_dirstate_v2(),
292 "accessing dirstate data file ID without dirstate-v2"
292 "accessing dirstate data file ID without dirstate-v2"
293 );
293 );
294 let dirstate = self.dirstate_file_contents()?;
294 let dirstate = self.dirstate_file_contents()?;
295 if dirstate.is_empty() {
295 if dirstate.is_empty() {
296 self.dirstate_parents.set(DirstateParents::NULL);
296 self.dirstate_parents.set(DirstateParents::NULL);
297 Ok(None)
297 Ok(None)
298 } else {
298 } else {
299 let docket =
299 let docket =
300 crate::dirstate_tree::on_disk::read_docket(&dirstate)?;
300 crate::dirstate_tree::on_disk::read_docket(&dirstate)?;
301 self.dirstate_parents.set(docket.parents());
301 self.dirstate_parents.set(docket.parents());
302 Ok(Some(docket.uuid.to_owned()))
302 Ok(Some(docket.uuid.to_owned()))
303 }
303 }
304 }
304 }
305
305
306 fn new_dirstate_map(&self) -> Result<OwningDirstateMap, DirstateError> {
    fn new_dirstate_map(&self) -> Result<OwningDirstateMap, DirstateError> {
        let dirstate_file_contents = self.dirstate_file_contents()?;
        if dirstate_file_contents.is_empty() {
            self.dirstate_parents.set(DirstateParents::NULL);
            if self.has_dirstate_v2() {
                self.dirstate_data_file_uuid.set(None);
            }
            Ok(OwningDirstateMap::new_empty(Vec::new()))
        } else if self.has_dirstate_v2() {
            let docket = crate::dirstate_tree::on_disk::read_docket(
                &dirstate_file_contents,
            )?;
            self.dirstate_parents.set(docket.parents());
            self.dirstate_data_file_uuid
                .set(Some(docket.uuid.to_owned()));
            let data_size = docket.data_size();
            let metadata = docket.tree_metadata();
            if let Some(data_mmap) = self
                .hg_vfs()
                .mmap_open(docket.data_filename())
                .io_not_found_as_none()?
            {
                OwningDirstateMap::new_v2(data_mmap, data_size, metadata)
            } else {
                OwningDirstateMap::new_v2(Vec::new(), data_size, metadata)
            }
        } else {
            let (map, parents) =
                OwningDirstateMap::new_v1(dirstate_file_contents)?;
            self.dirstate_parents.set(parents);
            Ok(map)
        }
    }

    pub fn dirstate_map(
        &self,
    ) -> Result<Ref<OwningDirstateMap>, DirstateError> {
        self.dirstate_map.get_or_init(self)
    }

    pub fn dirstate_map_mut(
        &self,
    ) -> Result<RefMut<OwningDirstateMap>, DirstateError> {
        self.dirstate_map.get_mut_or_init(self)
    }

    fn new_changelog(&self) -> Result<Changelog, HgError> {
        Changelog::open(&self.store_vfs(), self.has_nodemap())
    }

    pub fn changelog(&self) -> Result<Ref<Changelog>, HgError> {
        self.changelog.get_or_init(self)
    }

    pub fn changelog_mut(&self) -> Result<RefMut<Changelog>, HgError> {
        self.changelog.get_mut_or_init(self)
    }

    fn new_manifestlog(&self) -> Result<Manifestlog, HgError> {
        Manifestlog::open(&self.store_vfs(), self.has_nodemap())
    }

    pub fn manifestlog(&self) -> Result<Ref<Manifestlog>, HgError> {
        self.manifestlog.get_or_init(self)
    }

    pub fn manifestlog_mut(&self) -> Result<RefMut<Manifestlog>, HgError> {
        self.manifestlog.get_mut_or_init(self)
    }

    /// Returns the manifest of the *changeset* with the given node ID
    pub fn manifest_for_node(
        &self,
        node: impl Into<NodePrefix>,
    ) -> Result<Manifest, RevlogError> {
        self.manifestlog()?.data_for_node(
            self.changelog()?
                .data_for_node(node.into())?
                .manifest_node()?
                .into(),
        )
    }

    /// Returns the manifest of the *changeset* with the given revision number
    pub fn manifest_for_rev(
        &self,
        revision: Revision,
    ) -> Result<Manifest, RevlogError> {
        self.manifestlog()?.data_for_node(
            self.changelog()?
                .data_for_rev(revision)?
                .manifest_node()?
                .into(),
        )
    }

    pub fn has_subrepos(&self) -> Result<bool, DirstateError> {
        if let Some(entry) = self.dirstate_map()?.get(HgPath::new(".hgsub"))? {
            Ok(entry.tracked())
        } else {
            Ok(false)
        }
    }

    pub fn filelog(&self, path: &HgPath) -> Result<Filelog, HgError> {
        Filelog::open(self, path)
    }

    /// Write to disk any updates that were made through `dirstate_map_mut`.
    ///
    /// The "wlock" must be held while calling this.
    /// See for example `try_with_wlock_no_wait`.
    ///
    /// TODO: have a `WritableRepo` type only accessible while holding the
    /// lock?
    pub fn write_dirstate(&self) -> Result<(), DirstateError> {
        let map = self.dirstate_map()?;
        // TODO: Maintain a `DirstateMap::dirty` flag, and return early here if
        // it’s unset
        let parents = self.dirstate_parents()?;
        let (packed_dirstate, old_uuid_to_remove) = if self.has_dirstate_v2() {
-            let uuid = self.dirstate_data_file_uuid.get_or_init(self)?;
-            let mut uuid = uuid.as_ref();
-            let can_append = uuid.is_some();
+            let uuid_opt = self.dirstate_data_file_uuid.get_or_init(self)?;
+            let uuid_opt = uuid_opt.as_ref();
+            let can_append = uuid_opt.is_some();
            let (data, tree_metadata, append, old_data_size) =
                map.pack_v2(can_append)?;
-            if !append {
-                uuid = None
-            }
-            let (uuid, old_uuid) = if let Some(uuid) = uuid {
-                let as_str = std::str::from_utf8(uuid)
-                    .map_err(|_| {
-                        HgError::corrupted("non-UTF-8 dirstate data file ID")
-                    })?
-                    .to_owned();
-                let old_uuid_to_remove = Some(as_str.to_owned());
-                (as_str, old_uuid_to_remove)
-            } else {
-                (DirstateDocket::new_uid(), None)
+
+            // Reuse the uuid, or generate a new one, keeping the old for
+            // deletion.
+            let (uuid, old_uuid) = match uuid_opt {
+                Some(uuid) => {
+                    let as_str = std::str::from_utf8(uuid)
+                        .map_err(|_| {
+                            HgError::corrupted(
+                                "non-UTF-8 dirstate data file ID",
+                            )
+                        })?
+                        .to_owned();
+                    if append {
+                        (as_str, None)
+                    } else {
+                        (DirstateDocket::new_uid(), Some(as_str))
+                    }
+                }
+                None => (DirstateDocket::new_uid(), None),
            };
+
            let data_filename = format!("dirstate.{}", uuid);
            let data_filename = self.hg_vfs().join(data_filename);
            let mut options = std::fs::OpenOptions::new();
            if append {
                options.append(true);
            } else {
                options.write(true).create_new(true);
            }
            let data_size = (|| {
                // TODO: loop and try another random ID if !append and this
                // returns `ErrorKind::AlreadyExists`? Collision chance of two
                // random IDs is one in 2**32
                let mut file = options.open(&data_filename)?;
                if data.is_empty() {
                    // If we're not appending anything, the data size is the
                    // same as in the previous docket. It is *not* the file
                    // length, since it could have garbage at the end.
                    // We don't have to worry about it when we do have data
                    // to append since we rewrite the root node in this case.
                    Ok(old_data_size as u64)
                } else {
                    file.write_all(&data)?;
                    file.flush()?;
                    // TODO: use https://doc.rust-lang.org/std/io/trait.Seek.html#method.stream_position when we require Rust 1.51+
                    file.seek(SeekFrom::Current(0))
                }
            })()
            .when_writing_file(&data_filename)?;

            let packed_dirstate = DirstateDocket::serialize(
                parents,
                tree_metadata,
                data_size,
                uuid.as_bytes(),
            )
            .map_err(|_: std::num::TryFromIntError| {
                HgError::corrupted("overflow in dirstate docket serialization")
            })?;

            (packed_dirstate, old_uuid)
        } else {
            (map.pack_v1(parents)?, None)
        };

        let vfs = self.hg_vfs();
        vfs.atomic_write("dirstate", &packed_dirstate)?;
        if let Some(uuid) = old_uuid_to_remove {
            // Remove the old data file after the new docket pointing to the
            // new data file was written.
            vfs.remove_file(format!("dirstate.{}", uuid))?;
        }
        Ok(())
    }
}

/// Lazily-initialized component of `Repo` with interior mutability
///
/// This differs from `OnceCell` in that the value can still be "deinitialized"
/// later by setting its inner `Option` to `None`.
struct LazyCell<T, E> {
    value: RefCell<Option<T>>,
    // `Fn`s that don’t capture environment are zero-size, so this box does
    // not allocate:
    init: Box<dyn Fn(&Repo) -> Result<T, E>>,
}

impl<T, E> LazyCell<T, E> {
    fn new(init: impl Fn(&Repo) -> Result<T, E> + 'static) -> Self {
        Self {
            value: RefCell::new(None),
            init: Box::new(init),
        }
    }

    fn set(&self, value: T) {
        *self.value.borrow_mut() = Some(value)
    }

    fn get_or_init(&self, repo: &Repo) -> Result<Ref<T>, E> {
        let mut borrowed = self.value.borrow();
        if borrowed.is_none() {
            drop(borrowed);
            // Only use `borrow_mut` if it is really needed to avoid panic in
            // case there is another outstanding borrow but mutation is not
            // needed.
            *self.value.borrow_mut() = Some((self.init)(repo)?);
            borrowed = self.value.borrow()
        }
        Ok(Ref::map(borrowed, |option| option.as_ref().unwrap()))
    }

    fn get_mut_or_init(&self, repo: &Repo) -> Result<RefMut<T>, E> {
        let mut borrowed = self.value.borrow_mut();
        if borrowed.is_none() {
            *borrowed = Some((self.init)(repo)?);
        }
        Ok(RefMut::map(borrowed, |option| option.as_mut().unwrap()))
    }
}
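
To see the `LazyCell` pattern in isolation: a minimal self-contained sketch, with the `&Repo` context parameter replaced by a plain `&str` and the error handling dropped so it compiles on its own.

use std::cell::{Ref, RefCell};

struct LazyCell<T> {
    value: RefCell<Option<T>>,
    init: Box<dyn Fn(&str) -> T>,
}

impl<T> LazyCell<T> {
    fn new(init: impl Fn(&str) -> T + 'static) -> Self {
        Self { value: RefCell::new(None), init: Box::new(init) }
    }

    fn get_or_init(&self, ctx: &str) -> Ref<T> {
        if self.value.borrow().is_none() {
            // Take the mutable borrow only on the slow path, as above.
            *self.value.borrow_mut() = Some((self.init)(ctx));
        }
        Ref::map(self.value.borrow(), |v| v.as_ref().unwrap())
    }
}

fn main() {
    let cell = LazyCell::new(|ctx: &str| ctx.len()); // runs at most once
    assert_eq!(*cell.get_or_init("repo-path"), 9);
    assert_eq!(*cell.get_or_init("ignored"), 9); // already initialized
}
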
@@ -1,23 +1,24 @@
[package]
name = "rhg"
version = "0.1.0"
authors = [
    "Antoine Cezar <antoine.cezar@octobus.net>",
    "Raphaël Gomès <raphael.gomes@octobus.net>",
]
edition = "2018"

[dependencies]
atty = "0.2.14"
hg-core = { path = "../hg-core"}
chrono = "0.4.19"
clap = "2.34.0"
derive_more = "0.99.17"
home = "0.5.3"
lazy_static = "1.4.0"
log = "0.4.14"
micro-timer = "0.4.0"
regex = "1.5.5"
env_logger = "0.9.0"
format-bytes = "0.3.0"
users = "0.11.0"
+which = "4.2.5"
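
Note: the new `which` dependency backs the fallback-executable check added in the `rhg` main module further below; resolving the executable up front gives a clear error instead of a confusing exec failure. Roughly, assuming only the crate's documented `which::which` entry point (the messages here are illustrative, not rhg's actual output):

// Sketch: resolving a command name against $PATH with the `which` crate.
fn main() {
    match which::which("hg") {
        Ok(path) => println!("would fall back to {}", path.display()),
        Err(err) => eprintln!("invalid fallback 'hg': {}", err),
    }
}
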
@@ -1,208 +1,211 @@
use crate::ui::utf8_to_local;
use crate::ui::UiError;
use crate::NoRepoInCwdError;
use format_bytes::format_bytes;
use hg::config::{ConfigError, ConfigParseError, ConfigValueParseError};
use hg::dirstate_tree::on_disk::DirstateV2ParseError;
use hg::errors::HgError;
use hg::exit_codes;
use hg::repo::RepoError;
use hg::revlog::revlog::RevlogError;
use hg::utils::files::get_bytes_from_path;
use hg::{DirstateError, DirstateMapError, StatusError};
use std::convert::From;

/// The kind of command error
#[derive(Debug)]
pub enum CommandError {
    /// Exit with an error message and "standard" failure exit code.
    Abort {
        message: Vec<u8>,
        detailed_exit_code: exit_codes::ExitCode,
    },

    /// Exit with a failure exit code but no message.
    Unsuccessful,

    /// Encountered something (such as a CLI argument, repository layout, …)
    /// not supported by this version of `rhg`. Depending on configuration
    /// `rhg` may attempt to silently fall back to Python-based `hg`, which
    /// may or may not support this feature.
    UnsupportedFeature { message: Vec<u8> },
+    /// The fallback executable does not exist (or has some other problem if
+    /// we end up being more precise about broken fallbacks).
+    InvalidFallback { path: Vec<u8>, err: String },
}

impl CommandError {
    pub fn abort(message: impl AsRef<str>) -> Self {
        CommandError::abort_with_exit_code(message, exit_codes::ABORT)
    }

    pub fn abort_with_exit_code(
        message: impl AsRef<str>,
        detailed_exit_code: exit_codes::ExitCode,
    ) -> Self {
        CommandError::Abort {
            // TODO: bytes-based (instead of Unicode-based) formatting
            // of error messages to handle non-UTF-8 filenames etc:
            // https://www.mercurial-scm.org/wiki/EncodingStrategy#Mixing_output
            message: utf8_to_local(message.as_ref()).into(),
            detailed_exit_code: detailed_exit_code,
        }
    }

    pub fn unsupported(message: impl AsRef<str>) -> Self {
        CommandError::UnsupportedFeature {
            message: utf8_to_local(message.as_ref()).into(),
        }
    }
}
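
The `From` impls that follow exist so command code can apply `?` to any lower-level error and end up with a `CommandError` carrying the right exit code. The same pattern in a self-contained sketch (simplified types, not the real rhg/hg-core definitions):

// Minimal model of the error-conversion pattern: every lower-level error
// gets a From impl, so `?` inside a command body "just works".
#[derive(Debug)]
struct ConfigError(String);

#[derive(Debug)]
enum CommandError {
    Abort { message: String, exit_code: i32 },
}

impl From<ConfigError> for CommandError {
    fn from(error: ConfigError) -> Self {
        CommandError::Abort {
            message: format!("config error: {}", error.0),
            exit_code: 30, // stand-in for exit_codes::CONFIG_ERROR_ABORT
        }
    }
}

fn load_config() -> Result<(), ConfigError> {
    Err(ConfigError("bad value".into()))
}

fn run_command() -> Result<(), CommandError> {
    load_config()?; // ConfigError -> CommandError via From
    Ok(())
}

fn main() {
    println!("{:?}", run_command());
}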

/// For now we don’t differentiate between invalid CLI args and valid for `hg`
/// but not supported yet by `rhg`.
impl From<clap::Error> for CommandError {
    fn from(error: clap::Error) -> Self {
        CommandError::unsupported(error.to_string())
    }
}

impl From<HgError> for CommandError {
    fn from(error: HgError) -> Self {
        match error {
            HgError::UnsupportedFeature(message) => {
                CommandError::unsupported(message)
            }
            HgError::Abort {
                message,
                detailed_exit_code,
            } => {
                CommandError::abort_with_exit_code(message, detailed_exit_code)
            }
            _ => CommandError::abort(error.to_string()),
        }
    }
}

impl From<ConfigValueParseError> for CommandError {
    fn from(error: ConfigValueParseError) -> Self {
        CommandError::abort_with_exit_code(
            error.to_string(),
            exit_codes::CONFIG_ERROR_ABORT,
        )
    }
}

impl From<UiError> for CommandError {
    fn from(_error: UiError) -> Self {
        // If we already failed writing to stdout or stderr,
        // writing an error message to stderr about it would be likely to fail
        // too.
        CommandError::abort("")
    }
}

impl From<RepoError> for CommandError {
    fn from(error: RepoError) -> Self {
        match error {
            RepoError::NotFound { at } => CommandError::Abort {
                message: format_bytes!(
                    b"abort: repository {} not found",
                    get_bytes_from_path(at)
                ),
                detailed_exit_code: exit_codes::ABORT,
            },
            RepoError::ConfigParseError(error) => error.into(),
            RepoError::Other(error) => error.into(),
        }
    }
}

impl<'a> From<&'a NoRepoInCwdError> for CommandError {
    fn from(error: &'a NoRepoInCwdError) -> Self {
        let NoRepoInCwdError { cwd } = error;
        CommandError::Abort {
            message: format_bytes!(
                b"abort: no repository found in '{}' (.hg not found)!",
                get_bytes_from_path(cwd)
            ),
            detailed_exit_code: exit_codes::ABORT,
        }
    }
}

impl From<ConfigError> for CommandError {
    fn from(error: ConfigError) -> Self {
        match error {
            ConfigError::Parse(error) => error.into(),
            ConfigError::Other(error) => error.into(),
        }
    }
}

impl From<ConfigParseError> for CommandError {
    fn from(error: ConfigParseError) -> Self {
        let ConfigParseError {
            origin,
            line,
            message,
        } = error;
        let line_message = if let Some(line_number) = line {
            format_bytes!(b":{}", line_number.to_string().into_bytes())
        } else {
            Vec::new()
        };
        CommandError::Abort {
            message: format_bytes!(
                b"config error at {}{}: {}",
                origin,
                line_message,
                message
            ),
            detailed_exit_code: exit_codes::CONFIG_ERROR_ABORT,
        }
    }
}

impl From<(RevlogError, &str)> for CommandError {
    fn from((err, rev): (RevlogError, &str)) -> CommandError {
        match err {
            RevlogError::WDirUnsupported => CommandError::abort(
                "abort: working directory revision cannot be specified",
            ),
            RevlogError::InvalidRevision => CommandError::abort(format!(
                "abort: invalid revision identifier: {}",
                rev
            )),
            RevlogError::AmbiguousPrefix => CommandError::abort(format!(
                "abort: ambiguous revision identifier: {}",
                rev
            )),
            RevlogError::Other(error) => error.into(),
        }
    }
}

impl From<StatusError> for CommandError {
    fn from(error: StatusError) -> Self {
        CommandError::abort(format!("{}", error))
    }
}

impl From<DirstateMapError> for CommandError {
    fn from(error: DirstateMapError) -> Self {
        CommandError::abort(format!("{}", error))
    }
}

impl From<DirstateError> for CommandError {
    fn from(error: DirstateError) -> Self {
        match error {
            DirstateError::Common(error) => error.into(),
            DirstateError::Map(error) => error.into(),
        }
    }
}

impl From<DirstateV2ParseError> for CommandError {
    fn from(error: DirstateV2ParseError) -> Self {
        HgError::from(error).into()
    }
}
@@ -1,733 +1,754 @@
extern crate log;
use crate::error::CommandError;
use crate::ui::{local_to_utf8, Ui};
use clap::App;
use clap::AppSettings;
use clap::Arg;
use clap::ArgMatches;
use format_bytes::{format_bytes, join};
use hg::config::{Config, ConfigSource};
use hg::exit_codes;
use hg::repo::{Repo, RepoError};
use hg::utils::files::{get_bytes_from_os_str, get_path_from_bytes};
use hg::utils::SliceExt;
use std::collections::HashSet;
use std::ffi::OsString;
+use std::os::unix::prelude::CommandExt;
use std::path::PathBuf;
use std::process::Command;

mod blackbox;
mod color;
mod error;
mod ui;
pub mod utils {
    pub mod path_utils;
}

fn main_with_result(
    argv: Vec<OsString>,
    process_start_time: &blackbox::ProcessStartTime,
    ui: &ui::Ui,
    repo: Result<&Repo, &NoRepoInCwdError>,
    config: &Config,
) -> Result<(), CommandError> {
    check_unsupported(config, repo)?;

    let app = App::new("rhg")
        .global_setting(AppSettings::AllowInvalidUtf8)
        .global_setting(AppSettings::DisableVersion)
        .setting(AppSettings::SubcommandRequired)
        .setting(AppSettings::VersionlessSubcommands)
        .arg(
            Arg::with_name("repository")
                .help("repository root directory")
                .short("-R")
                .long("--repository")
                .value_name("REPO")
                .takes_value(true)
                // Both ok: `hg -R ./foo log` or `hg log -R ./foo`
                .global(true),
        )
        .arg(
            Arg::with_name("config")
                .help("set/override config option (use 'section.name=value')")
                .long("--config")
                .value_name("CONFIG")
                .takes_value(true)
                .global(true)
                // Ok: `--config section.key1=val --config section.key2=val2`
                .multiple(true)
                // Not ok: `--config section.key1=val section.key2=val2`
                .number_of_values(1),
        )
        .arg(
            Arg::with_name("cwd")
                .help("change working directory")
                .long("--cwd")
                .value_name("DIR")
                .takes_value(true)
                .global(true),
        )
        .arg(
            Arg::with_name("color")
                .help("when to colorize (boolean, always, auto, never, or debug)")
                .long("--color")
                .value_name("TYPE")
                .takes_value(true)
                .global(true),
        )
        .version("0.0.1");
    let app = add_subcommand_args(app);

    let matches = app.clone().get_matches_from_safe(argv.iter())?;

    let (subcommand_name, subcommand_matches) = matches.subcommand();

    // Mercurial allows users to define "defaults" for commands; fall back
    // if a default is detected for the current command
    let defaults = config.get_str(b"defaults", subcommand_name.as_bytes());
    if defaults?.is_some() {
        let msg = "`defaults` config set";
        return Err(CommandError::unsupported(msg));
    }

    for prefix in ["pre", "post", "fail"].iter() {
        // Mercurial allows users to define generic hooks for commands;
        // fall back if any are detected
        let item = format!("{}-{}", prefix, subcommand_name);
        let hook_for_command = config.get_str(b"hooks", item.as_bytes())?;
        if hook_for_command.is_some() {
            let msg = format!("{}-{} hook defined", prefix, subcommand_name);
            return Err(CommandError::unsupported(msg));
        }
    }
    let run = subcommand_run_fn(subcommand_name)
        .expect("unknown subcommand name from clap despite AppSettings::SubcommandRequired");
    let subcommand_args = subcommand_matches
        .expect("no subcommand arguments from clap despite AppSettings::SubcommandRequired");

    let invocation = CliInvocation {
        ui,
        subcommand_args,
        config,
        repo,
    };

    if let Ok(repo) = repo {
        // We don't support subrepos; fall back if the subrepos file is present
        if repo.working_directory_vfs().join(".hgsub").exists() {
            let msg = "subrepos (.hgsub is present)";
            return Err(CommandError::unsupported(msg));
        }
    }

    if config.is_extension_enabled(b"blackbox") {
        let blackbox =
            blackbox::Blackbox::new(&invocation, process_start_time)?;
        blackbox.log_command_start(argv.iter());
        let result = run(&invocation);
        blackbox.log_command_end(
            argv.iter(),
            exit_code(
                &result,
                // TODO: show a warning or combine with original error if
                // `get_bool` returns an error
                config
                    .get_bool(b"ui", b"detailed-exit-code")
                    .unwrap_or(false),
            ),
        );
        result
    } else {
        run(&invocation)
    }
}

fn rhg_main(argv: Vec<OsString>) -> ! {
    // Run this first, before we find out if the blackbox extension is even
    // enabled, in order to include everything in-between in the duration
    // measurements. Reading config files can be slow if they’re on NFS.
    let process_start_time = blackbox::ProcessStartTime::now();

    env_logger::init();

    let early_args = EarlyArgs::parse(&argv);

    let initial_current_dir = early_args.cwd.map(|cwd| {
        let cwd = get_path_from_bytes(&cwd);
        std::env::current_dir()
            .and_then(|initial| {
                std::env::set_current_dir(cwd)?;
                Ok(initial)
            })
            .unwrap_or_else(|error| {
                exit(
                    &argv,
                    &None,
                    &Ui::new_infallible(&Config::empty()),
                    OnUnsupported::Abort,
                    Err(CommandError::abort(format!(
                        "abort: {}: '{}'",
                        error,
                        cwd.display()
                    ))),
                    false,
                )
            })
    });

    let mut non_repo_config =
        Config::load_non_repo().unwrap_or_else(|error| {
            // Normally this is decided based on config, but we don’t have that
            // available. As of this writing config loading never returns an
            // "unsupported" error but that is not enforced by the type system.
            let on_unsupported = OnUnsupported::Abort;

            exit(
                &argv,
                &initial_current_dir,
                &Ui::new_infallible(&Config::empty()),
                on_unsupported,
                Err(error.into()),
                false,
            )
        });

    non_repo_config
        .load_cli_args(early_args.config, early_args.color)
        .unwrap_or_else(|error| {
            exit(
                &argv,
                &initial_current_dir,
                &Ui::new_infallible(&non_repo_config),
                OnUnsupported::from_config(&non_repo_config),
                Err(error.into()),
                non_repo_config
                    .get_bool(b"ui", b"detailed-exit-code")
                    .unwrap_or(false),
            )
        });

    if let Some(repo_path_bytes) = &early_args.repo {
        lazy_static::lazy_static! {
            static ref SCHEME_RE: regex::bytes::Regex =
                // Same as `_matchscheme` in `mercurial/util.py`
                regex::bytes::Regex::new("^[a-zA-Z0-9+.\\-]+:").unwrap();
        }
        if SCHEME_RE.is_match(&repo_path_bytes) {
            exit(
                &argv,
                &initial_current_dir,
                &Ui::new_infallible(&non_repo_config),
                OnUnsupported::from_config(&non_repo_config),
                Err(CommandError::UnsupportedFeature {
                    message: format_bytes!(
                        b"URL-like --repository {}",
                        repo_path_bytes
                    ),
                }),
                // TODO: show a warning or combine with original error if
                // `get_bool` returns an error
                non_repo_config
                    .get_bool(b"ui", b"detailed-exit-code")
                    .unwrap_or(false),
            )
        }
    }
    let repo_arg = early_args.repo.unwrap_or(Vec::new());
    let repo_path: Option<PathBuf> = {
        if repo_arg.is_empty() {
            None
        } else {
            let local_config = {
                if std::env::var_os("HGRCSKIPREPO").is_none() {
                    // TODO: handle errors from find_repo_root
                    if let Ok(current_dir_path) = Repo::find_repo_root() {
                        let config_files = vec![
                            ConfigSource::AbsPath(
                                current_dir_path.join(".hg/hgrc"),
                            ),
                            ConfigSource::AbsPath(
                                current_dir_path.join(".hg/hgrc-not-shared"),
                            ),
                        ];
                        // TODO: handle errors from
                        // `load_from_explicit_sources`
                        Config::load_from_explicit_sources(config_files).ok()
                    } else {
                        None
                    }
                } else {
                    None
                }
            };

            let non_repo_config_val = {
                let non_repo_val = non_repo_config.get(b"paths", &repo_arg);
                match &non_repo_val {
                    Some(val) if val.len() > 0 => home::home_dir()
                        .unwrap_or_else(|| PathBuf::from("~"))
                        .join(get_path_from_bytes(val))
                        .canonicalize()
                        // TODO: handle error and make it similar to python
                        // implementation maybe?
                        .ok(),
                    _ => None,
                }
            };

            let config_val = match &local_config {
                None => non_repo_config_val,
                Some(val) => {
                    let local_config_val = val.get(b"paths", &repo_arg);
                    match &local_config_val {
                        Some(val) if val.len() > 0 => {
                            // The presence of a local_config ensures that
                            // current_dir won't result in an error
                            let canpath = hg::utils::current_dir()
                                .unwrap()
                                .join(get_path_from_bytes(val))
                                .canonicalize();
                            canpath.ok().or(non_repo_config_val)
                        }
                        _ => non_repo_config_val,
                    }
                }
            };
            config_val.or(Some(get_path_from_bytes(&repo_arg).to_path_buf()))
        }
    };

    let repo_result = match Repo::find(&non_repo_config, repo_path.to_owned())
    {
        Ok(repo) => Ok(repo),
        Err(RepoError::NotFound { at }) if repo_path.is_none() => {
            // Not finding a repo is not fatal yet, if `-R` was not given
            Err(NoRepoInCwdError { cwd: at })
        }
        Err(error) => exit(
            &argv,
            &initial_current_dir,
            &Ui::new_infallible(&non_repo_config),
            OnUnsupported::from_config(&non_repo_config),
            Err(error.into()),
            // TODO: show a warning or combine with original error if
            // `get_bool` returns an error
            non_repo_config
                .get_bool(b"ui", b"detailed-exit-code")
                .unwrap_or(false),
        ),
    };

    let config = if let Ok(repo) = &repo_result {
        repo.config()
    } else {
        &non_repo_config
    };
    let ui = Ui::new(&config).unwrap_or_else(|error| {
        exit(
            &argv,
            &initial_current_dir,
            &Ui::new_infallible(&config),
            OnUnsupported::from_config(&config),
            Err(error.into()),
            config
                .get_bool(b"ui", b"detailed-exit-code")
                .unwrap_or(false),
        )
    });
    let on_unsupported = OnUnsupported::from_config(config);

    let result = main_with_result(
        argv.iter().map(|s| s.to_owned()).collect(),
        &process_start_time,
        &ui,
        repo_result.as_ref(),
        config,
    );
    exit(
        &argv,
        &initial_current_dir,
        &ui,
        on_unsupported,
        result,
        // TODO: show a warning or combine with original error if `get_bool`
        // returns an error
        config
            .get_bool(b"ui", b"detailed-exit-code")
            .unwrap_or(false),
    )
}

fn main() -> ! {
    rhg_main(std::env::args_os().collect())
}

fn exit_code(
    result: &Result<(), CommandError>,
    use_detailed_exit_code: bool,
) -> i32 {
    match result {
        Ok(()) => exit_codes::OK,
        Err(CommandError::Abort {
            message: _,
            detailed_exit_code,
        }) => {
            if use_detailed_exit_code {
                *detailed_exit_code
            } else {
                exit_codes::ABORT
            }
        }
        Err(CommandError::Unsuccessful) => exit_codes::UNSUCCESSFUL,
-
        // Exit with a specific code and no error message to let a potential
        // wrapper script fall back to Python-based Mercurial.
        Err(CommandError::UnsupportedFeature { .. }) => {
            exit_codes::UNIMPLEMENTED
        }
+        Err(CommandError::InvalidFallback { .. }) => {
+            exit_codes::INVALID_FALLBACK
+        }
    }
}

fn exit<'a>(
    original_args: &'a [OsString],
    initial_current_dir: &Option<PathBuf>,
    ui: &Ui,
    mut on_unsupported: OnUnsupported,
    result: Result<(), CommandError>,
    use_detailed_exit_code: bool,
) -> ! {
    if let (
        OnUnsupported::Fallback { executable },
        Err(CommandError::UnsupportedFeature { message }),
    ) = (&on_unsupported, &result)
    {
        let mut args = original_args.iter();
        let executable = match executable {
            None => {
                exit_no_fallback(
                    ui,
                    OnUnsupported::Abort,
                    Err(CommandError::abort(
                        "abort: 'rhg.on-unsupported=fallback' without \
                         'rhg.fallback-executable' set.",
                    )),
                    false,
                );
            }
            Some(executable) => executable,
        };
        let executable_path = get_path_from_bytes(&executable);
        let this_executable = args.next().expect("expected argv[0] to exist");
        if executable_path == &PathBuf::from(this_executable) {
            // Avoid spawning infinitely many processes until resource
            // exhaustion.
            let _ = ui.write_stderr(&format_bytes!(
                b"Blocking recursive fallback. The 'rhg.fallback-executable = {}' config \
                  points to `rhg` itself.\n",
                executable
            ));
            on_unsupported = OnUnsupported::Abort
        } else {
            log::debug!("falling back (see trace-level log)");
            log::trace!("{}", local_to_utf8(message));
+            if let Err(err) = which::which(executable_path) {
+                exit_no_fallback(
+                    ui,
+                    OnUnsupported::Abort,
+                    Err(CommandError::InvalidFallback {
+                        path: executable.to_owned(),
+                        err: err.to_string(),
+                    }),
+                    use_detailed_exit_code,
+                )
+            }
            // `args` is now `argv[1..]` since we’ve already consumed
            // `argv[0]`
            let mut command = Command::new(executable_path);
            command.args(args);
            if let Some(initial) = initial_current_dir {
                command.current_dir(initial);
            }
-            let result = command.status();
-            match result {
-                Ok(status) => std::process::exit(
-                    status.code().unwrap_or(exit_codes::ABORT),
-                ),
-                Err(error) => {
-                    let _ = ui.write_stderr(&format_bytes!(
-                        b"tried to fall back to a '{}' sub-process but got error {}\n",
-                        executable, format_bytes::Utf8(error)
-                    ));
-                    on_unsupported = OnUnsupported::Abort
-                }
-            }
+            // We don't use subprocess because proper signal handling is harder
+            // and we don't want to keep `rhg` around after a fallback anyway.
+            // For example, if `rhg` is run in the background and falls back to
+            // `hg` which, in turn, waits for a signal, we'll get stuck if
+            // we're doing plain subprocess.
+            //
+            // If `exec` returns, we can only assume our process is very broken
+            // (see its documentation), so only try to forward the error code
+            // when exiting.
+            let err = command.exec();
+            std::process::exit(
+                err.raw_os_error().unwrap_or(exit_codes::ABORT),
+            );
        }
    }
    exit_no_fallback(ui, on_unsupported, result, use_detailed_exit_code)
}

fn exit_no_fallback(
    ui: &Ui,
    on_unsupported: OnUnsupported,
    result: Result<(), CommandError>,
    use_detailed_exit_code: bool,
) -> ! {
    match &result {
        Ok(_) => {}
        Err(CommandError::Unsuccessful) => {}
        Err(CommandError::Abort {
            message,
            detailed_exit_code: _,
        }) => {
            if !message.is_empty() {
                // Ignore errors when writing to stderr; we’re already exiting
                // with failure code so there’s not much more we can do.
                let _ = ui.write_stderr(&format_bytes!(b"{}\n", message));
            }
        }
        Err(CommandError::UnsupportedFeature { message }) => {
            match on_unsupported {
                OnUnsupported::Abort => {
                    let _ = ui.write_stderr(&format_bytes!(
                        b"unsupported feature: {}\n",
                        message
                    ));
                }
                OnUnsupported::AbortSilent => {}
                OnUnsupported::Fallback { .. } => unreachable!(),
            }
        }
+        Err(CommandError::InvalidFallback { path, err }) => {
+            let _ = ui.write_stderr(&format_bytes!(
+                b"abort: invalid fallback '{}': {}\n",
+                path,
+                err.as_bytes(),
+            ));
+        }
    }
    std::process::exit(exit_code(&result, use_detailed_exit_code))
}
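
The exec-based fallback above replaces the `rhg` process image instead of spawning a child. The pattern in isolation (Unix-only; the `hg version` target and exit code 255 are just placeholders):

use std::os::unix::process::CommandExt; // exec() is Unix-specific
use std::process::Command;

fn main() {
    // Replace this process with the fallback executable. On success,
    // exec() never returns; if it does return, something went wrong
    // (e.g. the binary vanished between the check and the exec).
    let err = Command::new("hg").arg("version").exec();
    eprintln!("exec failed: {}", err);
    std::process::exit(err.raw_os_error().unwrap_or(255));
}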

macro_rules! subcommands {
    ($( $command: ident )+) => {
        mod commands {
            $(
                pub mod $command;
            )+
        }

        fn add_subcommand_args<'a, 'b>(app: App<'a, 'b>) -> App<'a, 'b> {
            app
            $(
                .subcommand(commands::$command::args())
            )+
        }

        pub type RunFn = fn(&CliInvocation) -> Result<(), CommandError>;

        fn subcommand_run_fn(name: &str) -> Option<RunFn> {
            match name {
                $(
                    stringify!($command) => Some(commands::$command::run),
                )+
                _ => None,
            }
        }
    };
}
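
For one command, the `subcommands!` invocation below expands to roughly the following shape (a hand-expanded sketch that strips out the clap plumbing and uses `String` for errors):

// Roughly what `subcommands! { root }` generates: a module per command,
// plus a string -> function-pointer dispatch table.
mod commands {
    pub mod root {
        pub fn run() -> Result<(), String> {
            println!("/path/to/repo");
            Ok(())
        }
    }
}

type RunFn = fn() -> Result<(), String>;

fn subcommand_run_fn(name: &str) -> Option<RunFn> {
    match name {
        "root" => Some(commands::root::run),
        _ => None,
    }
}

fn main() {
    if let Some(run) = subcommand_run_fn("root") {
        run().unwrap();
    }
}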

subcommands! {
    cat
    debugdata
    debugrequirements
    debugignorerhg
    files
    root
    config
    status
}

pub struct CliInvocation<'a> {
    ui: &'a Ui,
    subcommand_args: &'a ArgMatches<'a>,
    config: &'a Config,
    /// References inside `Result` are a bit peculiar but allow
    /// `invocation.repo?` to work out with `&CliInvocation` since this
    /// `Result` type is `Copy`.
    repo: Result<&'a Repo, &'a NoRepoInCwdError>,
}

544 struct NoRepoInCwdError {
565 struct NoRepoInCwdError {
545 cwd: PathBuf,
566 cwd: PathBuf,
546 }
567 }

/// CLI arguments to be parsed "early" in order to be able to read
/// configuration before using Clap. Ideally we would also use Clap for this,
/// see <https://github.com/clap-rs/clap/discussions/2366>.
///
/// These arguments are still declared when we do use Clap later, so that Clap
/// does not return an error for their presence.
struct EarlyArgs {
    /// Values of all `--config` arguments. (Possibly none)
    config: Vec<Vec<u8>>,
    /// Value of the `--color` argument, if any.
    color: Option<Vec<u8>>,
    /// Value of the `-R` or `--repository` argument, if any.
    repo: Option<Vec<u8>>,
    /// Value of the `--cwd` argument, if any.
    cwd: Option<Vec<u8>>,
}

impl EarlyArgs {
    fn parse<'a>(args: impl IntoIterator<Item = &'a OsString>) -> Self {
        let mut args = args.into_iter().map(get_bytes_from_os_str);
        let mut config = Vec::new();
        let mut color = None;
        let mut repo = None;
        let mut cwd = None;
        // Use `while let` instead of `for` so that we can also call
        // `args.next()` inside the loop.
        while let Some(arg) = args.next() {
            if arg == b"--config" {
                if let Some(value) = args.next() {
                    config.push(value)
                }
            } else if let Some(value) = arg.drop_prefix(b"--config=") {
                config.push(value.to_owned())
            }

            if arg == b"--color" {
                if let Some(value) = args.next() {
                    color = Some(value)
                }
            } else if let Some(value) = arg.drop_prefix(b"--color=") {
                color = Some(value.to_owned())
            }

            if arg == b"--cwd" {
                if let Some(value) = args.next() {
                    cwd = Some(value)
                }
            } else if let Some(value) = arg.drop_prefix(b"--cwd=") {
                cwd = Some(value.to_owned())
            }

            if arg == b"--repository" || arg == b"-R" {
                if let Some(value) = args.next() {
                    repo = Some(value)
                }
            } else if let Some(value) = arg.drop_prefix(b"--repository=") {
                repo = Some(value.to_owned())
            } else if let Some(value) = arg.drop_prefix(b"-R") {
                repo = Some(value.to_owned())
            }
        }
        Self {
            config,
            color,
            repo,
            cwd,
        }
    }
}
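
As a usage sketch, the early pass simply scans the raw argument vector before
Clap runs, so that `--config` and `-R`/`--repository` can influence which
configuration and repository get loaded; the call site below is illustrative,
standing in for wherever `main` collects its arguments:

    // Illustrative call site; the values in the comments are what `parse`
    // would extract, per the loop above.
    let args: Vec<OsString> = std::env::args_os().collect();
    let early = EarlyArgs::parse(&args);
    // `rhg --config ui.color=no -R /srv/repo status` would yield:
    //   early.config == vec![b"ui.color=no".to_vec()]
    //   early.repo   == Some(b"/srv/repo".to_vec())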

/// What to do when encountering some unsupported feature.
///
/// See `HgError::UnsupportedFeature` and `CommandError::UnsupportedFeature`.
enum OnUnsupported {
    /// Print an error message describing what feature is not supported,
    /// and exit with code 252.
    Abort,
    /// Silently exit with code 252.
    AbortSilent,
    /// Try running a Python implementation
    Fallback { executable: Option<Vec<u8>> },
}

impl OnUnsupported {
    const DEFAULT: Self = OnUnsupported::Abort;

    fn from_config(config: &Config) -> Self {
        match config
            .get(b"rhg", b"on-unsupported")
            .map(|value| value.to_ascii_lowercase())
            .as_deref()
        {
            Some(b"abort") => OnUnsupported::Abort,
            Some(b"abort-silent") => OnUnsupported::AbortSilent,
            Some(b"fallback") => OnUnsupported::Fallback {
                executable: config
                    .get(b"rhg", b"fallback-executable")
                    .map(|x| x.to_owned()),
            },
            None => Self::DEFAULT,
            Some(_) => {
                // TODO: warn about unknown config value
                Self::DEFAULT
            }
        }
    }
}
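
This maps directly onto user configuration. A sketch of an hgrc that opts into
the fallback behaviour (the executable path here is only an example; when
`rhg.fallback-executable` is unset, `executable` is simply `None`):

    [rhg]
    on-unsupported = fallback
    fallback-executable = /usr/local/bin/hg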

/// The `*` extension is an edge-case for config sub-options that apply to all
/// extensions. For now, only `:required` exists, but that may change in the
/// future.
const SUPPORTED_EXTENSIONS: &[&[u8]] =
    &[b"blackbox", b"share", b"sparse", b"narrow", b"*"];

fn check_extensions(config: &Config) -> Result<(), CommandError> {
    if let Some(b"*") = config.get(b"rhg", b"ignored-extensions") {
        // All extensions are to be ignored, nothing to do here
        return Ok(());
    }

    let enabled: HashSet<&[u8]> = config
        .get_section_keys(b"extensions")
        .into_iter()
        .map(|extension| {
            // Ignore extension suboptions. Only `required` exists for now.
            // `rhg` either supports an extension or doesn't, so it doesn't
            // make sense to consider the loading of an extension.
            extension.split_2(b':').unwrap_or((extension, b"")).0
        })
        .collect();

    let mut unsupported = enabled;
    for supported in SUPPORTED_EXTENSIONS {
        unsupported.remove(supported);
    }

    if let Some(ignored_list) = config.get_list(b"rhg", b"ignored-extensions")
    {
        for ignored in ignored_list {
            unsupported.remove(ignored.as_slice());
        }
    }

    if unsupported.is_empty() {
        Ok(())
    } else {
        let mut unsupported: Vec<_> = unsupported.into_iter().collect();
        // Sort the extensions to get a stable output
        unsupported.sort();
        Err(CommandError::UnsupportedFeature {
            message: format_bytes!(
                b"extensions: {} (consider adding them to 'rhg.ignored-extensions' config)",
                join(unsupported, b", ")
            ),
        })
    }
}
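
The error message's suggestion can be followed literally: an unsupported
extension can stay enabled for the Python `hg` while being hidden from `rhg`.
A sketch, using the `churn` extension as an example:

    [extensions]
    churn =

    [rhg]
    ignored-extensions = churn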

fn check_unsupported(
    config: &Config,
    repo: Result<&Repo, &NoRepoInCwdError>,
) -> Result<(), CommandError> {
    check_extensions(config)?;

    if std::env::var_os("HG_PENDING").is_some() {
        // TODO: only if the value is `== repo.working_directory`?
        // What about relative vs. absolute paths?
        Err(CommandError::unsupported("$HG_PENDING"))?
    }

    if let Ok(repo) = repo {
        if repo.has_subrepos()? {
            Err(CommandError::unsupported("sub-repositories"))?
        }
    }

    if config.has_non_empty_section(b"encode") {
        Err(CommandError::unsupported("[encode] config"))?
    }

    if config.has_non_empty_section(b"decode") {
        Err(CommandError::unsupported("[decode] config"))?
    }

    Ok(())
}
@@ -1,447 +1,445 @@
Show all commands except debug commands
  $ hg debugcomplete
  abort
  add
  addremove
  annotate
  archive
  backout
  bisect
  bookmarks
  branch
  branches
  bundle
  cat
  clone
  commit
  config
  continue
  copy
  diff
  export
  files
  forget
  graft
  grep
  heads
  help
  identify
  import
  incoming
  init
  locate
  log
  manifest
  merge
  outgoing
  parents
  paths
  phase
  pull
  purge
  push
  recover
  remove
  rename
  resolve
  revert
  rollback
  root
  serve
  shelve
  status
  summary
  tag
  tags
  tip
  unbundle
  unshelve
  update
  verify
  version

Show all commands that start with "a"
  $ hg debugcomplete a
  abort
  add
  addremove
  annotate
  archive

Do not show debug commands if there are other candidates
  $ hg debugcomplete d
  diff

Show debug commands if there are no other candidates
  $ hg debugcomplete debug
  debug-repair-issue6528
  debugancestor
  debugantivirusrunning
  debugapplystreamclonebundle
  debugbackupbundle
  debugbuilddag
  debugbundle
  debugcapabilities
  debugchangedfiles
  debugcheckstate
  debugcolor
  debugcommands
  debugcomplete
  debugconfig
  debugcreatestreamclonebundle
  debugdag
  debugdata
  debugdate
  debugdeltachain
  debugdirstate
-   debugdirstateignorepatternshash
  debugdiscovery
  debugdownload
  debugextensions
  debugfileset
  debugformat
  debugfsinfo
  debuggetbundle
  debugignore
  debugindex
  debugindexdot
  debugindexstats
  debuginstall
  debugknown
  debuglabelcomplete
  debuglocks
  debugmanifestfulltextcache
  debugmergestate
  debugnamecomplete
  debugnodemap
  debugobsolete
  debugp1copies
  debugp2copies
  debugpathcomplete
  debugpathcopies
  debugpeer
  debugpickmergetool
  debugpushkey
  debugpvec
  debugrebuilddirstate
  debugrebuildfncache
  debugrename
  debugrequires
  debugrevlog
  debugrevlogindex
  debugrevspec
  debugserve
  debugsetparents
  debugshell
  debugsidedata
  debugssl
  debugstrip
  debugsub
  debugsuccessorssets
  debugtagscache
  debugtemplate
  debuguigetpass
  debuguiprompt
  debugupdatecaches
  debugupgraderepo
  debugwalk
  debugwhyunstable
  debugwireargs
  debugwireproto

Do not show the alias of a debug command if there are other candidates
(this should hide rawcommit)
  $ hg debugcomplete r
  recover
  remove
  rename
  resolve
  revert
  rollback
  root
Show the alias of a debug command if there are no other candidates
  $ hg debugcomplete rawc


Show the global options
  $ hg debugcomplete --options | sort
  --color
  --config
  --cwd
  --debug
  --debugger
  --encoding
  --encodingmode
  --help
  --hidden
  --noninteractive
  --pager
  --profile
  --quiet
  --repository
  --time
  --traceback
  --verbose
  --version
  -R
  -h
  -q
  -v
  -y

Show the options for the "serve" command
  $ hg debugcomplete --options serve | sort
  --accesslog
  --address
  --certificate
  --cmdserver
  --color
  --config
  --cwd
  --daemon
  --daemon-postexec
  --debug
  --debugger
  --encoding
  --encodingmode
  --errorlog
  --help
  --hidden
  --ipv6
  --name
  --noninteractive
  --pager
  --pid-file
  --port
  --prefix
  --print-url
  --profile
  --quiet
  --repository
  --stdio
  --style
  --subrepos
  --templates
  --time
  --traceback
  --verbose
  --version
  --web-conf
  -6
  -A
  -E
  -R
  -S
  -a
  -d
  -h
  -n
  -p
  -q
  -t
  -v
  -y

Show an error if we use --options with an ambiguous abbreviation
  $ hg debugcomplete --options s
  hg: command 's' is ambiguous:
      serve shelve showconfig status summary
  [10]

Show all commands + options
  $ hg debugcommands
  abort: dry-run
  add: include, exclude, subrepos, dry-run
  addremove: similarity, subrepos, include, exclude, dry-run
  annotate: rev, follow, no-follow, text, user, file, date, number, changeset, line-number, skip, ignore-all-space, ignore-space-change, ignore-blank-lines, ignore-space-at-eol, include, exclude, template
  archive: no-decode, prefix, rev, type, subrepos, include, exclude
  backout: merge, commit, no-commit, parent, rev, edit, tool, include, exclude, message, logfile, date, user
  bisect: reset, good, bad, skip, extend, command, noupdate
  bookmarks: force, rev, delete, rename, inactive, list, template
  branch: force, clean, rev
  branches: active, closed, rev, template
  bundle: force, rev, branch, base, all, type, ssh, remotecmd, insecure
  cat: output, rev, decode, include, exclude, template
  clone: noupdate, updaterev, rev, branch, pull, uncompressed, stream, ssh, remotecmd, insecure
  commit: addremove, close-branch, amend, secret, edit, force-close-branch, interactive, include, exclude, message, logfile, date, user, subrepos
  config: untrusted, exp-all-known, edit, local, source, shared, non-shared, global, template
  continue: dry-run
  copy: forget, after, at-rev, force, include, exclude, dry-run
  debug-repair-issue6528: to-report, from-report, paranoid, dry-run
  debugancestor:
  debugantivirusrunning:
  debugapplystreamclonebundle:
  debugbackupbundle: recover, patch, git, limit, no-merges, stat, graph, style, template
  debugbuilddag: mergeable-file, overwritten-file, new-file, from-existing
  debugbundle: all, part-type, spec
  debugcapabilities:
  debugchangedfiles: compute
  debugcheckstate:
  debugcolor: style
  debugcommands:
  debugcomplete: options
  debugcreatestreamclonebundle:
  debugdag: tags, branches, dots, spaces
  debugdata: changelog, manifest, dir
  debugdate: extended
  debugdeltachain: changelog, manifest, dir, template
-   debugdirstateignorepatternshash:
-   debugdirstate: nodates, dates, datesort, all
+   debugdirstate: nodates, dates, datesort, docket, all
  debugdiscovery: old, nonheads, rev, seed, local-as-revs, remote-as-revs, ssh, remotecmd, insecure, template
  debugdownload: output
  debugextensions: template
  debugfileset: rev, all-files, show-matcher, show-stage
  debugformat: template
  debugfsinfo:
  debuggetbundle: head, common, type
  debugignore:
  debugindex: changelog, manifest, dir, template
  debugindexdot: changelog, manifest, dir
  debugindexstats:
  debuginstall: template
  debugknown:
  debuglabelcomplete:
  debuglocks: force-free-lock, force-free-wlock, set-lock, set-wlock
  debugmanifestfulltextcache: clear, add
  debugmergestate: style, template
  debugnamecomplete:
  debugnodemap: dump-new, dump-disk, check, metadata
  debugobsolete: flags, record-parents, rev, exclusive, index, delete, date, user, template
  debugp1copies: rev
  debugp2copies: rev
  debugpathcomplete: full, normal, added, removed
  debugpathcopies: include, exclude
  debugpeer:
  debugpickmergetool: rev, changedelete, include, exclude, tool
  debugpushkey:
  debugpvec:
  debugrebuilddirstate: rev, minimal
  debugrebuildfncache: only-data
  debugrename: rev
  debugrequires:
  debugrevlog: changelog, manifest, dir, dump
  debugrevlogindex: changelog, manifest, dir, format
  debugrevspec: optimize, show-revs, show-set, show-stage, no-optimized, verify-optimized
  debugserve: sshstdio, logiofd, logiofile
  debugsetparents:
  debugshell:
  debugsidedata: changelog, manifest, dir
  debugssl:
  debugstrip: rev, force, no-backup, nobackup, , keep, bookmark, soft
  debugsub: rev
  debugsuccessorssets: closest
  debugtagscache:
  debugtemplate: rev, define
  debuguigetpass: prompt
  debuguiprompt: prompt
  debugupdatecaches:
  debugupgraderepo: optimize, run, backup, changelog, manifest, filelogs
  debugwalk: include, exclude
  debugwhyunstable:
  debugwireargs: three, four, five, ssh, remotecmd, insecure
  debugwireproto: localssh, peer, noreadstderr, nologhandshake, ssh, remotecmd, insecure
  diff: rev, from, to, change, text, git, binary, nodates, noprefix, show-function, reverse, ignore-all-space, ignore-space-change, ignore-blank-lines, ignore-space-at-eol, unified, stat, root, include, exclude, subrepos
  export: bookmark, output, switch-parent, rev, text, git, binary, nodates, template
  files: rev, print0, include, exclude, template, subrepos
  forget: interactive, include, exclude, dry-run
  graft: rev, base, continue, stop, abort, edit, log, no-commit, force, currentdate, currentuser, date, user, tool, dry-run
  grep: print0, all, diff, text, follow, ignore-case, files-with-matches, line-number, rev, all-files, user, date, template, include, exclude
  heads: rev, topo, active, closed, style, template
  help: extension, command, keyword, system
  identify: rev, num, id, branch, tags, bookmarks, ssh, remotecmd, insecure, template
  import: strip, base, secret, edit, force, no-commit, bypass, partial, exact, prefix, import-branch, message, logfile, date, user, similarity
  incoming: force, newest-first, bundle, rev, bookmarks, branch, patch, git, limit, no-merges, stat, graph, style, template, ssh, remotecmd, insecure, subrepos
  init: ssh, remotecmd, insecure
  locate: rev, print0, fullpath, include, exclude
  log: follow, follow-first, date, copies, keyword, rev, line-range, removed, only-merges, user, only-branch, branch, bookmark, prune, patch, git, limit, no-merges, stat, graph, style, template, include, exclude
  manifest: rev, all, template
  merge: force, rev, preview, abort, tool
  outgoing: force, rev, newest-first, bookmarks, branch, patch, git, limit, no-merges, stat, graph, style, template, ssh, remotecmd, insecure, subrepos
  parents: rev, style, template
  paths: template
  phase: public, draft, secret, force, rev
  pull: update, force, confirm, rev, bookmark, branch, ssh, remotecmd, insecure
  purge: abort-on-err, all, ignored, dirs, files, print, print0, confirm, include, exclude
  push: force, rev, bookmark, all-bookmarks, branch, new-branch, pushvars, publish, ssh, remotecmd, insecure
  recover: verify
  remove: after, force, subrepos, include, exclude, dry-run
  rename: forget, after, at-rev, force, include, exclude, dry-run
  resolve: all, list, mark, unmark, no-status, re-merge, tool, include, exclude, template
  revert: all, date, rev, no-backup, interactive, include, exclude, dry-run
  rollback: dry-run, force
  root: template
  serve: accesslog, daemon, daemon-postexec, errorlog, port, address, prefix, name, web-conf, webdir-conf, pid-file, stdio, cmdserver, templates, style, ipv6, certificate, print-url, subrepos
  shelve: addremove, unknown, cleanup, date, delete, edit, keep, list, message, name, patch, interactive, stat, include, exclude
  status: all, modified, added, removed, deleted, clean, unknown, ignored, no-status, terse, copies, print0, rev, change, include, exclude, subrepos, template
  summary: remote
  tag: force, local, rev, remove, edit, message, date, user
  tags: template
  tip: patch, git, style, template
  unbundle: update
  unshelve: abort, continue, interactive, keep, name, tool, date
  update: clean, check, merge, date, rev, tool
  verify: full
  version: template

  $ hg init a
  $ cd a
  $ echo fee > fee
  $ hg ci -q -Amfee
  $ hg tag fee
  $ mkdir fie
  $ echo dead > fie/dead
  $ echo live > fie/live
  $ hg bookmark fo
  $ hg branch -q fie
  $ hg ci -q -Amfie
  $ echo fo > fo
  $ hg branch -qf default
  $ hg ci -q -Amfo
  $ echo Fum > Fum
  $ hg ci -q -AmFum
  $ hg bookmark Fum

Test debugpathcomplete

  $ hg debugpathcomplete f
  fee
  fie
  fo
  $ hg debugpathcomplete -f f
  fee
  fie/dead
  fie/live
  fo

  $ hg rm Fum
  $ hg debugpathcomplete -r F
  Fum

Test debugnamecomplete

  $ hg debugnamecomplete
  Fum
  default
  fee
  fie
  fo
  tip
  $ hg debugnamecomplete f
  fee
  fie
  fo

Test debuglabelcomplete, a deprecated name for debugnamecomplete that is still
used for completions in some shells.

  $ hg debuglabelcomplete
  Fum
  default
  fee
  fie
  fo
  tip
  $ hg debuglabelcomplete f
  fee
  fie
  fo
@@ -1,122 +1,206 @@
#testcases dirstate-v1 dirstate-v2

#if dirstate-v2
  $ cat >> $HGRCPATH << EOF
  > [format]
  > use-dirstate-v2=1
  > [storage]
  > dirstate-v2.slow-path=allow
  > EOF
#endif

------ Test dirstate._dirs refcounting

  $ hg init t
  $ cd t
  $ mkdir -p a/b/c/d
  $ touch a/b/c/d/x
  $ touch a/b/c/d/y
  $ touch a/b/c/d/z
  $ hg ci -Am m
  adding a/b/c/d/x
  adding a/b/c/d/y
  adding a/b/c/d/z
  $ hg mv a z
  moving a/b/c/d/x to z/b/c/d/x
  moving a/b/c/d/y to z/b/c/d/y
  moving a/b/c/d/z to z/b/c/d/z

Test name collisions

  $ rm z/b/c/d/x
  $ mkdir z/b/c/d/x
  $ touch z/b/c/d/x/y
  $ hg add z/b/c/d/x/y
  abort: file 'z/b/c/d/x' in dirstate clashes with 'z/b/c/d/x/y'
  [255]
  $ rm -rf z/b/c/d
  $ touch z/b/c/d
  $ hg add z/b/c/d
  abort: directory 'z/b/c/d' already in dirstate
  [255]

  $ cd ..

Issue1790: dirstate entry locked into unset if file mtime is set into
the future

Prepare test repo:

  $ hg init u
  $ cd u
  $ echo a > a
  $ hg add
  adding a
  $ hg ci -m1

Set mtime of a into the future:

  $ touch -t 203101011200 a

Status must not set a's entry to unset (issue1790):

  $ hg status
  $ hg debugstate
  n 644 2 2031-01-01 12:00:00 a

Test modulo storage/comparison of absurd dates:

#if no-aix
  $ touch -t 195001011200 a
  $ hg st
  $ hg debugstate
  n 644 2 2018-01-19 15:14:08 a
#endif

Verify that exceptions during a dirstate change leave the dirstate
coherent (issue4353)

  $ cat > ../dirstateexception.py <<EOF
  > from mercurial import (
  >     error,
  >     extensions,
  >     mergestate as mergestatemod,
  > )
  >
  > def wraprecordupdates(*args):
  >     raise error.Abort(b"simulated error while recording dirstateupdates")
  >
  > def reposetup(ui, repo):
  >     extensions.wrapfunction(mergestatemod, 'recordupdates',
  >                             wraprecordupdates)
  > EOF

  $ hg rm a
  $ hg commit -m 'rm a'
  $ echo "[extensions]" >> .hg/hgrc
  $ echo "dirstateex=../dirstateexception.py" >> .hg/hgrc
  $ hg up 0
  abort: simulated error while recording dirstateupdates
  [255]
  $ hg log -r . -T '{rev}\n'
  1
  $ hg status
  ? a

#if dirstate-v2
Check that folders that are prefixes of others do not throw the packer into an
infinite loop.

  $ cd ..
  $ hg init infinite-loop
  $ cd infinite-loop
  $ mkdir hgext3rd hgext
  $ touch hgext3rd/__init__.py hgext/zeroconf.py
  $ hg commit -Aqm0

  $ hg st -c
  C hgext/zeroconf.py
  C hgext3rd/__init__.py

  $ cd ..
+
+ Check that the old dirstate data file is removed correctly and the new one is
+ valid.
+
+   $ dirstate_data_files () {
+   >   find .hg -maxdepth 1 -name "dirstate.*"
+   > }
+
+   $ find_dirstate_uuid () {
+   >   hg debugstate --docket | grep uuid | sed 's/.*uuid: \(.*\)/\1/'
+   > }
+
+   $ dirstate_uuid_has_not_changed () {
+   >   # Non-Rust always rewrites the whole dirstate
+   >   if [ $# -eq 1 ] || ([ -n "$HGMODULEPOLICY" ] && [ -z "${HGMODULEPOLICY##*rust*}" ]) || [ -n "$RHG_INSTALLED_AS_HG" ]; then
+   >     test $current_uid = $(find_dirstate_uuid)
+   >   else
+   >     echo "not testing because using Python implementation"
+   >   fi
+   > }
+
+   $ cd ..
+   $ hg init append-mostly
+   $ cd append-mostly
+   $ mkdir dir dir2
+   $ touch dir/a dir/b dir/c dir/d dir/e dir2/f
+   $ hg commit -Aqm initial
+   $ hg st
+   $ dirstate_data_files | wc -l
+    *1 (re)
+   $ current_uid=$(find_dirstate_uuid)
+
+ Nothing changes here
+
+   $ hg st
+   $ dirstate_data_files | wc -l
+    *1 (re)
+   $ dirstate_uuid_has_not_changed
+   not testing because using Python implementation (no-rust no-rhg !)
+
+ Trigger an append with a small change
+
+   $ echo "modified" > dir2/f
+   $ hg st
+   M dir2/f
+   $ dirstate_data_files | wc -l
+    *1 (re)
+   $ dirstate_uuid_has_not_changed
+   not testing because using Python implementation (no-rust no-rhg !)
+
+ Unused bytes counter is non-0 when appending
+   $ touch file
+   $ hg add file
+   $ current_uid=$(find_dirstate_uuid)
+
+ Trigger a rust/rhg run which updates the unused bytes value
+   $ hg st
+   M dir2/f
+   A file
+   $ dirstate_data_files | wc -l
+    *1 (re)
+   $ dirstate_uuid_has_not_changed
+   not testing because using Python implementation (no-rust no-rhg !)
+
+   $ hg debugstate --docket | grep unused
+   number of unused bytes: 0 (no-rust no-rhg !)
+   number of unused bytes: [1-9]\d* (re) (rhg no-rust !)
+   number of unused bytes: [1-9]\d* (re) (rust no-rhg !)
+   number of unused bytes: [1-9]\d* (re) (rust rhg !)
+
+ Delete most of the dirstate to trigger a non-append
+   $ hg rm dir/a dir/b dir/c dir/d
+   $ dirstate_data_files | wc -l
+    *1 (re)
+   $ dirstate_uuid_has_not_changed also-if-python
+   [1]
+
+ Check that unused bytes counter is reset when creating a new docket
+
+   $ hg debugstate --docket | grep unused
+   number of unused bytes: 0
#endif
+
+   $ cd ..