dirstate-v2: Move fixed-size tree metadata into the docket file...
Simon Sapin
r48482:78f7f0d4 default
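The only functional change in this otherwise unchanged file is the hunk near the end, in debugdirstateignorepatternshash: now that the fixed-size tree metadata lives in the docket file, the trailing 20-byte ignore-patterns hash can be sliced directly off the in-memory docket.tree_metadata instead of being read back by seeking to the end of the separate data file. Below is a minimal sketch of that before/after, assuming a hypothetical FakeDocket stand-in (not the real Mercurial docket API):

import binascii

HASH_LEN = 20  # 160 bits for SHA-1, matching hash_len in the hunk below


class FakeDocket:
    # Hypothetical stand-in for the dirstate-v2 docket after this change:
    # it carries the fixed-size tree metadata inline, and the last
    # HASH_LEN bytes of that metadata are the ignore-patterns hash.
    def __init__(self, tree_metadata):
        self.tree_metadata = tree_metadata


def ignore_patterns_hash(docket):
    # Before this change: open the data file, seek to
    # data_size - hash_len, and read the trailing 20 bytes.
    # After: slice the bytes that are already in memory.
    return binascii.hexlify(docket.tree_metadata[-HASH_LEN:])


# Fake metadata whose last 20 bytes stand in for the hash.
print(ignore_patterns_hash(FakeDocket(b"\x00" * 24 + bytes(range(20)))))

The real command does the same slice on repo.dirstate._map.docket, as the hunk shows.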
@@ -1,4854 +1,4850 @@
# debugcommands.py - command processing for debug* commands
#
# Copyright 2005-2016 Olivia Mackall <olivia@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import binascii
import codecs
import collections
import contextlib
import difflib
import errno
import glob
import operator
import os
import platform
import random
import re
import socket
import ssl
import stat
import string
import subprocess
import sys
import time

from .i18n import _
from .node import (
    bin,
    hex,
    nullrev,
    short,
)
from .pycompat import (
    getattr,
    open,
)
from . import (
    bundle2,
    bundlerepo,
    changegroup,
    cmdutil,
    color,
    context,
    copies,
    dagparser,
    encoding,
    error,
    exchange,
    extensions,
    filemerge,
    filesetlang,
    formatter,
    hg,
    httppeer,
    localrepo,
    lock as lockmod,
    logcmdutil,
    mergestate as mergestatemod,
    metadata,
    obsolete,
    obsutil,
    pathutil,
    phases,
    policy,
    pvec,
    pycompat,
    registrar,
    repair,
    repoview,
    revlog,
    revset,
    revsetlang,
    scmutil,
    setdiscovery,
    simplemerge,
    sshpeer,
    sslutil,
    streamclone,
    strip,
    tags as tagsmod,
    templater,
    treediscovery,
    upgrade,
    url as urlmod,
    util,
    vfs as vfsmod,
    wireprotoframing,
    wireprotoserver,
    wireprotov2peer,
)
from .interfaces import repository
from .utils import (
    cborutil,
    compression,
    dateutil,
    procutil,
    stringutil,
    urlutil,
)

from .revlogutils import (
    deltas as deltautil,
    nodemap,
    sidedata,
)

release = lockmod.release

table = {}
table.update(strip.command._table)
command = registrar.command(table)


@command(b'debugancestor', [], _(b'[INDEX] REV1 REV2'), optionalrepo=True)
def debugancestor(ui, repo, *args):
    """find the ancestor revision of two revisions in a given index"""
    if len(args) == 3:
        index, rev1, rev2 = args
        r = revlog.revlog(vfsmod.vfs(encoding.getcwd(), audit=False), index)
        lookup = r.lookup
    elif len(args) == 2:
        if not repo:
            raise error.Abort(
                _(b'there is no Mercurial repository here (.hg not found)')
            )
        rev1, rev2 = args
        r = repo.changelog
        lookup = repo.lookup
    else:
        raise error.Abort(_(b'either two or three arguments required'))
    a = r.ancestor(lookup(rev1), lookup(rev2))
    ui.write(b'%d:%s\n' % (r.rev(a), hex(a)))


@command(b'debugantivirusrunning', [])
def debugantivirusrunning(ui, repo):
    """attempt to trigger an antivirus scanner to see if one is active"""
    with repo.cachevfs.open('eicar-test-file.com', b'wb') as f:
        f.write(
            util.b85decode(
                # This is a base85-armored version of the EICAR test file. See
                # https://en.wikipedia.org/wiki/EICAR_test_file for details.
                b'ST#=}P$fV?P+K%yP+C|uG$>GBDK|qyDK~v2MM*<JQY}+dK~6+LQba95P'
                b'E<)&Nm5l)EmTEQR4qnHOhq9iNGnJx'
            )
        )
    # Give an AV engine time to scan the file.
    time.sleep(2)
    util.unlink(repo.cachevfs.join('eicar-test-file.com'))


@command(b'debugapplystreamclonebundle', [], b'FILE')
def debugapplystreamclonebundle(ui, repo, fname):
    """apply a stream clone bundle file"""
    f = hg.openpath(ui, fname)
    gen = exchange.readbundle(ui, f, fname)
    gen.apply(repo)


@command(
    b'debugbuilddag',
    [
        (
            b'm',
            b'mergeable-file',
            None,
            _(b'add single file mergeable changes'),
        ),
        (
            b'o',
            b'overwritten-file',
            None,
            _(b'add single file all revs overwrite'),
        ),
        (b'n', b'new-file', None, _(b'add new file at each rev')),
    ],
    _(b'[OPTION]... [TEXT]'),
)
def debugbuilddag(
    ui,
    repo,
    text=None,
    mergeable_file=False,
    overwritten_file=False,
    new_file=False,
):
    """builds a repo with a given DAG from scratch in the current empty repo

    The description of the DAG is read from stdin if not given on the
    command line.

    Elements:

    - "+n" is a linear run of n nodes based on the current default parent
    - "." is a single node based on the current default parent
    - "$" resets the default parent to null (implied at the start);
      otherwise the default parent is always the last node created
    - "<p" sets the default parent to the backref p
    - "*p" is a fork at parent p, which is a backref
    - "*p1/p2" is a merge of parents p1 and p2, which are backrefs
    - "/p2" is a merge of the preceding node and p2
    - ":tag" defines a local tag for the preceding node
    - "@branch" sets the named branch for subsequent nodes
    - "#...\\n" is a comment up to the end of the line

    Whitespace between the above elements is ignored.

    A backref is either

    - a number n, which references the node curr-n, where curr is the current
      node, or
    - the name of a local tag you placed earlier using ":tag", or
    - empty to denote the default parent.

    All string-valued elements are either strictly alphanumeric, or must
    be enclosed in double quotes ("..."), with "\\" as escape character.
    """

    if text is None:
        ui.status(_(b"reading DAG from stdin\n"))
        text = ui.fin.read()

    cl = repo.changelog
    if len(cl) > 0:
        raise error.Abort(_(b'repository is not empty'))

    # determine number of revs in DAG
    total = 0
    for type, data in dagparser.parsedag(text):
        if type == b'n':
            total += 1

    if mergeable_file:
        linesperrev = 2
        # make a file with k lines per rev
        initialmergedlines = [
            b'%d' % i for i in pycompat.xrange(0, total * linesperrev)
        ]
        initialmergedlines.append(b"")

    tags = []
    progress = ui.makeprogress(
        _(b'building'), unit=_(b'revisions'), total=total
    )
    with progress, repo.wlock(), repo.lock(), repo.transaction(b"builddag"):
        at = -1
        atbranch = b'default'
        nodeids = []
        id = 0
        progress.update(id)
        for type, data in dagparser.parsedag(text):
            if type == b'n':
                ui.note((b'node %s\n' % pycompat.bytestr(data)))
                id, ps = data

                files = []
                filecontent = {}

                p2 = None
                if mergeable_file:
                    fn = b"mf"
                    p1 = repo[ps[0]]
                    if len(ps) > 1:
                        p2 = repo[ps[1]]
                        pa = p1.ancestor(p2)
                        base, local, other = [
                            x[fn].data() for x in (pa, p1, p2)
                        ]
                        m3 = simplemerge.Merge3Text(base, local, other)
                        ml = [l.strip() for l in m3.merge_lines()]
                        ml.append(b"")
                    elif at > 0:
                        ml = p1[fn].data().split(b"\n")
                    else:
                        ml = initialmergedlines
                    ml[id * linesperrev] += b" r%i" % id
                    mergedtext = b"\n".join(ml)
                    files.append(fn)
                    filecontent[fn] = mergedtext

                if overwritten_file:
                    fn = b"of"
                    files.append(fn)
                    filecontent[fn] = b"r%i\n" % id

                if new_file:
                    fn = b"nf%i" % id
                    files.append(fn)
                    filecontent[fn] = b"r%i\n" % id
                    if len(ps) > 1:
                        if not p2:
                            p2 = repo[ps[1]]
                        for fn in p2:
                            if fn.startswith(b"nf"):
                                files.append(fn)
                                filecontent[fn] = p2[fn].data()

                def fctxfn(repo, cx, path):
                    if path in filecontent:
                        return context.memfilectx(
                            repo, cx, path, filecontent[path]
                        )
                    return None

                if len(ps) == 0 or ps[0] < 0:
                    pars = [None, None]
                elif len(ps) == 1:
                    pars = [nodeids[ps[0]], None]
                else:
                    pars = [nodeids[p] for p in ps]
                cx = context.memctx(
                    repo,
                    pars,
                    b"r%i" % id,
                    files,
                    fctxfn,
                    date=(id, 0),
                    user=b"debugbuilddag",
                    extra={b'branch': atbranch},
                )
                nodeid = repo.commitctx(cx)
                nodeids.append(nodeid)
                at = id
            elif type == b'l':
                id, name = data
                ui.note((b'tag %s\n' % name))
                tags.append(b"%s %s\n" % (hex(repo.changelog.node(id)), name))
            elif type == b'a':
                ui.note((b'branch %s\n' % data))
                atbranch = data
            progress.update(id)

    if tags:
        repo.vfs.write(b"localtags", b"".join(tags))


def _debugchangegroup(ui, gen, all=None, indent=0, **opts):
    indent_string = b' ' * indent
    if all:
        ui.writenoi18n(
            b"%sformat: id, p1, p2, cset, delta base, len(delta)\n"
            % indent_string
        )

        def showchunks(named):
            ui.write(b"\n%s%s\n" % (indent_string, named))
            for deltadata in gen.deltaiter():
                node, p1, p2, cs, deltabase, delta, flags, sidedata = deltadata
                ui.write(
                    b"%s%s %s %s %s %s %d\n"
                    % (
                        indent_string,
                        hex(node),
                        hex(p1),
                        hex(p2),
                        hex(cs),
                        hex(deltabase),
                        len(delta),
                    )
                )

        gen.changelogheader()
        showchunks(b"changelog")
        gen.manifestheader()
        showchunks(b"manifest")
        for chunkdata in iter(gen.filelogheader, {}):
            fname = chunkdata[b'filename']
            showchunks(fname)
    else:
        if isinstance(gen, bundle2.unbundle20):
            raise error.Abort(_(b'use debugbundle2 for this file'))
        gen.changelogheader()
        for deltadata in gen.deltaiter():
            node, p1, p2, cs, deltabase, delta, flags, sidedata = deltadata
            ui.write(b"%s%s\n" % (indent_string, hex(node)))


def _debugobsmarkers(ui, part, indent=0, **opts):
    """display version and markers contained in 'data'"""
    opts = pycompat.byteskwargs(opts)
    data = part.read()
    indent_string = b' ' * indent
    try:
        version, markers = obsolete._readmarkers(data)
    except error.UnknownVersion as exc:
        msg = b"%sunsupported version: %s (%d bytes)\n"
        msg %= indent_string, exc.version, len(data)
        ui.write(msg)
    else:
        msg = b"%sversion: %d (%d bytes)\n"
        msg %= indent_string, version, len(data)
        ui.write(msg)
        fm = ui.formatter(b'debugobsolete', opts)
        for rawmarker in sorted(markers):
            m = obsutil.marker(None, rawmarker)
            fm.startitem()
            fm.plain(indent_string)
            cmdutil.showmarker(fm, m)
        fm.end()


def _debugphaseheads(ui, data, indent=0):
407 """display version and markers contained in 'data'"""
407 """display version and markers contained in 'data'"""
    indent_string = b' ' * indent
    headsbyphase = phases.binarydecode(data)
    for phase in phases.allphases:
        for head in headsbyphase[phase]:
            ui.write(indent_string)
            ui.write(b'%s %s\n' % (hex(head), phases.phasenames[phase]))


def _quasirepr(thing):
    if isinstance(thing, (dict, util.sortdict, collections.OrderedDict)):
        return b'{%s}' % (
            b', '.join(b'%s: %s' % (k, thing[k]) for k in sorted(thing))
        )
    return pycompat.bytestr(repr(thing))


def _debugbundle2(ui, gen, all=None, **opts):
    """lists the contents of a bundle2"""
    if not isinstance(gen, bundle2.unbundle20):
        raise error.Abort(_(b'not a bundle2 file'))
    ui.write((b'Stream params: %s\n' % _quasirepr(gen.params)))
    parttypes = opts.get('part_type', [])
    for part in gen.iterparts():
        if parttypes and part.type not in parttypes:
            continue
        msg = b'%s -- %s (mandatory: %r)\n'
        ui.write((msg % (part.type, _quasirepr(part.params), part.mandatory)))
        if part.type == b'changegroup':
            version = part.params.get(b'version', b'01')
            cg = changegroup.getunbundler(version, part, b'UN')
            if not ui.quiet:
                _debugchangegroup(ui, cg, all=all, indent=4, **opts)
        if part.type == b'obsmarkers':
            if not ui.quiet:
                _debugobsmarkers(ui, part, indent=4, **opts)
        if part.type == b'phase-heads':
            if not ui.quiet:
                _debugphaseheads(ui, part, indent=4)


@command(
    b'debugbundle',
    [
        (b'a', b'all', None, _(b'show all details')),
        (b'', b'part-type', [], _(b'show only the named part type')),
        (b'', b'spec', None, _(b'print the bundlespec of the bundle')),
    ],
    _(b'FILE'),
    norepo=True,
)
def debugbundle(ui, bundlepath, all=None, spec=None, **opts):
    """lists the contents of a bundle"""
    with hg.openpath(ui, bundlepath) as f:
        if spec:
            spec = exchange.getbundlespec(ui, f)
            ui.write(b'%s\n' % spec)
            return

        gen = exchange.readbundle(ui, f, bundlepath)
        if isinstance(gen, bundle2.unbundle20):
            return _debugbundle2(ui, gen, all=all, **opts)
        _debugchangegroup(ui, gen, all=all, **opts)


@command(b'debugcapabilities', [], _(b'PATH'), norepo=True)
def debugcapabilities(ui, path, **opts):
    """lists the capabilities of a remote peer"""
    opts = pycompat.byteskwargs(opts)
    peer = hg.peer(ui, opts, path)
    try:
        caps = peer.capabilities()
        ui.writenoi18n(b'Main capabilities:\n')
        for c in sorted(caps):
            ui.write(b'  %s\n' % c)
        b2caps = bundle2.bundle2caps(peer)
        if b2caps:
            ui.writenoi18n(b'Bundle2 capabilities:\n')
            for key, values in sorted(pycompat.iteritems(b2caps)):
                ui.write(b'  %s\n' % key)
                for v in values:
                    ui.write(b'    %s\n' % v)
    finally:
        peer.close()


@command(
    b'debugchangedfiles',
    [
        (
            b'',
            b'compute',
            False,
            b"compute information instead of reading it from storage",
        ),
    ],
    b'REV',
)
def debugchangedfiles(ui, repo, rev, **opts):
    """list the stored files changes for a revision"""
    ctx = scmutil.revsingle(repo, rev, None)
    files = None

    if opts['compute']:
        files = metadata.compute_all_files_changes(ctx)
    else:
        sd = repo.changelog.sidedata(ctx.rev())
        files_block = sd.get(sidedata.SD_FILES)
        if files_block is not None:
            files = metadata.decode_files_sidedata(sd)
    if files is not None:
        for f in sorted(files.touched):
            if f in files.added:
                action = b"added"
            elif f in files.removed:
                action = b"removed"
            elif f in files.merged:
                action = b"merged"
            elif f in files.salvaged:
                action = b"salvaged"
            else:
                action = b"touched"

            copy_parent = b""
            copy_source = b""
            if f in files.copied_from_p1:
                copy_parent = b"p1"
                copy_source = files.copied_from_p1[f]
            elif f in files.copied_from_p2:
                copy_parent = b"p2"
                copy_source = files.copied_from_p2[f]

            data = (action, copy_parent, f, copy_source)
            template = b"%-8s %2s: %s, %s;\n"
            ui.write(template % data)


@command(b'debugcheckstate', [], b'')
def debugcheckstate(ui, repo):
    """validate the correctness of the current dirstate"""
    parent1, parent2 = repo.dirstate.parents()
    m1 = repo[parent1].manifest()
    m2 = repo[parent2].manifest()
    errors = 0
    for f in repo.dirstate:
        state = repo.dirstate[f]
        if state in b"nr" and f not in m1:
            ui.warn(_(b"%s in state %s, but not in manifest1\n") % (f, state))
            errors += 1
        if state in b"a" and f in m1:
            ui.warn(_(b"%s in state %s, but also in manifest1\n") % (f, state))
            errors += 1
        if state in b"m" and f not in m1 and f not in m2:
            ui.warn(
                _(b"%s in state %s, but not in either manifest\n") % (f, state)
            )
            errors += 1
    for f in m1:
        state = repo.dirstate[f]
        if state not in b"nrm":
            ui.warn(_(b"%s in manifest1, but listed as state %s") % (f, state))
            errors += 1
    if errors:
        errstr = _(b".hg/dirstate inconsistent with current parent's manifest")
        raise error.Abort(errstr)


@command(
    b'debugcolor',
    [(b'', b'style', None, _(b'show all configured styles'))],
    b'hg debugcolor',
)
def debugcolor(ui, repo, **opts):
580 """show available color, effects or style"""
580 """show available color, effects or style"""
    ui.writenoi18n(b'color mode: %s\n' % stringutil.pprint(ui._colormode))
    if opts.get('style'):
        return _debugdisplaystyle(ui)
    else:
        return _debugdisplaycolor(ui)


def _debugdisplaycolor(ui):
    ui = ui.copy()
    ui._styles.clear()
    for effect in color._activeeffects(ui).keys():
        ui._styles[effect] = effect
    if ui._terminfoparams:
        for k, v in ui.configitems(b'color'):
            if k.startswith(b'color.'):
                ui._styles[k] = k[6:]
            elif k.startswith(b'terminfo.'):
                ui._styles[k] = k[9:]
    ui.write(_(b'available colors:\n'))
    # sort label with a '_' after the other to group '_background' entry.
    items = sorted(ui._styles.items(), key=lambda i: (b'_' in i[0], i[0], i[1]))
    for colorname, label in items:
        ui.write(b'%s\n' % colorname, label=label)


def _debugdisplaystyle(ui):
    ui.write(_(b'available style:\n'))
    if not ui._styles:
        return
    width = max(len(s) for s in ui._styles)
    for label, effects in sorted(ui._styles.items()):
        ui.write(b'%s' % label, label=label)
        if effects:
            # 50
            ui.write(b': ')
            ui.write(b' ' * (max(0, width - len(label))))
            ui.write(b', '.join(ui.label(e, e) for e in effects.split()))
        ui.write(b'\n')


@command(b'debugcreatestreamclonebundle', [], b'FILE')
def debugcreatestreamclonebundle(ui, repo, fname):
    """create a stream clone bundle file

    Stream bundles are special bundles that are essentially archives of
    revlog files. They are commonly used for cloning very quickly.
    """
    # TODO we may want to turn this into an abort when this functionality
    # is moved into `hg bundle`.
    if phases.hassecret(repo):
        ui.warn(
            _(
                b'(warning: stream clone bundle will contain secret '
                b'revisions)\n'
            )
        )

    requirements, gen = streamclone.generatebundlev1(repo)
    changegroup.writechunks(ui, gen, fname)

    ui.write(_(b'bundle requirements: %s\n') % b', '.join(sorted(requirements)))


@command(
    b'debugdag',
    [
        (b't', b'tags', None, _(b'use tags as labels')),
        (b'b', b'branches', None, _(b'annotate with branch names')),
        (b'', b'dots', None, _(b'use dots for runs')),
        (b's', b'spaces', None, _(b'separate elements by spaces')),
    ],
    _(b'[OPTION]... [FILE [REV]...]'),
    optionalrepo=True,
)
def debugdag(ui, repo, file_=None, *revs, **opts):
    """format the changelog or an index DAG as a concise textual description

    If you pass a revlog index, the revlog's DAG is emitted. If you list
    revision numbers, they get labeled in the output as rN.

    Otherwise, the changelog DAG of the current repo is emitted.
    """
    spaces = opts.get('spaces')
    dots = opts.get('dots')
    if file_:
        rlog = revlog.revlog(vfsmod.vfs(encoding.getcwd(), audit=False), file_)
        revs = {int(r) for r in revs}

        def events():
            for r in rlog:
                yield b'n', (r, list(p for p in rlog.parentrevs(r) if p != -1))
                if r in revs:
                    yield b'l', (r, b"r%i" % r)

    elif repo:
        cl = repo.changelog
        tags = opts.get('tags')
        branches = opts.get('branches')
        if tags:
            labels = {}
            for l, n in repo.tags().items():
                labels.setdefault(cl.rev(n), []).append(l)

        def events():
            b = b"default"
            for r in cl:
                if branches:
                    newb = cl.read(cl.node(r))[5][b'branch']
                    if newb != b:
                        yield b'a', newb
                        b = newb
                yield b'n', (r, list(p for p in cl.parentrevs(r) if p != -1))
                if tags:
                    ls = labels.get(r)
                    if ls:
                        for l in ls:
                            yield b'l', (r, l)

    else:
        raise error.Abort(_(b'need repo for changelog dag'))

    for line in dagparser.dagtextlines(
        events(),
        addspaces=spaces,
        wraplabels=True,
        wrapannotations=True,
        wrapnonlinear=dots,
        usedots=dots,
        maxlinewidth=70,
    ):
        ui.write(line)
        ui.write(b"\n")


@command(b'debugdata', cmdutil.debugrevlogopts, _(b'-c|-m|FILE REV'))
def debugdata(ui, repo, file_, rev=None, **opts):
    """dump the contents of a data file revision"""
    opts = pycompat.byteskwargs(opts)
    if opts.get(b'changelog') or opts.get(b'manifest') or opts.get(b'dir'):
        if rev is not None:
            raise error.CommandError(b'debugdata', _(b'invalid arguments'))
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'debugdata', _(b'invalid arguments'))
    r = cmdutil.openstorage(repo, b'debugdata', file_, opts)
    try:
        ui.write(r.rawdata(r.lookup(rev)))
    except KeyError:
        raise error.Abort(_(b'invalid revision identifier %s') % rev)


@command(
    b'debugdate',
    [(b'e', b'extended', None, _(b'try extended date formats'))],
    _(b'[-e] DATE [RANGE]'),
    norepo=True,
    optionalrepo=True,
)
def debugdate(ui, date, range=None, **opts):
    """parse and display a date"""
    if opts["extended"]:
        d = dateutil.parsedate(date, dateutil.extendeddateformats)
    else:
        d = dateutil.parsedate(date)
    ui.writenoi18n(b"internal: %d %d\n" % d)
    ui.writenoi18n(b"standard: %s\n" % dateutil.datestr(d))
    if range:
        m = dateutil.matchdate(range)
        ui.writenoi18n(b"match: %s\n" % m(d[0]))


@command(
    b'debugdeltachain',
    cmdutil.debugrevlogopts + cmdutil.formatteropts,
    _(b'-c|-m|FILE'),
    optionalrepo=True,
)
def debugdeltachain(ui, repo, file_=None, **opts):
    """dump information about delta chains in a revlog

    Output can be templatized. Available template keywords are:

    :``rev``:       revision number
    :``chainid``:   delta chain identifier (numbered by unique base)
    :``chainlen``:  delta chain length to this revision
    :``prevrev``:   previous revision in delta chain
    :``deltatype``: role of delta / how it was computed
    :``compsize``:  compressed size of revision
    :``uncompsize``: uncompressed size of revision
    :``chainsize``: total size of compressed revisions in chain
    :``chainratio``: total chain size divided by uncompressed revision size
                     (new delta chains typically start at ratio 2.00)
    :``lindist``:   linear distance from base revision in delta chain to end
                    of this revision
    :``extradist``: total size of revisions not part of this delta chain from
                    base of delta chain to end of this revision; a measurement
                    of how much extra data we need to read/seek across to read
                    the delta chain for this revision
    :``extraratio``: extradist divided by chainsize; another representation of
                     how much unrelated data is needed to load this delta chain

    If the repository is configured to use the sparse read, additional keywords
    are available:

    :``readsize``:     total size of data read from the disk for a revision
                       (sum of the sizes of all the blocks)
    :``largestblock``: size of the largest block of data read from the disk
    :``readdensity``:  density of useful bytes in the data read from the disk
    :``srchunks``:     in how many data hunks the whole revision would be read

    The sparse read can be enabled with experimental.sparse-read = True
    """
    opts = pycompat.byteskwargs(opts)
    r = cmdutil.openrevlog(repo, b'debugdeltachain', file_, opts)
    index = r.index
    start = r.start
    length = r.length
    generaldelta = r._generaldelta
    withsparseread = getattr(r, '_withsparseread', False)

    def revinfo(rev):
        e = index[rev]
        compsize = e[1]
        uncompsize = e[2]
        chainsize = 0

        if generaldelta:
            if e[3] == e[5]:
                deltatype = b'p1'
            elif e[3] == e[6]:
                deltatype = b'p2'
            elif e[3] == rev - 1:
                deltatype = b'prev'
            elif e[3] == rev:
                deltatype = b'base'
            else:
                deltatype = b'other'
        else:
            if e[3] == rev:
                deltatype = b'base'
            else:
                deltatype = b'prev'

        chain = r._deltachain(rev)[0]
        for iterrev in chain:
            e = index[iterrev]
            chainsize += e[1]

        return compsize, uncompsize, deltatype, chain, chainsize

    fm = ui.formatter(b'debugdeltachain', opts)

    fm.plain(
        b'    rev  chain# chainlen     prev   delta       '
        b'size    rawsize  chainsize     ratio   lindist extradist '
        b'extraratio'
    )
    if withsparseread:
        fm.plain(b'   readsize largestblk rddensity srchunks')
    fm.plain(b'\n')

    chainbases = {}
    for rev in r:
        comp, uncomp, deltatype, chain, chainsize = revinfo(rev)
        chainbase = chain[0]
        chainid = chainbases.setdefault(chainbase, len(chainbases) + 1)
        basestart = start(chainbase)
        revstart = start(rev)
        lineardist = revstart + comp - basestart
        extradist = lineardist - chainsize
        try:
            prevrev = chain[-2]
        except IndexError:
            prevrev = -1

        if uncomp != 0:
            chainratio = float(chainsize) / float(uncomp)
        else:
            chainratio = chainsize

        if chainsize != 0:
            extraratio = float(extradist) / float(chainsize)
        else:
            extraratio = extradist

        fm.startitem()
        fm.write(
            b'rev chainid chainlen prevrev deltatype compsize '
            b'uncompsize chainsize chainratio lindist extradist '
            b'extraratio',
            b'%7d %7d %8d %8d %7s %10d %10d %10d %9.5f %9d %9d %10.5f',
            rev,
            chainid,
            len(chain),
            prevrev,
            deltatype,
            comp,
            uncomp,
            chainsize,
            chainratio,
            lineardist,
            extradist,
            extraratio,
            rev=rev,
            chainid=chainid,
            chainlen=len(chain),
            prevrev=prevrev,
            deltatype=deltatype,
            compsize=comp,
            uncompsize=uncomp,
            chainsize=chainsize,
            chainratio=chainratio,
            lindist=lineardist,
            extradist=extradist,
            extraratio=extraratio,
        )
        if withsparseread:
            readsize = 0
            largestblock = 0
            srchunks = 0

            for revschunk in deltautil.slicechunk(r, chain):
                srchunks += 1
                blkend = start(revschunk[-1]) + length(revschunk[-1])
                blksize = blkend - start(revschunk[0])

                readsize += blksize
                if largestblock < blksize:
                    largestblock = blksize

            if readsize:
                readdensity = float(chainsize) / float(readsize)
            else:
                readdensity = 1

            fm.write(
                b'readsize largestblock readdensity srchunks',
                b' %10d %10d %9.5f %8d',
                readsize,
                largestblock,
                readdensity,
                srchunks,
                readsize=readsize,
                largestblock=largestblock,
                readdensity=readdensity,
                srchunks=srchunks,
            )

        fm.plain(b'\n')

    fm.end()


@command(
    b'debugdirstate|debugstate',
    [
        (
            b'',
            b'nodates',
            None,
            _(b'do not display the saved mtime (DEPRECATED)'),
        ),
        (b'', b'dates', True, _(b'display the saved mtime')),
        (b'', b'datesort', None, _(b'sort by saved mtime')),
        (b'', b'dirs', False, _(b'display directories')),
    ],
    _(b'[OPTION]...'),
)
def debugstate(ui, repo, **opts):
    """show the contents of the current dirstate"""

    nodates = not opts['dates']
    if opts.get('nodates') is not None:
        nodates = True
    datesort = opts.get('datesort')

    if datesort:
        keyfunc = lambda x: (
            x[1].v1_mtime(),
            x[0],
        )  # sort by mtime, then by filename
    else:
        keyfunc = None  # sort by filename
    entries = list(pycompat.iteritems(repo.dirstate))
    if opts['dirs']:
        entries.extend(repo.dirstate.directories())
    entries.sort(key=keyfunc)
    for file_, ent in entries:
        if ent.v1_mtime() == -1:
            timestr = b'unset               '
        elif nodates:
            timestr = b'set                 '
        else:
            timestr = time.strftime(
                "%Y-%m-%d %H:%M:%S ", time.localtime(ent.v1_mtime())
            )
            timestr = encoding.strtolocal(timestr)
        if ent.mode & 0o20000:
            mode = b'lnk'
        else:
            mode = b'%3o' % (ent.v1_mode() & 0o777 & ~util.umask)
        ui.write(
            b"%c %s %10d %s%s\n"
            % (ent.v1_state(), mode, ent.v1_size(), timestr, file_)
        )
    for f in repo.dirstate.copies():
        ui.write(_(b"copy: %s -> %s\n") % (repo.dirstate.copied(f), f))


@command(
    b'debugdirstateignorepatternshash',
    [],
    _(b''),
)
def debugdirstateignorepatternshash(ui, repo, **opts):
996 """show the hash of ignore patterns stored in dirstate if v2,
996 """show the hash of ignore patterns stored in dirstate if v2,
997 or nothing for dirstate-v2
997 or nothing for dirstate-v2
998 """
998 """
     if repo.dirstate._use_dirstate_v2:
         docket = repo.dirstate._map.docket
         hash_len = 20  # 160 bits for SHA-1
-        hash_offset = docket.data_size - hash_len  # hash is at the end
-        data_filename = docket.data_filename()
-        with repo.vfs(data_filename) as f:
-            f.seek(hash_offset)
-            hash_bytes = f.read(hash_len)
+        hash_bytes = docket.tree_metadata[-hash_len:]
         ui.write(binascii.hexlify(hash_bytes) + b'\n')
1008
1004
1009
1005
1010 @command(
1006 @command(
1011 b'debugdiscovery',
1007 b'debugdiscovery',
1012 [
1008 [
1013 (b'', b'old', None, _(b'use old-style discovery')),
1009 (b'', b'old', None, _(b'use old-style discovery')),
1014 (
1010 (
1015 b'',
1011 b'',
1016 b'nonheads',
1012 b'nonheads',
1017 None,
1013 None,
1018 _(b'use old-style discovery with non-heads included'),
1014 _(b'use old-style discovery with non-heads included'),
1019 ),
1015 ),
1020 (b'', b'rev', [], b'restrict discovery to this set of revs'),
1016 (b'', b'rev', [], b'restrict discovery to this set of revs'),
1021 (b'', b'seed', b'12323', b'specify the random seed use for discovery'),
1017 (b'', b'seed', b'12323', b'specify the random seed use for discovery'),
1022 (
1018 (
1023 b'',
1019 b'',
1024 b'local-as-revs',
1020 b'local-as-revs',
1025 b"",
1021 b"",
1026 b'treat local has having these revisions only',
1022 b'treat local has having these revisions only',
1027 ),
1023 ),
1028 (
1024 (
1029 b'',
1025 b'',
1030 b'remote-as-revs',
1026 b'remote-as-revs',
1031 b"",
1027 b"",
1032 b'use local as remote, with only these revisions',
1028 b'use local as remote, with only these revisions',
1033 ),
1029 ),
1034 ]
1030 ]
1035 + cmdutil.remoteopts
1031 + cmdutil.remoteopts
1036 + cmdutil.formatteropts,
1032 + cmdutil.formatteropts,
1037 _(b'[--rev REV] [OTHER]'),
1033 _(b'[--rev REV] [OTHER]'),
1038 )
1034 )
1039 def debugdiscovery(ui, repo, remoteurl=b"default", **opts):
1035 def debugdiscovery(ui, repo, remoteurl=b"default", **opts):
1040 """runs the changeset discovery protocol in isolation
1036 """runs the changeset discovery protocol in isolation
1041
1037
1042 The local peer can be "replaced" by a subset of the local repository by
1038 The local peer can be "replaced" by a subset of the local repository by
1043 using the `--local-as-revs` flag. In the same way, the usual `remote` peer can
1039 using the `--local-as-revs` flag. In the same way, the usual `remote` peer can
1044 be "replaced" by a subset of the local repository using the
1040 be "replaced" by a subset of the local repository using the
1045 `--remote-as-revs` flag. This is useful to efficiently debug pathological
1041 `--remote-as-revs` flag. This is useful to efficiently debug pathological
1046 discovery situations.
1042 discovery situations.
1047
1043
1048 The following developer-oriented config options are relevant for people playing with this command:
1044 The following developer-oriented config options are relevant for people playing with this command:
1049
1045
1050 * devel.discovery.exchange-heads=True
1046 * devel.discovery.exchange-heads=True
1051
1047
1052 If False, the discovery will not start with
1048 If False, the discovery will not start with
1053 remote head fetching and local head querying.
1049 remote head fetching and local head querying.
1054
1050
1055 * devel.discovery.grow-sample=True
1051 * devel.discovery.grow-sample=True
1056
1052
1057 If False, the sample size used in set discovery will not be increased
1053 If False, the sample size used in set discovery will not be increased
1058 throughout the process.
1054 throughout the process.
1059
1055
1060 * devel.discovery.grow-sample.dynamic=True
1056 * devel.discovery.grow-sample.dynamic=True
1061
1057
1062 When discovery.grow-sample.dynamic is True (the default), the sample size is
1058 When discovery.grow-sample.dynamic is True (the default), the sample size is
1063 adapted to the shape of the undecided set (it is set to the max of:
1059 adapted to the shape of the undecided set (it is set to the max of:
1064 <target-size>, len(roots(undecided)), len(heads(undecided))).
1060 <target-size>, len(roots(undecided)), len(heads(undecided))).
1065
1061
1066 * devel.discovery.grow-sample.rate=1.05
1062 * devel.discovery.grow-sample.rate=1.05
1067
1063
1068 The rate at which the sample grows.
1064 The rate at which the sample grows.
1069
1065
1070 * devel.discovery.randomize=True
1066 * devel.discovery.randomize=True
1071
1067
1072 If False, random sampling during discovery is deterministic. It is meant
1068 If False, random sampling during discovery is deterministic. It is meant
1073 for integration tests.
1069 for integration tests.
1074
1070
1075 * devel.discovery.sample-size=200
1071 * devel.discovery.sample-size=200
1076
1072
1077 Control the initial size of the discovery sample
1073 Control the initial size of the discovery sample
1078
1074
1079 * devel.discovery.sample-size.initial=100
1075 * devel.discovery.sample-size.initial=100
1080
1076
1081 Control the sample size used for the initial discovery round
1077 Control the sample size used for the initial discovery round
1082 """
1078 """
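    # A hypothetical test-oriented configuration exercising the knobs
    # documented in the docstring above (option names come from that
    # docstring; the values are purely illustrative):
    #
    #   [devel]
    #   discovery.randomize = no
    #   discovery.grow-sample = no
    #   discovery.sample-size = 200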
1083 opts = pycompat.byteskwargs(opts)
1079 opts = pycompat.byteskwargs(opts)
1084 unfi = repo.unfiltered()
1080 unfi = repo.unfiltered()
1085
1081
1086 # setup potential extra filtering
1082 # setup potential extra filtering
1087 local_revs = opts[b"local_as_revs"]
1083 local_revs = opts[b"local_as_revs"]
1088 remote_revs = opts[b"remote_as_revs"]
1084 remote_revs = opts[b"remote_as_revs"]
1089
1085
1090 # make sure tests are repeatable
1086 # make sure tests are repeatable
1091 random.seed(int(opts[b'seed']))
1087 random.seed(int(opts[b'seed']))
1092
1088
1093 if not remote_revs:
1089 if not remote_revs:
1094
1090
1095 remoteurl, branches = urlutil.get_unique_pull_path(
1091 remoteurl, branches = urlutil.get_unique_pull_path(
1096 b'debugdiscovery', repo, ui, remoteurl
1092 b'debugdiscovery', repo, ui, remoteurl
1097 )
1093 )
1098 remote = hg.peer(repo, opts, remoteurl)
1094 remote = hg.peer(repo, opts, remoteurl)
1099 ui.status(_(b'comparing with %s\n') % urlutil.hidepassword(remoteurl))
1095 ui.status(_(b'comparing with %s\n') % urlutil.hidepassword(remoteurl))
1100 else:
1096 else:
1101 branches = (None, [])
1097 branches = (None, [])
1102 remote_filtered_revs = scmutil.revrange(
1098 remote_filtered_revs = scmutil.revrange(
1103 unfi, [b"not (::(%s))" % remote_revs]
1099 unfi, [b"not (::(%s))" % remote_revs]
1104 )
1100 )
1105 remote_filtered_revs = frozenset(remote_filtered_revs)
1101 remote_filtered_revs = frozenset(remote_filtered_revs)
1106
1102
1107 def remote_func(x):
1103 def remote_func(x):
1108 return remote_filtered_revs
1104 return remote_filtered_revs
1109
1105
1110 repoview.filtertable[b'debug-discovery-remote-filter'] = remote_func
1106 repoview.filtertable[b'debug-discovery-remote-filter'] = remote_func
1111
1107
1112 remote = repo.peer()
1108 remote = repo.peer()
1113 remote._repo = remote._repo.filtered(b'debug-discovery-remote-filter')
1109 remote._repo = remote._repo.filtered(b'debug-discovery-remote-filter')
1114
1110
1115 if local_revs:
1111 if local_revs:
1116 local_filtered_revs = scmutil.revrange(
1112 local_filtered_revs = scmutil.revrange(
1117 unfi, [b"not (::(%s))" % local_revs]
1113 unfi, [b"not (::(%s))" % local_revs]
1118 )
1114 )
1119 local_filtered_revs = frozenset(local_filtered_revs)
1115 local_filtered_revs = frozenset(local_filtered_revs)
1120
1116
1121 def local_func(x):
1117 def local_func(x):
1122 return local_filtered_revs
1118 return local_filtered_revs
1123
1119
1124 repoview.filtertable[b'debug-discovery-local-filter'] = local_func
1120 repoview.filtertable[b'debug-discovery-local-filter'] = local_func
1125 repo = repo.filtered(b'debug-discovery-local-filter')
1121 repo = repo.filtered(b'debug-discovery-local-filter')
1126
1122
1127 data = {}
1123 data = {}
1128 if opts.get(b'old'):
1124 if opts.get(b'old'):
1129
1125
1130 def doit(pushedrevs, remoteheads, remote=remote):
1126 def doit(pushedrevs, remoteheads, remote=remote):
1131 if not util.safehasattr(remote, b'branches'):
1127 if not util.safehasattr(remote, b'branches'):
1132 # enable in-client legacy support
1128 # enable in-client legacy support
1133 remote = localrepo.locallegacypeer(remote.local())
1129 remote = localrepo.locallegacypeer(remote.local())
1134 common, _in, hds = treediscovery.findcommonincoming(
1130 common, _in, hds = treediscovery.findcommonincoming(
1135 repo, remote, force=True, audit=data
1131 repo, remote, force=True, audit=data
1136 )
1132 )
1137 common = set(common)
1133 common = set(common)
1138 if not opts.get(b'nonheads'):
1134 if not opts.get(b'nonheads'):
1139 ui.writenoi18n(
1135 ui.writenoi18n(
1140 b"unpruned common: %s\n"
1136 b"unpruned common: %s\n"
1141 % b" ".join(sorted(short(n) for n in common))
1137 % b" ".join(sorted(short(n) for n in common))
1142 )
1138 )
1143
1139
1144 clnode = repo.changelog.node
1140 clnode = repo.changelog.node
1145 common = repo.revs(b'heads(::%ln)', common)
1141 common = repo.revs(b'heads(::%ln)', common)
1146 common = {clnode(r) for r in common}
1142 common = {clnode(r) for r in common}
1147 return common, hds
1143 return common, hds
1148
1144
1149 else:
1145 else:
1150
1146
1151 def doit(pushedrevs, remoteheads, remote=remote):
1147 def doit(pushedrevs, remoteheads, remote=remote):
1152 nodes = None
1148 nodes = None
1153 if pushedrevs:
1149 if pushedrevs:
1154 revs = scmutil.revrange(repo, pushedrevs)
1150 revs = scmutil.revrange(repo, pushedrevs)
1155 nodes = [repo[r].node() for r in revs]
1151 nodes = [repo[r].node() for r in revs]
1156 common, any, hds = setdiscovery.findcommonheads(
1152 common, any, hds = setdiscovery.findcommonheads(
1157 ui, repo, remote, ancestorsof=nodes, audit=data
1153 ui, repo, remote, ancestorsof=nodes, audit=data
1158 )
1154 )
1159 return common, hds
1155 return common, hds
1160
1156
1161 remoterevs, _checkout = hg.addbranchrevs(repo, remote, branches, revs=None)
1157 remoterevs, _checkout = hg.addbranchrevs(repo, remote, branches, revs=None)
1162 localrevs = opts[b'rev']
1158 localrevs = opts[b'rev']
1163
1159
1164 fm = ui.formatter(b'debugdiscovery', opts)
1160 fm = ui.formatter(b'debugdiscovery', opts)
1165 if fm.strict_format:
1161 if fm.strict_format:
1166
1162
1167 @contextlib.contextmanager
1163 @contextlib.contextmanager
1168 def may_capture_output():
1164 def may_capture_output():
1169 ui.pushbuffer()
1165 ui.pushbuffer()
1170 yield
1166 yield
1171 data[b'output'] = ui.popbuffer()
1167 data[b'output'] = ui.popbuffer()
1172
1168
1173 else:
1169 else:
1174 may_capture_output = util.nullcontextmanager
1170 may_capture_output = util.nullcontextmanager
1175 with may_capture_output():
1171 with may_capture_output():
1176 with util.timedcm('debug-discovery') as t:
1172 with util.timedcm('debug-discovery') as t:
1177 common, hds = doit(localrevs, remoterevs)
1173 common, hds = doit(localrevs, remoterevs)
1178
1174
1179 # compute all statistics
1175 # compute all statistics
1180 heads_common = set(common)
1176 heads_common = set(common)
1181 heads_remote = set(hds)
1177 heads_remote = set(hds)
1182 heads_local = set(repo.heads())
1178 heads_local = set(repo.heads())
1183 # note: there cannot be a local or remote head that is in common and not
1179 # note: there cannot be a local or remote head that is in common and not
1184 # itself a head of common.
1180 # itself a head of common.
1185 heads_common_local = heads_common & heads_local
1181 heads_common_local = heads_common & heads_local
1186 heads_common_remote = heads_common & heads_remote
1182 heads_common_remote = heads_common & heads_remote
1187 heads_common_both = heads_common & heads_remote & heads_local
1183 heads_common_both = heads_common & heads_remote & heads_local
1188
1184
1189 all = repo.revs(b'all()')
1185 all = repo.revs(b'all()')
1190 common = repo.revs(b'::%ln', common)
1186 common = repo.revs(b'::%ln', common)
1191 roots_common = repo.revs(b'roots(::%ld)', common)
1187 roots_common = repo.revs(b'roots(::%ld)', common)
1192 missing = repo.revs(b'not ::%ld', common)
1188 missing = repo.revs(b'not ::%ld', common)
1193 heads_missing = repo.revs(b'heads(%ld)', missing)
1189 heads_missing = repo.revs(b'heads(%ld)', missing)
1194 roots_missing = repo.revs(b'roots(%ld)', missing)
1190 roots_missing = repo.revs(b'roots(%ld)', missing)
1195 assert len(common) + len(missing) == len(all)
1191 assert len(common) + len(missing) == len(all)
1196
1192
1197 initial_undecided = repo.revs(
1193 initial_undecided = repo.revs(
1198 b'not (::%ln or %ln::)', heads_common_remote, heads_common_local
1194 b'not (::%ln or %ln::)', heads_common_remote, heads_common_local
1199 )
1195 )
1200 heads_initial_undecided = repo.revs(b'heads(%ld)', initial_undecided)
1196 heads_initial_undecided = repo.revs(b'heads(%ld)', initial_undecided)
1201 roots_initial_undecided = repo.revs(b'roots(%ld)', initial_undecided)
1197 roots_initial_undecided = repo.revs(b'roots(%ld)', initial_undecided)
1202 common_initial_undecided = initial_undecided & common
1198 common_initial_undecided = initial_undecided & common
1203 missing_initial_undecided = initial_undecided & missing
1199 missing_initial_undecided = initial_undecided & missing
1204
1200
1205 data[b'elapsed'] = t.elapsed
1201 data[b'elapsed'] = t.elapsed
1206 data[b'nb-common-heads'] = len(heads_common)
1202 data[b'nb-common-heads'] = len(heads_common)
1207 data[b'nb-common-heads-local'] = len(heads_common_local)
1203 data[b'nb-common-heads-local'] = len(heads_common_local)
1208 data[b'nb-common-heads-remote'] = len(heads_common_remote)
1204 data[b'nb-common-heads-remote'] = len(heads_common_remote)
1209 data[b'nb-common-heads-both'] = len(heads_common_both)
1205 data[b'nb-common-heads-both'] = len(heads_common_both)
1210 data[b'nb-common-roots'] = len(roots_common)
1206 data[b'nb-common-roots'] = len(roots_common)
1211 data[b'nb-head-local'] = len(heads_local)
1207 data[b'nb-head-local'] = len(heads_local)
1212 data[b'nb-head-local-missing'] = len(heads_local) - len(heads_common_local)
1208 data[b'nb-head-local-missing'] = len(heads_local) - len(heads_common_local)
1213 data[b'nb-head-remote'] = len(heads_remote)
1209 data[b'nb-head-remote'] = len(heads_remote)
1214 data[b'nb-head-remote-unknown'] = len(heads_remote) - len(
1210 data[b'nb-head-remote-unknown'] = len(heads_remote) - len(
1215 heads_common_remote
1211 heads_common_remote
1216 )
1212 )
1217 data[b'nb-revs'] = len(all)
1213 data[b'nb-revs'] = len(all)
1218 data[b'nb-revs-common'] = len(common)
1214 data[b'nb-revs-common'] = len(common)
1219 data[b'nb-revs-missing'] = len(missing)
1215 data[b'nb-revs-missing'] = len(missing)
1220 data[b'nb-missing-heads'] = len(heads_missing)
1216 data[b'nb-missing-heads'] = len(heads_missing)
1221 data[b'nb-missing-roots'] = len(roots_missing)
1217 data[b'nb-missing-roots'] = len(roots_missing)
1222 data[b'nb-ini_und'] = len(initial_undecided)
1218 data[b'nb-ini_und'] = len(initial_undecided)
1223 data[b'nb-ini_und-heads'] = len(heads_initial_undecided)
1219 data[b'nb-ini_und-heads'] = len(heads_initial_undecided)
1224 data[b'nb-ini_und-roots'] = len(roots_initial_undecided)
1220 data[b'nb-ini_und-roots'] = len(roots_initial_undecided)
1225 data[b'nb-ini_und-common'] = len(common_initial_undecided)
1221 data[b'nb-ini_und-common'] = len(common_initial_undecided)
1226 data[b'nb-ini_und-missing'] = len(missing_initial_undecided)
1222 data[b'nb-ini_und-missing'] = len(missing_initial_undecided)
1227
1223
1228 fm.startitem()
1224 fm.startitem()
1229 fm.data(**pycompat.strkwargs(data))
1225 fm.data(**pycompat.strkwargs(data))
1230 # display discovery summary
1226 # display discovery summary
1231 fm.plain(b"elapsed time: %(elapsed)f seconds\n" % data)
1227 fm.plain(b"elapsed time: %(elapsed)f seconds\n" % data)
1232 fm.plain(b"round-trips: %(total-roundtrips)9d\n" % data)
1228 fm.plain(b"round-trips: %(total-roundtrips)9d\n" % data)
1233 fm.plain(b"heads summary:\n")
1229 fm.plain(b"heads summary:\n")
1234 fm.plain(b" total common heads: %(nb-common-heads)9d\n" % data)
1230 fm.plain(b" total common heads: %(nb-common-heads)9d\n" % data)
1235 fm.plain(b" also local heads: %(nb-common-heads-local)9d\n" % data)
1231 fm.plain(b" also local heads: %(nb-common-heads-local)9d\n" % data)
1236 fm.plain(b" also remote heads: %(nb-common-heads-remote)9d\n" % data)
1232 fm.plain(b" also remote heads: %(nb-common-heads-remote)9d\n" % data)
1237 fm.plain(b" both: %(nb-common-heads-both)9d\n" % data)
1233 fm.plain(b" both: %(nb-common-heads-both)9d\n" % data)
1238 fm.plain(b" local heads: %(nb-head-local)9d\n" % data)
1234 fm.plain(b" local heads: %(nb-head-local)9d\n" % data)
1239 fm.plain(b" common: %(nb-common-heads-local)9d\n" % data)
1235 fm.plain(b" common: %(nb-common-heads-local)9d\n" % data)
1240 fm.plain(b" missing: %(nb-head-local-missing)9d\n" % data)
1236 fm.plain(b" missing: %(nb-head-local-missing)9d\n" % data)
1241 fm.plain(b" remote heads: %(nb-head-remote)9d\n" % data)
1237 fm.plain(b" remote heads: %(nb-head-remote)9d\n" % data)
1242 fm.plain(b" common: %(nb-common-heads-remote)9d\n" % data)
1238 fm.plain(b" common: %(nb-common-heads-remote)9d\n" % data)
1243 fm.plain(b" unknown: %(nb-head-remote-unknown)9d\n" % data)
1239 fm.plain(b" unknown: %(nb-head-remote-unknown)9d\n" % data)
1244 fm.plain(b"local changesets: %(nb-revs)9d\n" % data)
1240 fm.plain(b"local changesets: %(nb-revs)9d\n" % data)
1245 fm.plain(b" common: %(nb-revs-common)9d\n" % data)
1241 fm.plain(b" common: %(nb-revs-common)9d\n" % data)
1246 fm.plain(b" heads: %(nb-common-heads)9d\n" % data)
1242 fm.plain(b" heads: %(nb-common-heads)9d\n" % data)
1247 fm.plain(b" roots: %(nb-common-roots)9d\n" % data)
1243 fm.plain(b" roots: %(nb-common-roots)9d\n" % data)
1248 fm.plain(b" missing: %(nb-revs-missing)9d\n" % data)
1244 fm.plain(b" missing: %(nb-revs-missing)9d\n" % data)
1249 fm.plain(b" heads: %(nb-missing-heads)9d\n" % data)
1245 fm.plain(b" heads: %(nb-missing-heads)9d\n" % data)
1250 fm.plain(b" roots: %(nb-missing-roots)9d\n" % data)
1246 fm.plain(b" roots: %(nb-missing-roots)9d\n" % data)
1251 fm.plain(b" first undecided set: %(nb-ini_und)9d\n" % data)
1247 fm.plain(b" first undecided set: %(nb-ini_und)9d\n" % data)
1252 fm.plain(b" heads: %(nb-ini_und-heads)9d\n" % data)
1248 fm.plain(b" heads: %(nb-ini_und-heads)9d\n" % data)
1253 fm.plain(b" roots: %(nb-ini_und-roots)9d\n" % data)
1249 fm.plain(b" roots: %(nb-ini_und-roots)9d\n" % data)
1254 fm.plain(b" common: %(nb-ini_und-common)9d\n" % data)
1250 fm.plain(b" common: %(nb-ini_und-common)9d\n" % data)
1255 fm.plain(b" missing: %(nb-ini_und-missing)9d\n" % data)
1251 fm.plain(b" missing: %(nb-ini_und-missing)9d\n" % data)
1256
1252
1257 if ui.verbose:
1253 if ui.verbose:
1258 fm.plain(
1254 fm.plain(
1259 b"common heads: %s\n"
1255 b"common heads: %s\n"
1260 % b" ".join(sorted(short(n) for n in heads_common))
1256 % b" ".join(sorted(short(n) for n in heads_common))
1261 )
1257 )
1262 fm.end()
1258 fm.end()
1263
1259
1264
1260
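A hedged pair of invocations of the command defined above (the repository path and revsets are made up; the flags are the ones declared in the command table):

    $ hg debugdiscovery --seed 12323 --rev tip ../some-other-clone
    $ hg debugdiscovery --local-as-revs '::1000' --remote-as-revs '::2000'

The first compares against a real peer; the second replaces both sides with subsets of the local repository, which is handy for reproducing pathological discovery cases offline.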
1265 _chunksize = 4 << 10
1261 _chunksize = 4 << 10
1266
1262
1267
1263
1268 @command(
1264 @command(
1269 b'debugdownload',
1265 b'debugdownload',
1270 [
1266 [
1271 (b'o', b'output', b'', _(b'path')),
1267 (b'o', b'output', b'', _(b'path')),
1272 ],
1268 ],
1273 optionalrepo=True,
1269 optionalrepo=True,
1274 )
1270 )
1275 def debugdownload(ui, repo, url, output=None, **opts):
1271 def debugdownload(ui, repo, url, output=None, **opts):
1276 """download a resource using Mercurial logic and config"""
1272 """download a resource using Mercurial logic and config"""
1277 fh = urlmod.open(ui, url, output)
1273 fh = urlmod.open(ui, url, output)
1278
1274
1279 dest = ui
1275 dest = ui
1280 if output:
1276 if output:
1281 dest = open(output, b"wb", _chunksize)
1277 dest = open(output, b"wb", _chunksize)
1282 try:
1278 try:
1283 data = fh.read(_chunksize)
1279 data = fh.read(_chunksize)
1284 while data:
1280 while data:
1285 dest.write(data)
1281 dest.write(data)
1286 data = fh.read(_chunksize)
1282 data = fh.read(_chunksize)
1287 finally:
1283 finally:
1288 if output:
1284 if output:
1289 dest.close()
1285 dest.close()
1290
1286
1291
1287
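The chunked copy loop in debugdownload above can equivalently be written with the two-argument form of iter(); a behaviour-preserving sketch using only the standard library:

    import functools

    def copy_chunks(fh, dest, chunksize=4 << 10):
        # iter(callable, sentinel) keeps calling fh.read(chunksize)
        # until it returns b'' (EOF), yielding each chunk in between
        for data in iter(functools.partial(fh.read, chunksize), b''):
            dest.write(data)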
1292 @command(b'debugextensions', cmdutil.formatteropts, [], optionalrepo=True)
1288 @command(b'debugextensions', cmdutil.formatteropts, [], optionalrepo=True)
1293 def debugextensions(ui, repo, **opts):
1289 def debugextensions(ui, repo, **opts):
1294 '''show information about active extensions'''
1290 '''show information about active extensions'''
1295 opts = pycompat.byteskwargs(opts)
1291 opts = pycompat.byteskwargs(opts)
1296 exts = extensions.extensions(ui)
1292 exts = extensions.extensions(ui)
1297 hgver = util.version()
1293 hgver = util.version()
1298 fm = ui.formatter(b'debugextensions', opts)
1294 fm = ui.formatter(b'debugextensions', opts)
1299 for extname, extmod in sorted(exts, key=operator.itemgetter(0)):
1295 for extname, extmod in sorted(exts, key=operator.itemgetter(0)):
1300 isinternal = extensions.ismoduleinternal(extmod)
1296 isinternal = extensions.ismoduleinternal(extmod)
1301 extsource = None
1297 extsource = None
1302
1298
1303 if util.safehasattr(extmod, '__file__'):
1299 if util.safehasattr(extmod, '__file__'):
1304 extsource = pycompat.fsencode(extmod.__file__)
1300 extsource = pycompat.fsencode(extmod.__file__)
1305 elif getattr(sys, 'oxidized', False):
1301 elif getattr(sys, 'oxidized', False):
1306 extsource = pycompat.sysexecutable
1302 extsource = pycompat.sysexecutable
1307 if isinternal:
1303 if isinternal:
1308 exttestedwith = [] # never expose magic string to users
1304 exttestedwith = [] # never expose magic string to users
1309 else:
1305 else:
1310 exttestedwith = getattr(extmod, 'testedwith', b'').split()
1306 exttestedwith = getattr(extmod, 'testedwith', b'').split()
1311 extbuglink = getattr(extmod, 'buglink', None)
1307 extbuglink = getattr(extmod, 'buglink', None)
1312
1308
1313 fm.startitem()
1309 fm.startitem()
1314
1310
1315 if ui.quiet or ui.verbose:
1311 if ui.quiet or ui.verbose:
1316 fm.write(b'name', b'%s\n', extname)
1312 fm.write(b'name', b'%s\n', extname)
1317 else:
1313 else:
1318 fm.write(b'name', b'%s', extname)
1314 fm.write(b'name', b'%s', extname)
1319 if isinternal or hgver in exttestedwith:
1315 if isinternal or hgver in exttestedwith:
1320 fm.plain(b'\n')
1316 fm.plain(b'\n')
1321 elif not exttestedwith:
1317 elif not exttestedwith:
1322 fm.plain(_(b' (untested!)\n'))
1318 fm.plain(_(b' (untested!)\n'))
1323 else:
1319 else:
1324 lasttestedversion = exttestedwith[-1]
1320 lasttestedversion = exttestedwith[-1]
1325 fm.plain(b' (%s!)\n' % lasttestedversion)
1321 fm.plain(b' (%s!)\n' % lasttestedversion)
1326
1322
1327 fm.condwrite(
1323 fm.condwrite(
1328 ui.verbose and extsource,
1324 ui.verbose and extsource,
1329 b'source',
1325 b'source',
1330 _(b' location: %s\n'),
1326 _(b' location: %s\n'),
1331 extsource or b"",
1327 extsource or b"",
1332 )
1328 )
1333
1329
1334 if ui.verbose:
1330 if ui.verbose:
1335 fm.plain(_(b' bundled: %s\n') % [b'no', b'yes'][isinternal])
1331 fm.plain(_(b' bundled: %s\n') % [b'no', b'yes'][isinternal])
1336 fm.data(bundled=isinternal)
1332 fm.data(bundled=isinternal)
1337
1333
1338 fm.condwrite(
1334 fm.condwrite(
1339 ui.verbose and exttestedwith,
1335 ui.verbose and exttestedwith,
1340 b'testedwith',
1336 b'testedwith',
1341 _(b' tested with: %s\n'),
1337 _(b' tested with: %s\n'),
1342 fm.formatlist(exttestedwith, name=b'ver'),
1338 fm.formatlist(exttestedwith, name=b'ver'),
1343 )
1339 )
1344
1340
1345 fm.condwrite(
1341 fm.condwrite(
1346 ui.verbose and extbuglink,
1342 ui.verbose and extbuglink,
1347 b'buglink',
1343 b'buglink',
1348 _(b' bug reporting: %s\n'),
1344 _(b' bug reporting: %s\n'),
1349 extbuglink or b"",
1345 extbuglink or b"",
1350 )
1346 )
1351
1347
1352 fm.end()
1348 fm.end()
1353
1349
1354
1350
1355 @command(
1351 @command(
1356 b'debugfileset',
1352 b'debugfileset',
1357 [
1353 [
1358 (
1354 (
1359 b'r',
1355 b'r',
1360 b'rev',
1356 b'rev',
1361 b'',
1357 b'',
1362 _(b'apply the filespec on this revision'),
1358 _(b'apply the filespec on this revision'),
1363 _(b'REV'),
1359 _(b'REV'),
1364 ),
1360 ),
1365 (
1361 (
1366 b'',
1362 b'',
1367 b'all-files',
1363 b'all-files',
1368 False,
1364 False,
1369 _(b'test files from all revisions and working directory'),
1365 _(b'test files from all revisions and working directory'),
1370 ),
1366 ),
1371 (
1367 (
1372 b's',
1368 b's',
1373 b'show-matcher',
1369 b'show-matcher',
1374 None,
1370 None,
1375 _(b'print internal representation of matcher'),
1371 _(b'print internal representation of matcher'),
1376 ),
1372 ),
1377 (
1373 (
1378 b'p',
1374 b'p',
1379 b'show-stage',
1375 b'show-stage',
1380 [],
1376 [],
1381 _(b'print parsed tree at the given stage'),
1377 _(b'print parsed tree at the given stage'),
1382 _(b'NAME'),
1378 _(b'NAME'),
1383 ),
1379 ),
1384 ],
1380 ],
1385 _(b'[-r REV] [--all-files] [OPTION]... FILESPEC'),
1381 _(b'[-r REV] [--all-files] [OPTION]... FILESPEC'),
1386 )
1382 )
1387 def debugfileset(ui, repo, expr, **opts):
1383 def debugfileset(ui, repo, expr, **opts):
1388 '''parse and apply a fileset specification'''
1384 '''parse and apply a fileset specification'''
1389 from . import fileset
1385 from . import fileset
1390
1386
1391 fileset.symbols # force import of fileset so we have predicates to optimize
1387 fileset.symbols # force import of fileset so we have predicates to optimize
1392 opts = pycompat.byteskwargs(opts)
1388 opts = pycompat.byteskwargs(opts)
1393 ctx = scmutil.revsingle(repo, opts.get(b'rev'), None)
1389 ctx = scmutil.revsingle(repo, opts.get(b'rev'), None)
1394
1390
1395 stages = [
1391 stages = [
1396 (b'parsed', pycompat.identity),
1392 (b'parsed', pycompat.identity),
1397 (b'analyzed', filesetlang.analyze),
1393 (b'analyzed', filesetlang.analyze),
1398 (b'optimized', filesetlang.optimize),
1394 (b'optimized', filesetlang.optimize),
1399 ]
1395 ]
1400 stagenames = {n for n, f in stages}
1396 stagenames = {n for n, f in stages}
1401
1397
1402 showalways = set()
1398 showalways = set()
1403 if ui.verbose and not opts[b'show_stage']:
1399 if ui.verbose and not opts[b'show_stage']:
1404 # show parsed tree by --verbose (deprecated)
1400 # show parsed tree by --verbose (deprecated)
1405 showalways.add(b'parsed')
1401 showalways.add(b'parsed')
1406 if opts[b'show_stage'] == [b'all']:
1402 if opts[b'show_stage'] == [b'all']:
1407 showalways.update(stagenames)
1403 showalways.update(stagenames)
1408 else:
1404 else:
1409 for n in opts[b'show_stage']:
1405 for n in opts[b'show_stage']:
1410 if n not in stagenames:
1406 if n not in stagenames:
1411 raise error.Abort(_(b'invalid stage name: %s') % n)
1407 raise error.Abort(_(b'invalid stage name: %s') % n)
1412 showalways.update(opts[b'show_stage'])
1408 showalways.update(opts[b'show_stage'])
1413
1409
1414 tree = filesetlang.parse(expr)
1410 tree = filesetlang.parse(expr)
1415 for n, f in stages:
1411 for n, f in stages:
1416 tree = f(tree)
1412 tree = f(tree)
1417 if n in showalways:
1413 if n in showalways:
1418 if opts[b'show_stage'] or n != b'parsed':
1414 if opts[b'show_stage'] or n != b'parsed':
1419 ui.write(b"* %s:\n" % n)
1415 ui.write(b"* %s:\n" % n)
1420 ui.write(filesetlang.prettyformat(tree), b"\n")
1416 ui.write(filesetlang.prettyformat(tree), b"\n")
1421
1417
1422 files = set()
1418 files = set()
1423 if opts[b'all_files']:
1419 if opts[b'all_files']:
1424 for r in repo:
1420 for r in repo:
1425 c = repo[r]
1421 c = repo[r]
1426 files.update(c.files())
1422 files.update(c.files())
1427 files.update(c.substate)
1423 files.update(c.substate)
1428 if opts[b'all_files'] or ctx.rev() is None:
1424 if opts[b'all_files'] or ctx.rev() is None:
1429 wctx = repo[None]
1425 wctx = repo[None]
1430 files.update(
1426 files.update(
1431 repo.dirstate.walk(
1427 repo.dirstate.walk(
1432 scmutil.matchall(repo),
1428 scmutil.matchall(repo),
1433 subrepos=list(wctx.substate),
1429 subrepos=list(wctx.substate),
1434 unknown=True,
1430 unknown=True,
1435 ignored=True,
1431 ignored=True,
1436 )
1432 )
1437 )
1433 )
1438 files.update(wctx.substate)
1434 files.update(wctx.substate)
1439 else:
1435 else:
1440 files.update(ctx.files())
1436 files.update(ctx.files())
1441 files.update(ctx.substate)
1437 files.update(ctx.substate)
1442
1438
1443 m = ctx.matchfileset(repo.getcwd(), expr)
1439 m = ctx.matchfileset(repo.getcwd(), expr)
1444 if opts[b'show_matcher'] or (opts[b'show_matcher'] is None and ui.verbose):
1440 if opts[b'show_matcher'] or (opts[b'show_matcher'] is None and ui.verbose):
1445 ui.writenoi18n(b'* matcher:\n', stringutil.prettyrepr(m), b'\n')
1441 ui.writenoi18n(b'* matcher:\n', stringutil.prettyrepr(m), b'\n')
1446 for f in sorted(files):
1442 for f in sorted(files):
1447 if not m(f):
1443 if not m(f):
1448 continue
1444 continue
1449 ui.write(b"%s\n" % f)
1445 ui.write(b"%s\n" % f)
1450
1446
1451
1447
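The parse/analyze/optimize loop in debugfileset above is a small staged-pipeline pattern; a self-contained sketch of the same shape (names and stages hypothetical):

    def run_stages(tree, stages, show):
        # stages: a list of (name, transform) pairs; each transform
        # consumes the tree produced by the previous stage, and any
        # stage named in `show` is printed along the way
        for name, transform in stages:
            tree = transform(tree)
            if name in show:
                print('* %s:' % name)
                print(tree)
        return tree

    # e.g. run_stages('  x  ', [('parsed', str.strip), ('upper', str.upper)], {'upper'})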
1452 @command(b'debugformat', [] + cmdutil.formatteropts)
1448 @command(b'debugformat', [] + cmdutil.formatteropts)
1453 def debugformat(ui, repo, **opts):
1449 def debugformat(ui, repo, **opts):
1454 """display format information about the current repository
1450 """display format information about the current repository
1455
1451
1456 Use --verbose to get extra information about the current config value and
1452 Use --verbose to get extra information about the current config value and
1457 the Mercurial default."""
1453 the Mercurial default."""
1458 opts = pycompat.byteskwargs(opts)
1454 opts = pycompat.byteskwargs(opts)
1459 maxvariantlength = max(len(fv.name) for fv in upgrade.allformatvariant)
1455 maxvariantlength = max(len(fv.name) for fv in upgrade.allformatvariant)
1460 maxvariantlength = max(len(b'format-variant'), maxvariantlength)
1456 maxvariantlength = max(len(b'format-variant'), maxvariantlength)
1461
1457
1462 def makeformatname(name):
1458 def makeformatname(name):
1463 return b'%s:' + (b' ' * (maxvariantlength - len(name)))
1459 return b'%s:' + (b' ' * (maxvariantlength - len(name)))
1464
1460
1465 fm = ui.formatter(b'debugformat', opts)
1461 fm = ui.formatter(b'debugformat', opts)
1466 if fm.isplain():
1462 if fm.isplain():
1467
1463
1468 def formatvalue(value):
1464 def formatvalue(value):
1469 if util.safehasattr(value, b'startswith'):
1465 if util.safehasattr(value, b'startswith'):
1470 return value
1466 return value
1471 if value:
1467 if value:
1472 return b'yes'
1468 return b'yes'
1473 else:
1469 else:
1474 return b'no'
1470 return b'no'
1475
1471
1476 else:
1472 else:
1477 formatvalue = pycompat.identity
1473 formatvalue = pycompat.identity
1478
1474
1479 fm.plain(b'format-variant')
1475 fm.plain(b'format-variant')
1480 fm.plain(b' ' * (maxvariantlength - len(b'format-variant')))
1476 fm.plain(b' ' * (maxvariantlength - len(b'format-variant')))
1481 fm.plain(b' repo')
1477 fm.plain(b' repo')
1482 if ui.verbose:
1478 if ui.verbose:
1483 fm.plain(b' config default')
1479 fm.plain(b' config default')
1484 fm.plain(b'\n')
1480 fm.plain(b'\n')
1485 for fv in upgrade.allformatvariant:
1481 for fv in upgrade.allformatvariant:
1486 fm.startitem()
1482 fm.startitem()
1487 repovalue = fv.fromrepo(repo)
1483 repovalue = fv.fromrepo(repo)
1488 configvalue = fv.fromconfig(repo)
1484 configvalue = fv.fromconfig(repo)
1489
1485
1490 if repovalue != configvalue:
1486 if repovalue != configvalue:
1491 namelabel = b'formatvariant.name.mismatchconfig'
1487 namelabel = b'formatvariant.name.mismatchconfig'
1492 repolabel = b'formatvariant.repo.mismatchconfig'
1488 repolabel = b'formatvariant.repo.mismatchconfig'
1493 elif repovalue != fv.default:
1489 elif repovalue != fv.default:
1494 namelabel = b'formatvariant.name.mismatchdefault'
1490 namelabel = b'formatvariant.name.mismatchdefault'
1495 repolabel = b'formatvariant.repo.mismatchdefault'
1491 repolabel = b'formatvariant.repo.mismatchdefault'
1496 else:
1492 else:
1497 namelabel = b'formatvariant.name.uptodate'
1493 namelabel = b'formatvariant.name.uptodate'
1498 repolabel = b'formatvariant.repo.uptodate'
1494 repolabel = b'formatvariant.repo.uptodate'
1499
1495
1500 fm.write(b'name', makeformatname(fv.name), fv.name, label=namelabel)
1496 fm.write(b'name', makeformatname(fv.name), fv.name, label=namelabel)
1501 fm.write(b'repo', b' %3s', formatvalue(repovalue), label=repolabel)
1497 fm.write(b'repo', b' %3s', formatvalue(repovalue), label=repolabel)
1502 if fv.default != configvalue:
1498 if fv.default != configvalue:
1503 configlabel = b'formatvariant.config.special'
1499 configlabel = b'formatvariant.config.special'
1504 else:
1500 else:
1505 configlabel = b'formatvariant.config.default'
1501 configlabel = b'formatvariant.config.default'
1506 fm.condwrite(
1502 fm.condwrite(
1507 ui.verbose,
1503 ui.verbose,
1508 b'config',
1504 b'config',
1509 b' %6s',
1505 b' %6s',
1510 formatvalue(configvalue),
1506 formatvalue(configvalue),
1511 label=configlabel,
1507 label=configlabel,
1512 )
1508 )
1513 fm.condwrite(
1509 fm.condwrite(
1514 ui.verbose,
1510 ui.verbose,
1515 b'default',
1511 b'default',
1516 b' %7s',
1512 b' %7s',
1517 formatvalue(fv.default),
1513 formatvalue(fv.default),
1518 label=b'formatvariant.default',
1514 label=b'formatvariant.default',
1519 )
1515 )
1520 fm.plain(b'\n')
1516 fm.plain(b'\n')
1521 fm.end()
1517 fm.end()
1522
1518
1523
1519
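makeformatname in debugformat above pads every variant label to a common column; a tiny worked example, with the width and the variant name assumed for illustration:

    maxvariantlength = 14  # assumed for illustration

    def makeformatname(name):
        return b'%s:' + (b' ' * (maxvariantlength - len(name)))

    print(makeformatname(b'dotencode') % b'dotencode')
    # -> b'dotencode:     ' (five trailing spaces, so the columns line up)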
1524 @command(b'debugfsinfo', [], _(b'[PATH]'), norepo=True)
1520 @command(b'debugfsinfo', [], _(b'[PATH]'), norepo=True)
1525 def debugfsinfo(ui, path=b"."):
1521 def debugfsinfo(ui, path=b"."):
1526 """show information detected about current filesystem"""
1522 """show information detected about current filesystem"""
1527 ui.writenoi18n(b'path: %s\n' % path)
1523 ui.writenoi18n(b'path: %s\n' % path)
1528 ui.writenoi18n(
1524 ui.writenoi18n(
1529 b'mounted on: %s\n' % (util.getfsmountpoint(path) or b'(unknown)')
1525 b'mounted on: %s\n' % (util.getfsmountpoint(path) or b'(unknown)')
1530 )
1526 )
1531 ui.writenoi18n(b'exec: %s\n' % (util.checkexec(path) and b'yes' or b'no'))
1527 ui.writenoi18n(b'exec: %s\n' % (util.checkexec(path) and b'yes' or b'no'))
1532 ui.writenoi18n(b'fstype: %s\n' % (util.getfstype(path) or b'(unknown)'))
1528 ui.writenoi18n(b'fstype: %s\n' % (util.getfstype(path) or b'(unknown)'))
1533 ui.writenoi18n(
1529 ui.writenoi18n(
1534 b'symlink: %s\n' % (util.checklink(path) and b'yes' or b'no')
1530 b'symlink: %s\n' % (util.checklink(path) and b'yes' or b'no')
1535 )
1531 )
1536 ui.writenoi18n(
1532 ui.writenoi18n(
1537 b'hardlink: %s\n' % (util.checknlink(path) and b'yes' or b'no')
1533 b'hardlink: %s\n' % (util.checknlink(path) and b'yes' or b'no')
1538 )
1534 )
1539 casesensitive = b'(unknown)'
1535 casesensitive = b'(unknown)'
1540 try:
1536 try:
1541 with pycompat.namedtempfile(prefix=b'.debugfsinfo', dir=path) as f:
1537 with pycompat.namedtempfile(prefix=b'.debugfsinfo', dir=path) as f:
1542 casesensitive = util.fscasesensitive(f.name) and b'yes' or b'no'
1538 casesensitive = util.fscasesensitive(f.name) and b'yes' or b'no'
1543 except OSError:
1539 except OSError:
1544 pass
1540 pass
1545 ui.writenoi18n(b'case-sensitive: %s\n' % casesensitive)
1541 ui.writenoi18n(b'case-sensitive: %s\n' % casesensitive)
1546
1542
1547
1543
1548 @command(
1544 @command(
1549 b'debuggetbundle',
1545 b'debuggetbundle',
1550 [
1546 [
1551 (b'H', b'head', [], _(b'id of head node'), _(b'ID')),
1547 (b'H', b'head', [], _(b'id of head node'), _(b'ID')),
1552 (b'C', b'common', [], _(b'id of common node'), _(b'ID')),
1548 (b'C', b'common', [], _(b'id of common node'), _(b'ID')),
1553 (
1549 (
1554 b't',
1550 b't',
1555 b'type',
1551 b'type',
1556 b'bzip2',
1552 b'bzip2',
1557 _(b'bundle compression type to use'),
1553 _(b'bundle compression type to use'),
1558 _(b'TYPE'),
1554 _(b'TYPE'),
1559 ),
1555 ),
1560 ],
1556 ],
1561 _(b'REPO FILE [-H|-C ID]...'),
1557 _(b'REPO FILE [-H|-C ID]...'),
1562 norepo=True,
1558 norepo=True,
1563 )
1559 )
1564 def debuggetbundle(ui, repopath, bundlepath, head=None, common=None, **opts):
1560 def debuggetbundle(ui, repopath, bundlepath, head=None, common=None, **opts):
1565 """retrieves a bundle from a repo
1561 """retrieves a bundle from a repo
1566
1562
1567 Every ID must be a full-length hex node id string. Saves the bundle to the
1563 Every ID must be a full-length hex node id string. Saves the bundle to the
1568 given file.
1564 given file.
1569 """
1565 """
1570 opts = pycompat.byteskwargs(opts)
1566 opts = pycompat.byteskwargs(opts)
1571 repo = hg.peer(ui, opts, repopath)
1567 repo = hg.peer(ui, opts, repopath)
1572 if not repo.capable(b'getbundle'):
1568 if not repo.capable(b'getbundle'):
1573 raise error.Abort(b"getbundle() not supported by target repository")
1569 raise error.Abort(b"getbundle() not supported by target repository")
1574 args = {}
1570 args = {}
1575 if common:
1571 if common:
1576 args['common'] = [bin(s) for s in common]
1572 args['common'] = [bin(s) for s in common]
1577 if head:
1573 if head:
1578 args['heads'] = [bin(s) for s in head]
1574 args['heads'] = [bin(s) for s in head]
1579 # TODO: get desired bundlecaps from command line.
1575 # TODO: get desired bundlecaps from command line.
1580 args['bundlecaps'] = None
1576 args['bundlecaps'] = None
1581 bundle = repo.getbundle(b'debug', **args)
1577 bundle = repo.getbundle(b'debug', **args)
1582
1578
1583 bundletype = opts.get(b'type', b'bzip2').lower()
1579 bundletype = opts.get(b'type', b'bzip2').lower()
1584 btypes = {
1580 btypes = {
1585 b'none': b'HG10UN',
1581 b'none': b'HG10UN',
1586 b'bzip2': b'HG10BZ',
1582 b'bzip2': b'HG10BZ',
1587 b'gzip': b'HG10GZ',
1583 b'gzip': b'HG10GZ',
1588 b'bundle2': b'HG20',
1584 b'bundle2': b'HG20',
1589 }
1585 }
1590 bundletype = btypes.get(bundletype)
1586 bundletype = btypes.get(bundletype)
1591 if bundletype not in bundle2.bundletypes:
1587 if bundletype not in bundle2.bundletypes:
1592 raise error.Abort(_(b'unknown bundle type specified with --type'))
1588 raise error.Abort(_(b'unknown bundle type specified with --type'))
1593 bundle2.writebundle(ui, bundle, bundlepath, bundletype)
1589 bundle2.writebundle(ui, bundle, bundlepath, bundletype)
1594
1590
1595
1591
1596 @command(b'debugignore', [], b'[FILE]')
1592 @command(b'debugignore', [], b'[FILE]')
1597 def debugignore(ui, repo, *files, **opts):
1593 def debugignore(ui, repo, *files, **opts):
1598 """display the combined ignore pattern and information about ignored files
1594 """display the combined ignore pattern and information about ignored files
1599
1595
1600 With no argument display the combined ignore pattern.
1596 With no argument display the combined ignore pattern.
1601
1597
1602 Given space-separated file names, show whether each given file is ignored
1598 Given space-separated file names, show whether each given file is ignored
1603 and, if so, the ignore rule (file and line number) that matched it.
1599 and, if so, the ignore rule (file and line number) that matched it.
1604 """
1600 """
1605 ignore = repo.dirstate._ignore
1601 ignore = repo.dirstate._ignore
1606 if not files:
1602 if not files:
1607 # Show all the patterns
1603 # Show all the patterns
1608 ui.write(b"%s\n" % pycompat.byterepr(ignore))
1604 ui.write(b"%s\n" % pycompat.byterepr(ignore))
1609 else:
1605 else:
1610 m = scmutil.match(repo[None], pats=files)
1606 m = scmutil.match(repo[None], pats=files)
1611 uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=True)
1607 uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=True)
1612 for f in m.files():
1608 for f in m.files():
1613 nf = util.normpath(f)
1609 nf = util.normpath(f)
1614 ignored = None
1610 ignored = None
1615 ignoredata = None
1611 ignoredata = None
1616 if nf != b'.':
1612 if nf != b'.':
1617 if ignore(nf):
1613 if ignore(nf):
1618 ignored = nf
1614 ignored = nf
1619 ignoredata = repo.dirstate._ignorefileandline(nf)
1615 ignoredata = repo.dirstate._ignorefileandline(nf)
1620 else:
1616 else:
1621 for p in pathutil.finddirs(nf):
1617 for p in pathutil.finddirs(nf):
1622 if ignore(p):
1618 if ignore(p):
1623 ignored = p
1619 ignored = p
1624 ignoredata = repo.dirstate._ignorefileandline(p)
1620 ignoredata = repo.dirstate._ignorefileandline(p)
1625 break
1621 break
1626 if ignored:
1622 if ignored:
1627 if ignored == nf:
1623 if ignored == nf:
1628 ui.write(_(b"%s is ignored\n") % uipathfn(f))
1624 ui.write(_(b"%s is ignored\n") % uipathfn(f))
1629 else:
1625 else:
1630 ui.write(
1626 ui.write(
1631 _(
1627 _(
1632 b"%s is ignored because of "
1628 b"%s is ignored because of "
1633 b"containing directory %s\n"
1629 b"containing directory %s\n"
1634 )
1630 )
1635 % (uipathfn(f), ignored)
1631 % (uipathfn(f), ignored)
1636 )
1632 )
1637 ignorefile, lineno, line = ignoredata
1633 ignorefile, lineno, line = ignoredata
1638 ui.write(
1634 ui.write(
1639 _(b"(ignore rule in %s, line %d: '%s')\n")
1635 _(b"(ignore rule in %s, line %d: '%s')\n")
1640 % (ignorefile, lineno, line)
1636 % (ignorefile, lineno, line)
1641 )
1637 )
1642 else:
1638 else:
1643 ui.write(_(b"%s is not ignored\n") % uipathfn(f))
1639 ui.write(_(b"%s is not ignored\n") % uipathfn(f))
1644
1640
1645
1641
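pathutil.finddirs used above walks a path's ancestor directories from deepest to shallowest; a stand-in with the essential contract, for illustration (the real helper also yields the repository root b'' last, which is trimmed here):

    def finddirs(path):
        # for b'a/b/c' this yields b'a/b' and then b'a'
        pos = path.rfind(b'/')
        while pos != -1:
            yield path[:pos]
            pos = path.rfind(b'/', 0, pos)

    print(list(finddirs(b'a/b/c')))  # -> [b'a/b', b'a']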
1646 @command(
1642 @command(
1647 b'debugindex',
1643 b'debugindex',
1648 cmdutil.debugrevlogopts + cmdutil.formatteropts,
1644 cmdutil.debugrevlogopts + cmdutil.formatteropts,
1649 _(b'-c|-m|FILE'),
1645 _(b'-c|-m|FILE'),
1650 )
1646 )
1651 def debugindex(ui, repo, file_=None, **opts):
1647 def debugindex(ui, repo, file_=None, **opts):
1652 """dump index data for a storage primitive"""
1648 """dump index data for a storage primitive"""
1653 opts = pycompat.byteskwargs(opts)
1649 opts = pycompat.byteskwargs(opts)
1654 store = cmdutil.openstorage(repo, b'debugindex', file_, opts)
1650 store = cmdutil.openstorage(repo, b'debugindex', file_, opts)
1655
1651
1656 if ui.debugflag:
1652 if ui.debugflag:
1657 shortfn = hex
1653 shortfn = hex
1658 else:
1654 else:
1659 shortfn = short
1655 shortfn = short
1660
1656
1661 idlen = 12
1657 idlen = 12
1662 for i in store:
1658 for i in store:
1663 idlen = len(shortfn(store.node(i)))
1659 idlen = len(shortfn(store.node(i)))
1664 break
1660 break
1665
1661
1666 fm = ui.formatter(b'debugindex', opts)
1662 fm = ui.formatter(b'debugindex', opts)
1667 fm.plain(
1663 fm.plain(
1668 b' rev linkrev %s %s p2\n'
1664 b' rev linkrev %s %s p2\n'
1669 % (b'nodeid'.ljust(idlen), b'p1'.ljust(idlen))
1665 % (b'nodeid'.ljust(idlen), b'p1'.ljust(idlen))
1670 )
1666 )
1671
1667
1672 for rev in store:
1668 for rev in store:
1673 node = store.node(rev)
1669 node = store.node(rev)
1674 parents = store.parents(node)
1670 parents = store.parents(node)
1675
1671
1676 fm.startitem()
1672 fm.startitem()
1677 fm.write(b'rev', b'%6d ', rev)
1673 fm.write(b'rev', b'%6d ', rev)
1678 fm.write(b'linkrev', b'%7d ', store.linkrev(rev))
1674 fm.write(b'linkrev', b'%7d ', store.linkrev(rev))
1679 fm.write(b'node', b'%s ', shortfn(node))
1675 fm.write(b'node', b'%s ', shortfn(node))
1680 fm.write(b'p1', b'%s ', shortfn(parents[0]))
1676 fm.write(b'p1', b'%s ', shortfn(parents[0]))
1681 fm.write(b'p2', b'%s', shortfn(parents[1]))
1677 fm.write(b'p2', b'%s', shortfn(parents[1]))
1682 fm.plain(b'\n')
1678 fm.plain(b'\n')
1683
1679
1684 fm.end()
1680 fm.end()
1685
1681
1686
1682
1687 @command(
1683 @command(
1688 b'debugindexdot',
1684 b'debugindexdot',
1689 cmdutil.debugrevlogopts,
1685 cmdutil.debugrevlogopts,
1690 _(b'-c|-m|FILE'),
1686 _(b'-c|-m|FILE'),
1691 optionalrepo=True,
1687 optionalrepo=True,
1692 )
1688 )
1693 def debugindexdot(ui, repo, file_=None, **opts):
1689 def debugindexdot(ui, repo, file_=None, **opts):
1694 """dump an index DAG as a graphviz dot file"""
1690 """dump an index DAG as a graphviz dot file"""
1695 opts = pycompat.byteskwargs(opts)
1691 opts = pycompat.byteskwargs(opts)
1696 r = cmdutil.openstorage(repo, b'debugindexdot', file_, opts)
1692 r = cmdutil.openstorage(repo, b'debugindexdot', file_, opts)
1697 ui.writenoi18n(b"digraph G {\n")
1693 ui.writenoi18n(b"digraph G {\n")
1698 for i in r:
1694 for i in r:
1699 node = r.node(i)
1695 node = r.node(i)
1700 pp = r.parents(node)
1696 pp = r.parents(node)
1701 ui.write(b"\t%d -> %d\n" % (r.rev(pp[0]), i))
1697 ui.write(b"\t%d -> %d\n" % (r.rev(pp[0]), i))
1702 if pp[1] != repo.nullid:
1698 if pp[1] != repo.nullid:
1703 ui.write(b"\t%d -> %d\n" % (r.rev(pp[1]), i))
1699 ui.write(b"\t%d -> %d\n" % (r.rev(pp[1]), i))
1704 ui.write(b"}\n")
1700 ui.write(b"}\n")
1705
1701
1706
1702
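For a hypothetical three-revision repository where revision 1 is a child of 0 and revision 2 merges 0 and 1, the loop above emits graphviz source along these lines (-1 is the null revision, printed as the root's first parent; a p2 edge appears only for merges):

    digraph G {
            -1 -> 0
            0 -> 1
            1 -> 2
            0 -> 2
    }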
1707 @command(b'debugindexstats', [])
1703 @command(b'debugindexstats', [])
1708 def debugindexstats(ui, repo):
1704 def debugindexstats(ui, repo):
1709 """show stats related to the changelog index"""
1705 """show stats related to the changelog index"""
1710 repo.changelog.shortest(repo.nullid, 1)
1706 repo.changelog.shortest(repo.nullid, 1)
1711 index = repo.changelog.index
1707 index = repo.changelog.index
1712 if not util.safehasattr(index, b'stats'):
1708 if not util.safehasattr(index, b'stats'):
1713 raise error.Abort(_(b'debugindexstats only works with native code'))
1709 raise error.Abort(_(b'debugindexstats only works with native code'))
1714 for k, v in sorted(index.stats().items()):
1710 for k, v in sorted(index.stats().items()):
1715 ui.write(b'%s: %d\n' % (k, v))
1711 ui.write(b'%s: %d\n' % (k, v))
1716
1712
1717
1713
1718 @command(b'debuginstall', [] + cmdutil.formatteropts, b'', norepo=True)
1714 @command(b'debuginstall', [] + cmdutil.formatteropts, b'', norepo=True)
1719 def debuginstall(ui, **opts):
1715 def debuginstall(ui, **opts):
1720 """test Mercurial installation
1716 """test Mercurial installation
1721
1717
1722 Returns 0 on success.
1718 Returns 0 on success.
1723 """
1719 """
1724 opts = pycompat.byteskwargs(opts)
1720 opts = pycompat.byteskwargs(opts)
1725
1721
1726 problems = 0
1722 problems = 0
1727
1723
1728 fm = ui.formatter(b'debuginstall', opts)
1724 fm = ui.formatter(b'debuginstall', opts)
1729 fm.startitem()
1725 fm.startitem()
1730
1726
1731 # encoding might be unknown or wrong. don't translate these messages.
1727 # encoding might be unknown or wrong. don't translate these messages.
1732 fm.write(b'encoding', b"checking encoding (%s)...\n", encoding.encoding)
1728 fm.write(b'encoding', b"checking encoding (%s)...\n", encoding.encoding)
1733 err = None
1729 err = None
1734 try:
1730 try:
1735 codecs.lookup(pycompat.sysstr(encoding.encoding))
1731 codecs.lookup(pycompat.sysstr(encoding.encoding))
1736 except LookupError as inst:
1732 except LookupError as inst:
1737 err = stringutil.forcebytestr(inst)
1733 err = stringutil.forcebytestr(inst)
1738 problems += 1
1734 problems += 1
1739 fm.condwrite(
1735 fm.condwrite(
1740 err,
1736 err,
1741 b'encodingerror',
1737 b'encodingerror',
1742 b" %s\n (check that your locale is properly set)\n",
1738 b" %s\n (check that your locale is properly set)\n",
1743 err,
1739 err,
1744 )
1740 )
1745
1741
1746 # Python
1742 # Python
1747 pythonlib = None
1743 pythonlib = None
1748 if util.safehasattr(os, '__file__'):
1744 if util.safehasattr(os, '__file__'):
1749 pythonlib = os.path.dirname(pycompat.fsencode(os.__file__))
1745 pythonlib = os.path.dirname(pycompat.fsencode(os.__file__))
1750 elif getattr(sys, 'oxidized', False):
1746 elif getattr(sys, 'oxidized', False):
1751 pythonlib = pycompat.sysexecutable
1747 pythonlib = pycompat.sysexecutable
1752
1748
1753 fm.write(
1749 fm.write(
1754 b'pythonexe',
1750 b'pythonexe',
1755 _(b"checking Python executable (%s)\n"),
1751 _(b"checking Python executable (%s)\n"),
1756 pycompat.sysexecutable or _(b"unknown"),
1752 pycompat.sysexecutable or _(b"unknown"),
1757 )
1753 )
1758 fm.write(
1754 fm.write(
1759 b'pythonimplementation',
1755 b'pythonimplementation',
1760 _(b"checking Python implementation (%s)\n"),
1756 _(b"checking Python implementation (%s)\n"),
1761 pycompat.sysbytes(platform.python_implementation()),
1757 pycompat.sysbytes(platform.python_implementation()),
1762 )
1758 )
1763 fm.write(
1759 fm.write(
1764 b'pythonver',
1760 b'pythonver',
1765 _(b"checking Python version (%s)\n"),
1761 _(b"checking Python version (%s)\n"),
1766 (b"%d.%d.%d" % sys.version_info[:3]),
1762 (b"%d.%d.%d" % sys.version_info[:3]),
1767 )
1763 )
1768 fm.write(
1764 fm.write(
1769 b'pythonlib',
1765 b'pythonlib',
1770 _(b"checking Python lib (%s)...\n"),
1766 _(b"checking Python lib (%s)...\n"),
1771 pythonlib or _(b"unknown"),
1767 pythonlib or _(b"unknown"),
1772 )
1768 )
1773
1769
1774 try:
1770 try:
1775 from . import rustext # pytype: disable=import-error
1771 from . import rustext # pytype: disable=import-error
1776
1772
1777 rustext.__doc__ # trigger lazy import
1773 rustext.__doc__ # trigger lazy import
1778 except ImportError:
1774 except ImportError:
1779 rustext = None
1775 rustext = None
1780
1776
1781 security = set(sslutil.supportedprotocols)
1777 security = set(sslutil.supportedprotocols)
1782 if sslutil.hassni:
1778 if sslutil.hassni:
1783 security.add(b'sni')
1779 security.add(b'sni')
1784
1780
1785 fm.write(
1781 fm.write(
1786 b'pythonsecurity',
1782 b'pythonsecurity',
1787 _(b"checking Python security support (%s)\n"),
1783 _(b"checking Python security support (%s)\n"),
1788 fm.formatlist(sorted(security), name=b'protocol', fmt=b'%s', sep=b','),
1784 fm.formatlist(sorted(security), name=b'protocol', fmt=b'%s', sep=b','),
1789 )
1785 )
1790
1786
1791 # These are warnings, not errors. So don't increment problem count. This
1787 # These are warnings, not errors. So don't increment problem count. This
1792 # may change in the future.
1788 # may change in the future.
1793 if b'tls1.2' not in security:
1789 if b'tls1.2' not in security:
1794 fm.plain(
1790 fm.plain(
1795 _(
1791 _(
1796 b' TLS 1.2 not supported by Python install; '
1792 b' TLS 1.2 not supported by Python install; '
1797 b'network connections lack modern security\n'
1793 b'network connections lack modern security\n'
1798 )
1794 )
1799 )
1795 )
1800 if b'sni' not in security:
1796 if b'sni' not in security:
1801 fm.plain(
1797 fm.plain(
1802 _(
1798 _(
1803 b' SNI not supported by Python install; may have '
1799 b' SNI not supported by Python install; may have '
1804 b'connectivity issues with some servers\n'
1800 b'connectivity issues with some servers\n'
1805 )
1801 )
1806 )
1802 )
1807
1803
1808 fm.plain(
1804 fm.plain(
1809 _(
1805 _(
1810 b"checking Rust extensions (%s)\n"
1806 b"checking Rust extensions (%s)\n"
1811 % (b'missing' if rustext is None else b'installed')
1807 % (b'missing' if rustext is None else b'installed')
1812 ),
1808 ),
1813 )
1809 )
1814
1810
1815 # TODO print CA cert info
1811 # TODO print CA cert info
1816
1812
1817 # hg version
1813 # hg version
1818 hgver = util.version()
1814 hgver = util.version()
1819 fm.write(
1815 fm.write(
1820 b'hgver', _(b"checking Mercurial version (%s)\n"), hgver.split(b'+')[0]
1816 b'hgver', _(b"checking Mercurial version (%s)\n"), hgver.split(b'+')[0]
1821 )
1817 )
1822 fm.write(
1818 fm.write(
1823 b'hgverextra',
1819 b'hgverextra',
1824 _(b"checking Mercurial custom build (%s)\n"),
1820 _(b"checking Mercurial custom build (%s)\n"),
1825 b'+'.join(hgver.split(b'+')[1:]),
1821 b'+'.join(hgver.split(b'+')[1:]),
1826 )
1822 )
1827
1823
1828 # compiled modules
1824 # compiled modules
1829 hgmodules = None
1825 hgmodules = None
1830 if util.safehasattr(sys.modules[__name__], '__file__'):
1826 if util.safehasattr(sys.modules[__name__], '__file__'):
1831 hgmodules = os.path.dirname(pycompat.fsencode(__file__))
1827 hgmodules = os.path.dirname(pycompat.fsencode(__file__))
1832 elif getattr(sys, 'oxidized', False):
1828 elif getattr(sys, 'oxidized', False):
1833 hgmodules = pycompat.sysexecutable
1829 hgmodules = pycompat.sysexecutable
1834
1830
1835 fm.write(
1831 fm.write(
1836 b'hgmodulepolicy', _(b"checking module policy (%s)\n"), policy.policy
1832 b'hgmodulepolicy', _(b"checking module policy (%s)\n"), policy.policy
1837 )
1833 )
1838 fm.write(
1834 fm.write(
1839 b'hgmodules',
1835 b'hgmodules',
1840 _(b"checking installed modules (%s)...\n"),
1836 _(b"checking installed modules (%s)...\n"),
1841 hgmodules or _(b"unknown"),
1837 hgmodules or _(b"unknown"),
1842 )
1838 )
1843
1839
1844 rustandc = policy.policy in (b'rust+c', b'rust+c-allow')
1840 rustandc = policy.policy in (b'rust+c', b'rust+c-allow')
1845 rustext = rustandc # for now, that's the only case
1841 rustext = rustandc # for now, that's the only case
1846 cext = policy.policy in (b'c', b'allow') or rustandc
1842 cext = policy.policy in (b'c', b'allow') or rustandc
1847 nopure = cext or rustext
1843 nopure = cext or rustext
1848 if nopure:
1844 if nopure:
1849 err = None
1845 err = None
1850 try:
1846 try:
1851 if cext:
1847 if cext:
1852 from .cext import ( # pytype: disable=import-error
1848 from .cext import ( # pytype: disable=import-error
1853 base85,
1849 base85,
1854 bdiff,
1850 bdiff,
1855 mpatch,
1851 mpatch,
1856 osutil,
1852 osutil,
1857 )
1853 )
1858
1854
1859 # quiet pyflakes
1855 # quiet pyflakes
1860 dir(bdiff), dir(mpatch), dir(base85), dir(osutil)
1856 dir(bdiff), dir(mpatch), dir(base85), dir(osutil)
1861 if rustext:
1857 if rustext:
1862 from .rustext import ( # pytype: disable=import-error
1858 from .rustext import ( # pytype: disable=import-error
1863 ancestor,
1859 ancestor,
1864 dirstate,
1860 dirstate,
1865 )
1861 )
1866
1862
1867 dir(ancestor), dir(dirstate) # quiet pyflakes
1863 dir(ancestor), dir(dirstate) # quiet pyflakes
1868 except Exception as inst:
1864 except Exception as inst:
1869 err = stringutil.forcebytestr(inst)
1865 err = stringutil.forcebytestr(inst)
1870 problems += 1
1866 problems += 1
1871 fm.condwrite(err, b'extensionserror', b" %s\n", err)
1867 fm.condwrite(err, b'extensionserror', b" %s\n", err)
1872
1868
1873 compengines = util.compengines._engines.values()
1869 compengines = util.compengines._engines.values()
1874 fm.write(
1870 fm.write(
1875 b'compengines',
1871 b'compengines',
1876 _(b'checking registered compression engines (%s)\n'),
1872 _(b'checking registered compression engines (%s)\n'),
1877 fm.formatlist(
1873 fm.formatlist(
1878 sorted(e.name() for e in compengines),
1874 sorted(e.name() for e in compengines),
1879 name=b'compengine',
1875 name=b'compengine',
1880 fmt=b'%s',
1876 fmt=b'%s',
1881 sep=b', ',
1877 sep=b', ',
1882 ),
1878 ),
1883 )
1879 )
1884 fm.write(
1880 fm.write(
1885 b'compenginesavail',
1881 b'compenginesavail',
1886 _(b'checking available compression engines (%s)\n'),
1882 _(b'checking available compression engines (%s)\n'),
1887 fm.formatlist(
1883 fm.formatlist(
1888 sorted(e.name() for e in compengines if e.available()),
1884 sorted(e.name() for e in compengines if e.available()),
1889 name=b'compengine',
1885 name=b'compengine',
1890 fmt=b'%s',
1886 fmt=b'%s',
1891 sep=b', ',
1887 sep=b', ',
1892 ),
1888 ),
1893 )
1889 )
1894 wirecompengines = compression.compengines.supportedwireengines(
1890 wirecompengines = compression.compengines.supportedwireengines(
1895 compression.SERVERROLE
1891 compression.SERVERROLE
1896 )
1892 )
1897 fm.write(
1893 fm.write(
1898 b'compenginesserver',
1894 b'compenginesserver',
1899 _(
1895 _(
            b'checking available compression engines '
            b'for wire protocol (%s)\n'
        ),
        fm.formatlist(
            [e.name() for e in wirecompengines if e.wireprotosupport()],
            name=b'compengine',
            fmt=b'%s',
            sep=b', ',
        ),
    )
    re2 = b'missing'
    if util._re2:
        re2 = b'available'
    fm.plain(_(b'checking "re2" regexp engine (%s)\n') % re2)
    fm.data(re2=bool(util._re2))

    # templates
    p = templater.templatedir()
    fm.write(b'templatedirs', b'checking templates (%s)...\n', p or b'')
    fm.condwrite(not p, b'', _(b" no template directories found\n"))
    if p:
        (m, fp) = templater.try_open_template(b"map-cmdline.default")
        if m:
            # template found, check if it is working
            err = None
            try:
                templater.templater.frommapfile(m)
            except Exception as inst:
                err = stringutil.forcebytestr(inst)
                p = None
            fm.condwrite(err, b'defaulttemplateerror', b" %s\n", err)
        else:
            p = None
        fm.condwrite(
            p, b'defaulttemplate', _(b"checking default template (%s)\n"), m
        )
        fm.condwrite(
            not m,
            b'defaulttemplatenotfound',
            _(b" template '%s' not found\n"),
            b"default",
        )
    if not p:
        problems += 1
    fm.condwrite(
        not p, b'', _(b" (templates seem to have been installed incorrectly)\n")
    )

    # editor
    editor = ui.geteditor()
    editor = util.expandpath(editor)
    editorbin = procutil.shellsplit(editor)[0]
    fm.write(b'editor', _(b"checking commit editor... (%s)\n"), editorbin)
    cmdpath = procutil.findexe(editorbin)
    fm.condwrite(
        not cmdpath and editor == b'vi',
        b'vinotfound',
        _(
            b" No commit editor set and can't find %s in PATH\n"
            b" (specify a commit editor in your configuration"
            b" file)\n"
        ),
        not cmdpath and editor == b'vi' and editorbin,
    )
    fm.condwrite(
        not cmdpath and editor != b'vi',
        b'editornotfound',
        _(
            b" Can't find editor '%s' in PATH\n"
            b" (specify a commit editor in your configuration"
            b" file)\n"
        ),
        not cmdpath and editorbin,
    )
    if not cmdpath and editor != b'vi':
        problems += 1

    # check username
    username = None
    err = None
    try:
        username = ui.username()
    except error.Abort as e:
        err = e.message
        problems += 1

    fm.condwrite(
        username, b'username', _(b"checking username (%s)\n"), username
    )
    fm.condwrite(
        err,
        b'usernameerror',
        _(
            b"checking username...\n %s\n"
            b" (specify a username in your configuration file)\n"
        ),
        err,
    )

    for name, mod in extensions.extensions():
        handler = getattr(mod, 'debuginstall', None)
        if handler is not None:
            problems += handler(ui, fm)

    fm.condwrite(not problems, b'', _(b"no problems detected\n"))
    if not problems:
        fm.data(problems=problems)
    fm.condwrite(
        problems,
        b'problems',
        _(b"%d problems detected, please check your install!\n"),
        problems,
    )
    fm.end()

    return problems


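# A minimal illustrative sketch (not part of the original change; the name
# below is hypothetical): the summary logic above reports either a success
# line or a problem count, mirroring the final fm.condwrite() pair.
def _example_summarize_problems(problems):
    if not problems:
        return b"no problems detected\n"
    return b"%d problems detected, please check your install!\n" % problems

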
@command(b'debugknown', [], _(b'REPO ID...'), norepo=True)
def debugknown(ui, repopath, *ids, **opts):
    """test whether node ids are known to a repo

    Every ID must be a full-length hex node id string. Returns a list of 0s
    and 1s indicating unknown/known.
    """
    opts = pycompat.byteskwargs(opts)
    repo = hg.peer(ui, opts, repopath)
    if not repo.capable(b'known'):
        raise error.Abort(b"known() not supported by target repository")
    flags = repo.known([bin(s) for s in ids])
    ui.write(b"%s\n" % (b"".join([f and b"1" or b"0" for f in flags])))


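# Illustrative sketch (hypothetical helper, not part of the original change):
# debugknown renders each boolean answer from the peer as b"1" or b"0" and
# joins them into a single line, e.g. [True, False, True] -> b"101".
def _example_render_known_flags(flags):
    return b"".join([f and b"1" or b"0" for f in flags])

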
@command(b'debuglabelcomplete', [], _(b'LABEL...'))
def debuglabelcomplete(ui, repo, *args):
    '''backwards compatibility with old bash completion scripts (DEPRECATED)'''
    debugnamecomplete(ui, repo, *args)


@command(
    b'debuglocks',
    [
        (b'L', b'force-free-lock', None, _(b'free the store lock (DANGEROUS)')),
        (
            b'W',
            b'force-free-wlock',
            None,
            _(b'free the working state lock (DANGEROUS)'),
        ),
        (b's', b'set-lock', None, _(b'set the store lock until stopped')),
        (
            b'S',
            b'set-wlock',
            None,
            _(b'set the working state lock until stopped'),
        ),
    ],
    _(b'[OPTION]...'),
)
def debuglocks(ui, repo, **opts):
    """show or modify state of locks

    By default, this command will show which locks are held. This
    includes the user and process holding the lock, the amount of time
    the lock has been held, and the machine name where the process is
    running if it's not local.

    Locks protect the integrity of Mercurial's data, so they should be
    treated with care. System crashes or other interruptions may cause
    locks to not be properly released, though Mercurial will usually
    detect and remove such stale locks automatically.

    However, detecting stale locks may not always be possible (for
    instance, on a shared filesystem). Removing locks may also be
    blocked by filesystem permissions.

    Setting a lock will prevent other commands from changing the data.
    The command will wait until an interruption (SIGINT, SIGTERM, ...) occurs.
    The set locks are removed when the command exits.

    Returns 0 if no locks are held.

    """

    if opts.get('force_free_lock'):
        repo.svfs.unlink(b'lock')
    if opts.get('force_free_wlock'):
        repo.vfs.unlink(b'wlock')
    if opts.get('force_free_lock') or opts.get('force_free_wlock'):
        return 0

    locks = []
    try:
        if opts.get('set_wlock'):
            try:
                locks.append(repo.wlock(False))
            except error.LockHeld:
                raise error.Abort(_(b'wlock is already held'))
        if opts.get('set_lock'):
            try:
                locks.append(repo.lock(False))
            except error.LockHeld:
                raise error.Abort(_(b'lock is already held'))
        if len(locks):
            ui.promptchoice(_(b"ready to release the lock (y)? $$ &Yes"))
            return 0
    finally:
        release(*locks)

    now = time.time()
    held = 0

    def report(vfs, name, method):
        # this causes stale locks to get reaped for more accurate reporting
        try:
            l = method(False)
        except error.LockHeld:
            l = None

        if l:
            l.release()
        else:
            try:
                st = vfs.lstat(name)
                age = now - st[stat.ST_MTIME]
                user = util.username(st.st_uid)
                locker = vfs.readlock(name)
                if b":" in locker:
                    host, pid = locker.split(b':')
                    if host == socket.gethostname():
                        locker = b'user %s, process %s' % (user or b'None', pid)
                    else:
                        locker = b'user %s, process %s, host %s' % (
                            user or b'None',
                            pid,
                            host,
                        )
                ui.writenoi18n(b"%-6s %s (%ds)\n" % (name + b":", locker, age))
                return 1
            except OSError as e:
                if e.errno != errno.ENOENT:
                    raise

        ui.writenoi18n(b"%-6s free\n" % (name + b":"))
        return 0

    held += report(repo.svfs, b"lock", repo.lock)
    held += report(repo.vfs, b"wlock", repo.wlock)

    return held


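# Illustrative sketch (hypothetical helper, not part of the original change):
# a lock file records its holder as b"host:pid"; report() above shortens the
# description when the host matches the local machine.
def _example_describe_locker(locker, user, localhost):
    host, pid = locker.split(b':')
    if host == localhost:
        return b'user %s, process %s' % (user or b'None', pid)
    return b'user %s, process %s, host %s' % (user or b'None', pid, host)

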
@command(
    b'debugmanifestfulltextcache',
    [
        (b'', b'clear', False, _(b'clear the cache')),
        (
            b'a',
            b'add',
            [],
            _(b'add the given manifest nodes to the cache'),
            _(b'NODE'),
        ),
    ],
    b'',
)
def debugmanifestfulltextcache(ui, repo, add=(), **opts):
    """show, clear or amend the contents of the manifest fulltext cache"""

    def getcache():
        r = repo.manifestlog.getstorage(b'')
        try:
            return r._fulltextcache
        except AttributeError:
            msg = _(
                b"Current revlog implementation doesn't appear to have a "
                b"manifest fulltext cache\n"
            )
            raise error.Abort(msg)

    if opts.get('clear'):
        with repo.wlock():
            cache = getcache()
            cache.clear(clear_persisted_data=True)
            return

    if add:
        with repo.wlock():
            m = repo.manifestlog
            store = m.getstorage(b'')
            for n in add:
                try:
                    manifest = m[store.lookup(n)]
                except error.LookupError as e:
                    raise error.Abort(
                        bytes(e), hint=b"Check your manifest node id"
                    )
                manifest.read()  # stores revision in cache too
            return

    cache = getcache()
    if not len(cache):
        ui.write(_(b'cache empty\n'))
    else:
        ui.write(
            _(
                b'cache contains %d manifest entries, in order of most to '
                b'least recent:\n'
            )
            % (len(cache),)
        )
        totalsize = 0
        for nodeid in cache:
            # use cache.peek() so the lookup does not update the LRU order
            data = cache.peek(nodeid)
            size = len(data)
            totalsize += size + 24  # 20 bytes nodeid, 4 bytes size
            ui.write(
                _(b'id: %s, size %s\n') % (hex(nodeid), util.bytecount(size))
            )
        ondisk = cache._opener.stat(b'manifestfulltextcache').st_size
        ui.write(
            _(b'total cache data size %s, on-disk %s\n')
            % (util.bytecount(totalsize), util.bytecount(ondisk))
        )


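# Illustrative sketch (hypothetical helper, not part of the original change):
# the size report above accounts each entry as its data plus a fixed 24-byte
# overhead (20-byte nodeid + 4-byte size), matching the loop in the command.
def _example_cache_footprint(entries):
    # entries: iterable of (nodeid, data) pairs
    return sum(len(data) + 24 for _nodeid, data in entries)

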
@command(b'debugmergestate', [] + cmdutil.templateopts, b'')
def debugmergestate(ui, repo, *args, **opts):
    """print merge state

    Use --verbose to print out information about whether v1 or v2 merge state
    was chosen."""

    if ui.verbose:
        ms = mergestatemod.mergestate(repo)

        # sort so that reasonable information is on top
        v1records = ms._readrecordsv1()
        v2records = ms._readrecordsv2()

        if not v1records and not v2records:
            pass
        elif not v2records:
            ui.writenoi18n(b'no version 2 merge state\n')
        elif ms._v1v2match(v1records, v2records):
            ui.writenoi18n(b'v1 and v2 states match: using v2\n')
        else:
            ui.writenoi18n(b'v1 and v2 states mismatch: using v1\n')

    opts = pycompat.byteskwargs(opts)
    if not opts[b'template']:
        opts[b'template'] = (
            b'{if(commits, "", "no merge state found\n")}'
            b'{commits % "{name}{if(label, " ({label})")}: {node}\n"}'
            b'{files % "file: {path} (state \\"{state}\\")\n'
            b'{if(local_path, "'
            b' local path: {local_path} (hash {local_key}, flags \\"{local_flags}\\")\n'
            b' ancestor path: {ancestor_path} (node {ancestor_node})\n'
            b' other path: {other_path} (node {other_node})\n'
            b'")}'
            b'{if(rename_side, "'
            b' rename side: {rename_side}\n'
            b' renamed path: {renamed_path}\n'
            b'")}'
            b'{extras % " extra: {key} = {value}\n"}'
            b'"}'
            b'{extras % "extra: {file} ({key} = {value})\n"}'
        )

    ms = mergestatemod.mergestate.read(repo)

    fm = ui.formatter(b'debugmergestate', opts)
    fm.startitem()

    fm_commits = fm.nested(b'commits')
    if ms.active():
        for name, node, label_index in (
            (b'local', ms.local, 0),
            (b'other', ms.other, 1),
        ):
            fm_commits.startitem()
            fm_commits.data(name=name)
            fm_commits.data(node=hex(node))
            if ms._labels and len(ms._labels) > label_index:
                fm_commits.data(label=ms._labels[label_index])
    fm_commits.end()

    fm_files = fm.nested(b'files')
    if ms.active():
        for f in ms:
            fm_files.startitem()
            fm_files.data(path=f)
            state = ms._state[f]
            fm_files.data(state=state[0])
            if state[0] in (
                mergestatemod.MERGE_RECORD_UNRESOLVED,
                mergestatemod.MERGE_RECORD_RESOLVED,
            ):
                fm_files.data(local_key=state[1])
                fm_files.data(local_path=state[2])
                fm_files.data(ancestor_path=state[3])
                fm_files.data(ancestor_node=state[4])
                fm_files.data(other_path=state[5])
                fm_files.data(other_node=state[6])
                fm_files.data(local_flags=state[7])
            elif state[0] in (
                mergestatemod.MERGE_RECORD_UNRESOLVED_PATH,
                mergestatemod.MERGE_RECORD_RESOLVED_PATH,
            ):
                fm_files.data(renamed_path=state[1])
                fm_files.data(rename_side=state[2])
            fm_extras = fm_files.nested(b'extras')
            for k, v in sorted(ms.extras(f).items()):
                fm_extras.startitem()
                fm_extras.data(key=k)
                fm_extras.data(value=v)
            fm_extras.end()

    fm_files.end()

    fm_extras = fm.nested(b'extras')
    for f, d in sorted(pycompat.iteritems(ms.allextras())):
        if f in ms:
            # if the file is in the mergestate, we already processed its
            # extras above
            continue
        for k, v in pycompat.iteritems(d):
            fm_extras.startitem()
            fm_extras.data(file=f)
            fm_extras.data(key=k)
            fm_extras.data(value=v)
    fm_extras.end()

    fm.end()


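# Illustrative example (hypothetical hashes and labels, not part of the
# original change): with the default template above, output for a repository
# with one unresolved file looks roughly like:
#
#   local (working copy): 0123abcd...
#   other (merge rev): 89efcdab...
#   file: foo.txt (state "u")
#    local path: foo.txt (hash ..., flags "")
#    ancestor path: foo.txt (node ...)
#    other path: foo.txt (node ...)

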
@command(b'debugnamecomplete', [], _(b'NAME...'))
def debugnamecomplete(ui, repo, *args):
    '''complete "names" - tags, open branch names, bookmark names'''

    names = set()
    # since we previously only listed open branches, we will handle that
    # specially (after this for loop)
    for name, ns in pycompat.iteritems(repo.names):
        if name != b'branches':
            names.update(ns.listnames(repo))
    names.update(
        tag
        for (tag, heads, tip, closed) in repo.branchmap().iterbranches()
        if not closed
    )
    completions = set()
    if not args:
        args = [b'']
    for a in args:
        completions.update(n for n in names if n.startswith(a))
    ui.write(b'\n'.join(sorted(completions)))
    ui.write(b'\n')


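# Illustrative sketch (hypothetical helper, not part of the original change):
# completion is a plain byte-prefix match over the collected names, with an
# empty prefix matching everything.
def _example_complete(names, prefixes):
    completions = set()
    for prefix in prefixes or [b'']:
        completions.update(n for n in names if n.startswith(prefix))
    return sorted(completions)

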
@command(
    b'debugnodemap',
    [
        (
            b'',
            b'dump-new',
            False,
            _(b'write a (new) persistent binary nodemap on stdout'),
        ),
        (b'', b'dump-disk', False, _(b'dump on-disk data on stdout')),
        (
            b'',
            b'check',
            False,
            _(b'check that the on-disk data is correct.'),
        ),
        (
            b'',
            b'metadata',
            False,
            _(b'display the on-disk metadata for the nodemap'),
        ),
    ],
)
def debugnodemap(ui, repo, **opts):
    """write and inspect on disk nodemap"""
    if opts['dump_new']:
        unfi = repo.unfiltered()
        cl = unfi.changelog
        if util.safehasattr(cl.index, "nodemap_data_all"):
            data = cl.index.nodemap_data_all()
        else:
            data = nodemap.persistent_data(cl.index)
        ui.write(data)
    elif opts['dump_disk']:
        unfi = repo.unfiltered()
        cl = unfi.changelog
        nm_data = nodemap.persisted_data(cl)
        if nm_data is not None:
            docket, data = nm_data
            ui.write(data[:])
    elif opts['check']:
        unfi = repo.unfiltered()
        cl = unfi.changelog
        nm_data = nodemap.persisted_data(cl)
        if nm_data is not None:
            docket, data = nm_data
            return nodemap.check_data(ui, cl.index, data)
    elif opts['metadata']:
        unfi = repo.unfiltered()
        cl = unfi.changelog
        nm_data = nodemap.persisted_data(cl)
        if nm_data is not None:
            docket, data = nm_data
            ui.write((b"uid: %s\n") % docket.uid)
            ui.write((b"tip-rev: %d\n") % docket.tip_rev)
            ui.write((b"tip-node: %s\n") % hex(docket.tip_node))
            ui.write((b"data-length: %d\n") % docket.data_length)
            ui.write((b"data-unused: %d\n") % docket.data_unused)
            unused_perc = docket.data_unused * 100.0 / docket.data_length
            ui.write((b"data-unused: %2.3f%%\n") % unused_perc)


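# Illustrative example (hypothetical values, not part of the original
# change): --metadata prints the docket fields in the shape shown above,
# roughly:
#
#   uid: 87403fa9
#   tip-rev: 5001
#   tip-node: 2dd0b54d...
#   data-length: 121088
#   data-unused: 256
#   data-unused: 0.211%

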
@command(
    b'debugobsolete',
    [
        (b'', b'flags', 0, _(b'markers flag')),
        (
            b'',
            b'record-parents',
            False,
            _(b'record parent information for the precursor'),
        ),
        (b'r', b'rev', [], _(b'display markers relevant to REV')),
        (
            b'',
            b'exclusive',
            False,
            _(b'restrict display to markers only relevant to REV'),
        ),
        (b'', b'index', False, _(b'display index of the marker')),
        (b'', b'delete', [], _(b'delete markers specified by indices')),
    ]
    + cmdutil.commitopts2
    + cmdutil.formatteropts,
    _(b'[OBSOLETED [REPLACEMENT ...]]'),
)
def debugobsolete(ui, repo, precursor=None, *successors, **opts):
    """create arbitrary obsolete marker

    With no arguments, displays the list of obsolescence markers."""

    opts = pycompat.byteskwargs(opts)

    def parsenodeid(s):
        try:
            # We do not use revsingle/revrange functions here to accept
            # arbitrary node identifiers, possibly not present in the
            # local repository.
            n = bin(s)
            if len(n) != repo.nodeconstants.nodelen:
                raise TypeError()
            return n
        except TypeError:
            raise error.InputError(
                b'changeset references must be full hexadecimal '
                b'node identifiers'
            )

    if opts.get(b'delete'):
        indices = []
        for v in opts.get(b'delete'):
            try:
                indices.append(int(v))
            except ValueError:
                raise error.InputError(
                    _(b'invalid index value: %r') % v,
                    hint=_(b'use integers for indices'),
                )

        if repo.currenttransaction():
            raise error.Abort(
                _(b'cannot delete obsmarkers in the middle of a transaction.')
            )

        with repo.lock():
            n = repair.deleteobsmarkers(repo.obsstore, indices)
            ui.write(_(b'deleted %i obsolescence markers\n') % n)

        return

    if precursor is not None:
        if opts[b'rev']:
            raise error.InputError(
                b'cannot select revision when creating marker'
            )
        metadata = {}
        metadata[b'user'] = encoding.fromlocal(opts[b'user'] or ui.username())
        succs = tuple(parsenodeid(succ) for succ in successors)
        l = repo.lock()
        try:
            tr = repo.transaction(b'debugobsolete')
            try:
                date = opts.get(b'date')
                if date:
                    date = dateutil.parsedate(date)
                else:
                    date = None
                prec = parsenodeid(precursor)
                parents = None
                if opts[b'record_parents']:
                    if prec not in repo.unfiltered():
                        raise error.Abort(
                            b'cannot use --record-parents on '
                            b'unknown changesets'
                        )
                    parents = repo.unfiltered()[prec].parents()
                    parents = tuple(p.node() for p in parents)
                repo.obsstore.create(
                    tr,
                    prec,
                    succs,
                    opts[b'flags'],
                    parents=parents,
                    date=date,
                    metadata=metadata,
                    ui=ui,
                )
                tr.close()
            except ValueError as exc:
                raise error.Abort(
                    _(b'bad obsmarker input: %s') % stringutil.forcebytestr(exc)
                )
            finally:
                tr.release()
        finally:
            l.release()
    else:
        if opts[b'rev']:
            revs = scmutil.revrange(repo, opts[b'rev'])
            nodes = [repo[r].node() for r in revs]
            markers = list(
                obsutil.getmarkers(
                    repo, nodes=nodes, exclusive=opts[b'exclusive']
                )
            )
            markers.sort(key=lambda x: x._data)
        else:
            markers = obsutil.getmarkers(repo)

        markerstoiter = markers
        isrelevant = lambda m: True
        if opts.get(b'rev') and opts.get(b'index'):
            markerstoiter = obsutil.getmarkers(repo)
            markerset = set(markers)
            isrelevant = lambda m: m in markerset

        fm = ui.formatter(b'debugobsolete', opts)
        for i, m in enumerate(markerstoiter):
            if not isrelevant(m):
                # a marker can be irrelevant when we're iterating over a set
                # of markers (markerstoiter) which is bigger than the set of
                # markers we want to display (markers). This can happen if
                # both --index and --rev options are provided: we then need
                # to iterate over all of the markers to get the correct
                # indices, but only display the ones that are relevant to
                # the --rev value.
                continue
            fm.startitem()
            ind = i if opts.get(b'index') else None
            cmdutil.showmarker(fm, m, index=ind)
        fm.end()


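# Illustrative invocations (hypothetical node ids, not part of the original
# change), covering the three modes handled above:
#
#   $ hg debugobsolete                   # list every marker
#   $ hg debugobsolete --index --rev .   # markers relevant to ., with indices
#   $ hg debugobsolete --delete 0 2      # delete markers 0 and 2
#   $ hg debugobsolete OLDNODE NEWNODE   # record OLDNODE as superseded

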
@command(
    b'debugp1copies',
    [(b'r', b'rev', b'', _(b'revision to debug'), _(b'REV'))],
    _(b'[-r REV]'),
)
def debugp1copies(ui, repo, **opts):
    """dump copy information compared to p1"""

    opts = pycompat.byteskwargs(opts)
    ctx = scmutil.revsingle(repo, opts.get(b'rev'), default=None)
    for dst, src in ctx.p1copies().items():
        ui.write(b'%s -> %s\n' % (src, dst))


@command(
    b'debugp2copies',
    [(b'r', b'rev', b'', _(b'revision to debug'), _(b'REV'))],
    _(b'[-r REV]'),
)
def debugp2copies(ui, repo, **opts):
    """dump copy information compared to p2"""

    opts = pycompat.byteskwargs(opts)
    ctx = scmutil.revsingle(repo, opts.get(b'rev'), default=None)
    for dst, src in ctx.p2copies().items():
        ui.write(b'%s -> %s\n' % (src, dst))


@command(
    b'debugpathcomplete',
    [
        (b'f', b'full', None, _(b'complete an entire path')),
        (b'n', b'normal', None, _(b'show only normal files')),
        (b'a', b'added', None, _(b'show only added files')),
        (b'r', b'removed', None, _(b'show only removed files')),
    ],
    _(b'FILESPEC...'),
)
def debugpathcomplete(ui, repo, *specs, **opts):
    """complete part or all of a tracked path

    This command supports shells that offer path name completion. It
    currently completes only files already known to the dirstate.

    Completion extends only to the next path segment unless
    --full is specified, in which case entire paths are used."""

    def complete(path, acceptable):
        dirstate = repo.dirstate
        spec = os.path.normpath(os.path.join(encoding.getcwd(), path))
        rootdir = repo.root + pycompat.ossep
        if spec != repo.root and not spec.startswith(rootdir):
            return [], []
        if os.path.isdir(spec):
            spec += b'/'
        spec = spec[len(rootdir) :]
        fixpaths = pycompat.ossep != b'/'
        if fixpaths:
            spec = spec.replace(pycompat.ossep, b'/')
        speclen = len(spec)
        fullpaths = opts['full']
        files, dirs = set(), set()
        adddir, addfile = dirs.add, files.add
        for f, st in pycompat.iteritems(dirstate):
            if f.startswith(spec) and st.state in acceptable:
                if fixpaths:
                    f = f.replace(b'/', pycompat.ossep)
                if fullpaths:
                    addfile(f)
                    continue
                s = f.find(pycompat.ossep, speclen)
                if s >= 0:
                    adddir(f[:s])
                else:
                    addfile(f)
        return files, dirs

    acceptable = b''
    if opts['normal']:
        acceptable += b'nm'
    if opts['added']:
        acceptable += b'a'
    if opts['removed']:
        acceptable += b'r'
    cwd = repo.getcwd()
    if not specs:
        specs = [b'.']

    files, dirs = set(), set()
    for spec in specs:
        f, d = complete(spec, acceptable or b'nmar')
        files.update(f)
        dirs.update(d)
    files.update(dirs)
    ui.write(b'\n'.join(repo.pathto(p, cwd) for p in sorted(files)))
    ui.write(b'\n')


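# Illustrative sketch (hypothetical helper, not part of the original change):
# without --full, a match is trimmed at the next path separator after the
# typed prefix, which is what f.find(pycompat.ossep, speclen) does above.
def _example_next_segment(path, speclen, sep=b'/'):
    cut = path.find(sep, speclen)
    return path if cut < 0 else path[:cut]

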
@command(
    b'debugpathcopies',
    cmdutil.walkopts,
    b'hg debugpathcopies REV1 REV2 [FILE]',
    inferrepo=True,
)
def debugpathcopies(ui, repo, rev1, rev2, *pats, **opts):
    """show copies between two revisions"""
    ctx1 = scmutil.revsingle(repo, rev1)
    ctx2 = scmutil.revsingle(repo, rev2)
    m = scmutil.match(ctx1, pats, opts)
    for dst, src in sorted(copies.pathcopies(ctx1, ctx2, m).items()):
        ui.write(b'%s -> %s\n' % (src, dst))


@command(b'debugpeer', [], _(b'PATH'), norepo=True)
def debugpeer(ui, path):
    """establish a connection to a peer repository"""
    # Always enable peer request logging. Requires --debug to display
    # though.
    overrides = {
        (b'devel', b'debug.peer-request'): True,
    }

    with ui.configoverride(overrides):
        peer = hg.peer(ui, {}, path)

        try:
            local = peer.local() is not None
            canpush = peer.canpush()

            ui.write(_(b'url: %s\n') % peer.url())
            ui.write(_(b'local: %s\n') % (_(b'yes') if local else _(b'no')))
            ui.write(
                _(b'pushable: %s\n') % (_(b'yes') if canpush else _(b'no'))
            )
        finally:
            peer.close()


@command(
    b'debugpickmergetool',
    [
        (b'r', b'rev', b'', _(b'check for files in this revision'), _(b'REV')),
        (b'', b'changedelete', None, _(b'emulate merging change and delete')),
    ]
    + cmdutil.walkopts
    + cmdutil.mergetoolopts,
    _(b'[PATTERN]...'),
    inferrepo=True,
)
def debugpickmergetool(ui, repo, *pats, **opts):
    """examine which merge tool is chosen for the specified file

    As described in :hg:`help merge-tools`, Mercurial examines the
    configurations below in this order to decide which merge tool is
    chosen for the specified file.

    1. ``--tool`` option
    2. ``HGMERGE`` environment variable
    3. configurations in ``merge-patterns`` section
    4. configuration of ``ui.merge``
    5. configurations in ``merge-tools`` section
    6. ``hgmerge`` tool (for historical reasons only)
    7. default tool for fallback (``:merge`` or ``:prompt``)

    This command writes out the examination result in the style below::

        FILE = MERGETOOL

    By default, all files known in the first parent context of the
    working directory are examined. Use file patterns and/or -I/-X
    options to limit target files. -r/--rev is also useful to examine
    files in another context without actually updating to it.

    With --debug, this command shows warning messages while matching
    against ``merge-patterns`` and so on, too. It is recommended to
    use this option with explicit file patterns and/or -I/-X options,
    because this option increases the amount of output per file according
    to configurations in hgrc.

    With -v/--verbose, this command shows the configurations below at
    first (only if specified).

    - ``--tool`` option
    - ``HGMERGE`` environment variable
    - configuration of ``ui.merge``

    If the merge tool is chosen before matching against
    ``merge-patterns``, this command can't show any helpful
    information, even with --debug. In such cases, the information
    above is useful to know why a merge tool is chosen.
    """
    opts = pycompat.byteskwargs(opts)
    overrides = {}
    if opts[b'tool']:
        overrides[(b'ui', b'forcemerge')] = opts[b'tool']
        ui.notenoi18n(b'with --tool %r\n' % (pycompat.bytestr(opts[b'tool'])))

    with ui.configoverride(overrides, b'debugmergepatterns'):
        hgmerge = encoding.environ.get(b"HGMERGE")
        if hgmerge is not None:
            ui.notenoi18n(b'with HGMERGE=%r\n' % (pycompat.bytestr(hgmerge)))
        uimerge = ui.config(b"ui", b"merge")
        if uimerge:
            ui.notenoi18n(b'with ui.merge=%r\n' % (pycompat.bytestr(uimerge)))

        ctx = scmutil.revsingle(repo, opts.get(b'rev'))
        m = scmutil.match(ctx, pats, opts)
        changedelete = opts[b'changedelete']
        for path in ctx.walk(m):
            fctx = ctx[path]
            with ui.silent(
                error=True
            ) if not ui.debugflag else util.nullcontextmanager():
                tool, toolpath = filemerge._picktool(
                    repo,
                    ui,
                    path,
                    fctx.isbinary(),
                    b'l' in fctx.flags(),
                    changedelete,
                )
            ui.write(b'%s = %s\n' % (path, tool))


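# Illustrative example (hypothetical file names, not part of the original
# change): the FILE = MERGETOOL style documented above renders like:
#
#   $ hg debugpickmergetool
#   a.txt = :merge
#   b.bin = :prompt

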
@command(b'debugpushkey', [], _(b'REPO NAMESPACE [KEY OLD NEW]'), norepo=True)
def debugpushkey(ui, repopath, namespace, *keyinfo, **opts):
    """access the pushkey key/value protocol

    With two args, list the keys in the given namespace.

    With five args, set a key to new if it currently is set to old.
    Reports success or failure.
    """

    target = hg.peer(ui, {}, repopath)
    try:
        if keyinfo:
            key, old, new = keyinfo
            with target.commandexecutor() as e:
                r = e.callcommand(
                    b'pushkey',
                    {
                        b'namespace': namespace,
                        b'key': key,
                        b'old': old,
                        b'new': new,
                    },
                ).result()

            ui.status(pycompat.bytestr(r) + b'\n')
            return not r
        else:
            for k, v in sorted(pycompat.iteritems(target.listkeys(namespace))):
                ui.write(
                    b"%s\t%s\n"
                    % (stringutil.escapestr(k), stringutil.escapestr(v))
                )
    finally:
        target.close()


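# Illustrative invocations (hypothetical values, not part of the original
# change), matching the two- and five-argument forms described above:
#
#   $ hg debugpushkey /path/to/repo namespaces             # list keys
#   $ hg debugpushkey /path/to/repo bookmarks KEY OLD NEW  # conditional set

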
@command(b'debugpvec', [], _(b'A B'))
def debugpvec(ui, repo, a, b=None):
    ca = scmutil.revsingle(repo, a)
    cb = scmutil.revsingle(repo, b)
    pa = pvec.ctxpvec(ca)
    pb = pvec.ctxpvec(cb)
    if pa == pb:
        rel = b"="
    elif pa > pb:
        rel = b">"
    elif pa < pb:
        rel = b"<"
    elif pa | pb:
        rel = b"|"
    ui.write(_(b"a: %s\n") % pa)
    ui.write(_(b"b: %s\n") % pb)
    ui.write(_(b"depth(a): %d depth(b): %d\n") % (pa._depth, pb._depth))
    ui.write(
        _(b"delta: %d hdist: %d distance: %d relation: %s\n")
        % (
            abs(pa._depth - pb._depth),
            pvec._hamming(pa._vec, pb._vec),
            pa.distance(pb),
            rel,
        )
    )


@command(
    b'debugrebuilddirstate|debugrebuildstate',
    [
        (b'r', b'rev', b'', _(b'revision to rebuild to'), _(b'REV')),
        (
            b'',
            b'minimal',
            None,
            _(
                b'only rebuild files that are inconsistent with '
                b'the working copy parent'
            ),
        ),
    ],
    _(b'[-r REV]'),
)
def debugrebuilddirstate(ui, repo, rev, **opts):
    """rebuild the dirstate as it would look for the given revision

    If no revision is specified, the first current parent will be used.

    The dirstate will be set to the files of the given revision.
    The actual working directory content or existing dirstate
    information such as adds or removes is not considered.

    ``minimal`` will only rebuild the dirstate status for files that claim to
    be tracked but are not in the parent manifest, or that exist in the parent
    manifest but are not in the dirstate. It will not change adds, removes, or
    modified files that are in the working copy parent.

    One use of this command is to make the next :hg:`status` invocation
    check the actual file content.
    """
    ctx = scmutil.revsingle(repo, rev)
    with repo.wlock():
        dirstate = repo.dirstate
        changedfiles = None
        # See command doc for what minimal does.
        if opts.get('minimal'):
            manifestfiles = set(ctx.manifest().keys())
            dirstatefiles = set(dirstate)
            manifestonly = manifestfiles - dirstatefiles
            dsonly = dirstatefiles - manifestfiles
            dsnotadded = {f for f in dsonly if dirstate[f] != b'a'}
            changedfiles = manifestonly | dsnotadded

        dirstate.rebuild(ctx.node(), ctx.manifest(), changedfiles)


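# Illustrative sketch (hypothetical helper, not part of the original change):
# with --minimal, the rebuilt set is the files on exactly one side of the
# manifest/dirstate pair, excluding dirstate-added files, as computed above.
def _example_minimal_changes(manifestfiles, dirstatefiles, added):
    manifestonly = manifestfiles - dirstatefiles
    dsnotadded = (dirstatefiles - manifestfiles) - added
    return manifestonly | dsnotadded

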
@command(b'debugrebuildfncache', [], b'')
def debugrebuildfncache(ui, repo):
    """rebuild the fncache file"""
    repair.rebuildfncache(ui, repo)


2918 @command(
2914 @command(
2919 b'debugrename',
2915 b'debugrename',
2920 [(b'r', b'rev', b'', _(b'revision to debug'), _(b'REV'))],
2916 [(b'r', b'rev', b'', _(b'revision to debug'), _(b'REV'))],
2921 _(b'[-r REV] [FILE]...'),
2917 _(b'[-r REV] [FILE]...'),
2922 )
2918 )
2923 def debugrename(ui, repo, *pats, **opts):
2919 def debugrename(ui, repo, *pats, **opts):
2924 """dump rename information"""
2920 """dump rename information"""
2925
2921
2926 opts = pycompat.byteskwargs(opts)
2922 opts = pycompat.byteskwargs(opts)
2927 ctx = scmutil.revsingle(repo, opts.get(b'rev'))
2923 ctx = scmutil.revsingle(repo, opts.get(b'rev'))
2928 m = scmutil.match(ctx, pats, opts)
2924 m = scmutil.match(ctx, pats, opts)
2929 for abs in ctx.walk(m):
2925 for abs in ctx.walk(m):
2930 fctx = ctx[abs]
2926 fctx = ctx[abs]
2931 o = fctx.filelog().renamed(fctx.filenode())
2927 o = fctx.filelog().renamed(fctx.filenode())
2932 rel = repo.pathto(abs)
2928 rel = repo.pathto(abs)
2933 if o:
2929 if o:
2934 ui.write(_(b"%s renamed from %s:%s\n") % (rel, o[0], hex(o[1])))
2930 ui.write(_(b"%s renamed from %s:%s\n") % (rel, o[0], hex(o[1])))
2935 else:
2931 else:
2936 ui.write(_(b"%s not renamed\n") % rel)
2932 ui.write(_(b"%s not renamed\n") % rel)
2937
2933
2938
2934
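# Illustrative sketch (hypothetical helper): ``filelog().renamed()`` used in
# debugrename returns a false value or a ``(source_path, source_filenode)``
# pair, which the loop above unpacks as ``o[0]``/``o[1]``.
def _format_rename_sketch(origin):
    """Render a rename record the way debugrename would print it."""
    if origin:
        path, filenode = origin
        return b"renamed from %s:%s" % (path, hex(filenode))
    return b"not renamed"
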
@command(b'debugrequires|debugrequirements', [], b'')
def debugrequirements(ui, repo):
    """print the current repo requirements"""
    for r in sorted(repo.requirements):
        ui.write(b"%s\n" % r)

@command(
    b'debugrevlog',
    cmdutil.debugrevlogopts + [(b'd', b'dump', False, _(b'dump index data'))],
    _(b'-c|-m|FILE'),
    optionalrepo=True,
)
def debugrevlog(ui, repo, file_=None, **opts):
    """show data and statistics about a revlog"""
    opts = pycompat.byteskwargs(opts)
    r = cmdutil.openrevlog(repo, b'debugrevlog', file_, opts)

    if opts.get(b"dump"):
        numrevs = len(r)
        ui.write(
            (
                b"# rev p1rev p2rev start end deltastart base p1 p2"
                b" rawsize totalsize compression heads chainlen\n"
            )
        )
        ts = 0
        heads = set()

        for rev in pycompat.xrange(numrevs):
            dbase = r.deltaparent(rev)
            if dbase == -1:
                dbase = rev
            cbase = r.chainbase(rev)
            clen = r.chainlen(rev)
            p1, p2 = r.parentrevs(rev)
            rs = r.rawsize(rev)
            ts = ts + rs
            heads -= set(r.parentrevs(rev))
            heads.add(rev)
            try:
                compression = ts / r.end(rev)
            except ZeroDivisionError:
                compression = 0
            ui.write(
                b"%5d %5d %5d %5d %5d %10d %4d %4d %4d %7d %9d "
                b"%11d %5d %8d\n"
                % (
                    rev,
                    p1,
                    p2,
                    r.start(rev),
                    r.end(rev),
                    r.start(dbase),
                    r.start(cbase),
                    r.start(p1),
                    r.start(p2),
                    rs,
                    ts,
                    compression,
                    len(heads),
                    clen,
                )
            )
        return 0

    format = r._format_version
    v = r._format_flags
    flags = []
    gdelta = False
    if v & revlog.FLAG_INLINE_DATA:
        flags.append(b'inline')
    if v & revlog.FLAG_GENERALDELTA:
        gdelta = True
        flags.append(b'generaldelta')
    if not flags:
        flags = [b'(none)']

    ### tracks merge vs single parent
    nummerges = 0

    ### tracks the ways the "delta" is built
    # nodelta
    numempty = 0
    numemptytext = 0
    numemptydelta = 0
    # full file content
    numfull = 0
    # intermediate snapshot against a prior snapshot
    numsemi = 0
    # snapshot count per depth
    numsnapdepth = collections.defaultdict(lambda: 0)
    # delta against previous revision
    numprev = 0
    # delta against first or second parent (not prev)
    nump1 = 0
    nump2 = 0
    # delta against neither prev nor parents
    numother = 0
    # delta against prev that are also first or second parent
    # (details of `numprev`)
    nump1prev = 0
    nump2prev = 0

    # data about the delta chain of each rev
    chainlengths = []
    chainbases = []
    chainspans = []

    # data about each revision
    datasize = [None, 0, 0]
    fullsize = [None, 0, 0]
    semisize = [None, 0, 0]
    # snapshot count per depth
    snapsizedepth = collections.defaultdict(lambda: [None, 0, 0])
    deltasize = [None, 0, 0]
    chunktypecounts = {}
    chunktypesizes = {}

    def addsize(size, l):
        if l[0] is None or size < l[0]:
            l[0] = size
        if size > l[1]:
            l[1] = size
        l[2] += size

    numrevs = len(r)
    for rev in pycompat.xrange(numrevs):
        p1, p2 = r.parentrevs(rev)
        delta = r.deltaparent(rev)
        if format > 0:
            addsize(r.rawsize(rev), datasize)
        if p2 != nullrev:
            nummerges += 1
        size = r.length(rev)
        if delta == nullrev:
            chainlengths.append(0)
            chainbases.append(r.start(rev))
            chainspans.append(size)
            if size == 0:
                numempty += 1
                numemptytext += 1
            else:
                numfull += 1
                numsnapdepth[0] += 1
                addsize(size, fullsize)
                addsize(size, snapsizedepth[0])
        else:
            chainlengths.append(chainlengths[delta] + 1)
            baseaddr = chainbases[delta]
            revaddr = r.start(rev)
            chainbases.append(baseaddr)
            chainspans.append((revaddr - baseaddr) + size)
            if size == 0:
                numempty += 1
                numemptydelta += 1
            elif r.issnapshot(rev):
                addsize(size, semisize)
                numsemi += 1
                depth = r.snapshotdepth(rev)
                numsnapdepth[depth] += 1
                addsize(size, snapsizedepth[depth])
            else:
                addsize(size, deltasize)
                if delta == rev - 1:
                    numprev += 1
                    if delta == p1:
                        nump1prev += 1
                    elif delta == p2:
                        nump2prev += 1
                elif delta == p1:
                    nump1 += 1
                elif delta == p2:
                    nump2 += 1
                elif delta != nullrev:
                    numother += 1

        # Obtain data on the raw chunks in the revlog.
        if util.safehasattr(r, b'_getsegmentforrevs'):
            segment = r._getsegmentforrevs(rev, rev)[1]
        else:
            segment = r._revlog._getsegmentforrevs(rev, rev)[1]
        if segment:
            chunktype = bytes(segment[0:1])
        else:
            chunktype = b'empty'

        if chunktype not in chunktypecounts:
            chunktypecounts[chunktype] = 0
            chunktypesizes[chunktype] = 0

        chunktypecounts[chunktype] += 1
        chunktypesizes[chunktype] += size

    # Adjust size min value for empty cases
    for size in (datasize, fullsize, semisize, deltasize):
        if size[0] is None:
            size[0] = 0

    numdeltas = numrevs - numfull - numempty - numsemi
    numoprev = numprev - nump1prev - nump2prev
    totalrawsize = datasize[2]
    datasize[2] /= numrevs
    fulltotal = fullsize[2]
    if numfull == 0:
        fullsize[2] = 0
    else:
        fullsize[2] /= numfull
    semitotal = semisize[2]
    snaptotal = {}
    if numsemi > 0:
        semisize[2] /= numsemi
    for depth in snapsizedepth:
        snaptotal[depth] = snapsizedepth[depth][2]
        snapsizedepth[depth][2] /= numsnapdepth[depth]

    deltatotal = deltasize[2]
    if numdeltas > 0:
        deltasize[2] /= numdeltas
    totalsize = fulltotal + semitotal + deltatotal
    avgchainlen = sum(chainlengths) / numrevs
    maxchainlen = max(chainlengths)
    maxchainspan = max(chainspans)
    compratio = 1
    if totalsize:
        compratio = totalrawsize / totalsize

    basedfmtstr = b'%%%dd\n'
    basepcfmtstr = b'%%%dd %s(%%5.2f%%%%)\n'

    def dfmtstr(max):
        return basedfmtstr % len(str(max))

    def pcfmtstr(max, padding=0):
        return basepcfmtstr % (len(str(max)), b' ' * padding)

    def pcfmt(value, total):
        if total:
            return (value, 100 * float(value) / total)
        else:
            return value, 100.0

    ui.writenoi18n(b'format : %d\n' % format)
    ui.writenoi18n(b'flags : %s\n' % b', '.join(flags))

    ui.write(b'\n')
    fmt = pcfmtstr(totalsize)
    fmt2 = dfmtstr(totalsize)
    ui.writenoi18n(b'revisions : ' + fmt2 % numrevs)
    ui.writenoi18n(b' merges : ' + fmt % pcfmt(nummerges, numrevs))
    ui.writenoi18n(
        b' normal : ' + fmt % pcfmt(numrevs - nummerges, numrevs)
    )
    ui.writenoi18n(b'revisions : ' + fmt2 % numrevs)
    ui.writenoi18n(b' empty : ' + fmt % pcfmt(numempty, numrevs))
    ui.writenoi18n(
        b' text : '
        + fmt % pcfmt(numemptytext, numemptytext + numemptydelta)
    )
    ui.writenoi18n(
        b' delta : '
        + fmt % pcfmt(numemptydelta, numemptytext + numemptydelta)
    )
    ui.writenoi18n(
        b' snapshot : ' + fmt % pcfmt(numfull + numsemi, numrevs)
    )
    for depth in sorted(numsnapdepth):
        ui.write(
            (b' lvl-%-3d : ' % depth)
            + fmt % pcfmt(numsnapdepth[depth], numrevs)
        )
    ui.writenoi18n(b' deltas : ' + fmt % pcfmt(numdeltas, numrevs))
    ui.writenoi18n(b'revision size : ' + fmt2 % totalsize)
    ui.writenoi18n(
        b' snapshot : ' + fmt % pcfmt(fulltotal + semitotal, totalsize)
    )
    for depth in sorted(numsnapdepth):
        ui.write(
            (b' lvl-%-3d : ' % depth)
            + fmt % pcfmt(snaptotal[depth], totalsize)
        )
    ui.writenoi18n(b' deltas : ' + fmt % pcfmt(deltatotal, totalsize))

    def fmtchunktype(chunktype):
        if chunktype == b'empty':
            return b' %s : ' % chunktype
        elif chunktype in pycompat.bytestr(string.ascii_letters):
            return b' 0x%s (%s) : ' % (hex(chunktype), chunktype)
        else:
            return b' 0x%s : ' % hex(chunktype)

    ui.write(b'\n')
    ui.writenoi18n(b'chunks : ' + fmt2 % numrevs)
    for chunktype in sorted(chunktypecounts):
        ui.write(fmtchunktype(chunktype))
        ui.write(fmt % pcfmt(chunktypecounts[chunktype], numrevs))
    ui.writenoi18n(b'chunks size : ' + fmt2 % totalsize)
    for chunktype in sorted(chunktypecounts):
        ui.write(fmtchunktype(chunktype))
        ui.write(fmt % pcfmt(chunktypesizes[chunktype], totalsize))

    ui.write(b'\n')
    fmt = dfmtstr(max(avgchainlen, maxchainlen, maxchainspan, compratio))
    ui.writenoi18n(b'avg chain length : ' + fmt % avgchainlen)
    ui.writenoi18n(b'max chain length : ' + fmt % maxchainlen)
    ui.writenoi18n(b'max chain reach : ' + fmt % maxchainspan)
    ui.writenoi18n(b'compression ratio : ' + fmt % compratio)

    if format > 0:
        ui.write(b'\n')
        ui.writenoi18n(
            b'uncompressed data size (min/max/avg) : %d / %d / %d\n'
            % tuple(datasize)
        )
        ui.writenoi18n(
            b'full revision size (min/max/avg) : %d / %d / %d\n'
            % tuple(fullsize)
        )
        ui.writenoi18n(
            b'inter-snapshot size (min/max/avg) : %d / %d / %d\n'
            % tuple(semisize)
        )
        for depth in sorted(snapsizedepth):
            if depth == 0:
                continue
            ui.writenoi18n(
                b' level-%-3d (min/max/avg) : %d / %d / %d\n'
                % ((depth,) + tuple(snapsizedepth[depth]))
            )
        ui.writenoi18n(
            b'delta size (min/max/avg) : %d / %d / %d\n'
            % tuple(deltasize)
        )

    if numdeltas > 0:
        ui.write(b'\n')
        fmt = pcfmtstr(numdeltas)
        fmt2 = pcfmtstr(numdeltas, 4)
        ui.writenoi18n(
            b'deltas against prev : ' + fmt % pcfmt(numprev, numdeltas)
        )
        if numprev > 0:
            ui.writenoi18n(
                b' where prev = p1 : ' + fmt2 % pcfmt(nump1prev, numprev)
            )
            ui.writenoi18n(
                b' where prev = p2 : ' + fmt2 % pcfmt(nump2prev, numprev)
            )
            ui.writenoi18n(
                b' other : ' + fmt2 % pcfmt(numoprev, numprev)
            )
        if gdelta:
            ui.writenoi18n(
                b'deltas against p1 : ' + fmt % pcfmt(nump1, numdeltas)
            )
            ui.writenoi18n(
                b'deltas against p2 : ' + fmt % pcfmt(nump2, numdeltas)
            )
            ui.writenoi18n(
                b'deltas against other : ' + fmt % pcfmt(numother, numdeltas)
            )

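# Illustrative sketch (hypothetical helper, not called by debugrevlog): the
# delta-parent classification above boils down to a cascade of comparisons
# against the previous revision and the two parents.
def _classify_delta_sketch(rev, delta, p1, p2):
    """Return the bucket a delta falls into, mirroring the counters above."""
    if delta == nullrev:
        return b'full'  # chain base: full snapshot, no delta parent
    if delta == rev - 1:
        return b'prev'  # counted in numprev (and nump1prev/nump2prev)
    elif delta == p1:
        return b'p1'  # counted in nump1
    elif delta == p2:
        return b'p2'  # counted in nump2
    return b'other'  # counted in numother
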
@command(
    b'debugrevlogindex',
    cmdutil.debugrevlogopts
    + [(b'f', b'format', 0, _(b'revlog format'), _(b'FORMAT'))],
    _(b'[-f FORMAT] -c|-m|FILE'),
    optionalrepo=True,
)
def debugrevlogindex(ui, repo, file_=None, **opts):
    """dump the contents of a revlog index"""
    opts = pycompat.byteskwargs(opts)
    r = cmdutil.openrevlog(repo, b'debugrevlogindex', file_, opts)
    format = opts.get(b'format', 0)
    if format not in (0, 1):
        raise error.Abort(_(b"unknown format %d") % format)

    if ui.debugflag:
        shortfn = hex
    else:
        shortfn = short

    # There might not be anything in r, so have a sane default
    idlen = 12
    for i in r:
        idlen = len(shortfn(r.node(i)))
        break

    if format == 0:
        if ui.verbose:
            ui.writenoi18n(
                b" rev offset length linkrev %s %s p2\n"
                % (b"nodeid".ljust(idlen), b"p1".ljust(idlen))
            )
        else:
            ui.writenoi18n(
                b" rev linkrev %s %s p2\n"
                % (b"nodeid".ljust(idlen), b"p1".ljust(idlen))
            )
    elif format == 1:
        if ui.verbose:
            ui.writenoi18n(
                (
                    b" rev flag offset length size link p1"
                    b" p2 %s\n"
                )
                % b"nodeid".rjust(idlen)
            )
        else:
            ui.writenoi18n(
                b" rev flag size link p1 p2 %s\n"
                % b"nodeid".rjust(idlen)
            )

    for i in r:
        node = r.node(i)
        if format == 0:
            try:
                pp = r.parents(node)
            except Exception:
                pp = [repo.nullid, repo.nullid]
            if ui.verbose:
                ui.write(
                    b"% 6d % 9d % 7d % 7d %s %s %s\n"
                    % (
                        i,
                        r.start(i),
                        r.length(i),
                        r.linkrev(i),
                        shortfn(node),
                        shortfn(pp[0]),
                        shortfn(pp[1]),
                    )
                )
            else:
                ui.write(
                    b"% 6d % 7d %s %s %s\n"
                    % (
                        i,
                        r.linkrev(i),
                        shortfn(node),
                        shortfn(pp[0]),
                        shortfn(pp[1]),
                    )
                )
        elif format == 1:
            pr = r.parentrevs(i)
            if ui.verbose:
                ui.write(
                    b"% 6d %04x % 8d % 8d % 8d % 6d % 6d % 6d %s\n"
                    % (
                        i,
                        r.flags(i),
                        r.start(i),
                        r.length(i),
                        r.rawsize(i),
                        r.linkrev(i),
                        pr[0],
                        pr[1],
                        shortfn(node),
                    )
                )
            else:
                ui.write(
                    b"% 6d %04x % 8d % 6d % 6d % 6d %s\n"
                    % (
                        i,
                        r.flags(i),
                        r.rawsize(i),
                        r.linkrev(i),
                        pr[0],
                        pr[1],
                        shortfn(node),
                    )
                )

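# Illustrative sketch: the header above sizes its node-id column from the
# first entry, since ids are 40 hex chars with --debug and 12 otherwise.
# The helper name is hypothetical; only the padding idiom is the point.
def _index_header_sketch(idlen=12):
    """Build the short-format header with a nodeid column of ``idlen``."""
    return b" rev linkrev %s %s p2\n" % (
        b"nodeid".ljust(idlen),
        b"p1".ljust(idlen),
    )
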
@command(
    b'debugrevspec',
    [
        (
            b'',
            b'optimize',
            None,
            _(b'print parsed tree after optimizing (DEPRECATED)'),
        ),
        (
            b'',
            b'show-revs',
            True,
            _(b'print list of result revisions (default)'),
        ),
        (
            b's',
            b'show-set',
            None,
            _(b'print internal representation of result set'),
        ),
        (
            b'p',
            b'show-stage',
            [],
            _(b'print parsed tree at the given stage'),
            _(b'NAME'),
        ),
        (b'', b'no-optimized', False, _(b'evaluate tree without optimization')),
        (b'', b'verify-optimized', False, _(b'verify optimized result')),
    ],
    b'REVSPEC',
)
def debugrevspec(ui, repo, expr, **opts):
    """parse and apply a revision specification

    Use -p/--show-stage option to print the parsed tree at the given stages.
    Use -p all to print tree at every stage.

    Use --no-show-revs option with -s or -p to print only the set
    representation or the parsed tree respectively.

    Use --verify-optimized to compare the optimized result with the unoptimized
    one. Returns 1 if the optimized result differs.
    """
    opts = pycompat.byteskwargs(opts)
    aliases = ui.configitems(b'revsetalias')
    stages = [
        (b'parsed', lambda tree: tree),
        (
            b'expanded',
            lambda tree: revsetlang.expandaliases(tree, aliases, ui.warn),
        ),
        (b'concatenated', revsetlang.foldconcat),
        (b'analyzed', revsetlang.analyze),
        (b'optimized', revsetlang.optimize),
    ]
    if opts[b'no_optimized']:
        stages = stages[:-1]
    if opts[b'verify_optimized'] and opts[b'no_optimized']:
        raise error.Abort(
            _(b'cannot use --verify-optimized with --no-optimized')
        )
    stagenames = {n for n, f in stages}

    showalways = set()
    showchanged = set()
    if ui.verbose and not opts[b'show_stage']:
        # show parsed tree by --verbose (deprecated)
        showalways.add(b'parsed')
        showchanged.update([b'expanded', b'concatenated'])
        if opts[b'optimize']:
            showalways.add(b'optimized')
    if opts[b'show_stage'] and opts[b'optimize']:
        raise error.Abort(_(b'cannot use --optimize with --show-stage'))
    if opts[b'show_stage'] == [b'all']:
        showalways.update(stagenames)
    else:
        for n in opts[b'show_stage']:
            if n not in stagenames:
                raise error.Abort(_(b'invalid stage name: %s') % n)
        showalways.update(opts[b'show_stage'])

    treebystage = {}
    printedtree = None
    tree = revsetlang.parse(expr, lookup=revset.lookupfn(repo))
    for n, f in stages:
        treebystage[n] = tree = f(tree)
        if n in showalways or (n in showchanged and tree != printedtree):
            if opts[b'show_stage'] or n != b'parsed':
                ui.write(b"* %s:\n" % n)
            ui.write(revsetlang.prettyformat(tree), b"\n")
            printedtree = tree

    if opts[b'verify_optimized']:
        arevs = revset.makematcher(treebystage[b'analyzed'])(repo)
        brevs = revset.makematcher(treebystage[b'optimized'])(repo)
        if opts[b'show_set'] or (opts[b'show_set'] is None and ui.verbose):
            ui.writenoi18n(
                b"* analyzed set:\n", stringutil.prettyrepr(arevs), b"\n"
            )
            ui.writenoi18n(
                b"* optimized set:\n", stringutil.prettyrepr(brevs), b"\n"
            )
        arevs = list(arevs)
        brevs = list(brevs)
        if arevs == brevs:
            return 0
        ui.writenoi18n(b'--- analyzed\n', label=b'diff.file_a')
        ui.writenoi18n(b'+++ optimized\n', label=b'diff.file_b')
        sm = difflib.SequenceMatcher(None, arevs, brevs)
        for tag, alo, ahi, blo, bhi in sm.get_opcodes():
            if tag in ('delete', 'replace'):
                for c in arevs[alo:ahi]:
                    ui.write(b'-%d\n' % c, label=b'diff.deleted')
            if tag in ('insert', 'replace'):
                for c in brevs[blo:bhi]:
                    ui.write(b'+%d\n' % c, label=b'diff.inserted')
            if tag == 'equal':
                for c in arevs[alo:ahi]:
                    ui.write(b' %d\n' % c)
        return 1

    func = revset.makematcher(tree)
    revs = func(repo)
    if opts[b'show_set'] or (opts[b'show_set'] is None and ui.verbose):
        ui.writenoi18n(b"* set:\n", stringutil.prettyrepr(revs), b"\n")
    if not opts[b'show_revs']:
        return
    for c in revs:
        ui.write(b"%d\n" % c)

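# Illustrative sketch (hypothetical, standalone): the stage list above is a
# small pipeline of (name, transform) pairs threaded through a single value,
# which makes it easy to snapshot the tree after every stage.
def _run_stages_sketch(value, stages):
    """Apply (name, fn) stages in order, recording each intermediate result."""
    bystage = {}
    for name, fn in stages:
        value = fn(value)
        bystage[name] = value
    return value, bystage


# For example, with toy string transforms in place of revset functions,
# _run_stages_sketch('a', [('upper', str.upper), ('doubled', lambda s: s * 2)])
# returns ('AA', {'upper': 'A', 'doubled': 'AA'}).
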
@command(
    b'debugserve',
    [
        (
            b'',
            b'sshstdio',
            False,
            _(b'run an SSH server bound to process handles'),
        ),
        (b'', b'logiofd', b'', _(b'file descriptor to log server I/O to')),
        (b'', b'logiofile', b'', _(b'file to log server I/O to')),
    ],
    b'',
)
def debugserve(ui, repo, **opts):
    """run a server with advanced settings

    This command is similar to :hg:`serve`. It exists partially as a
    workaround for the fact that ``hg serve --stdio`` must have specific
    arguments for security reasons.
    """
    opts = pycompat.byteskwargs(opts)

    if not opts[b'sshstdio']:
        raise error.Abort(_(b'only --sshstdio is currently supported'))

    logfh = None

    if opts[b'logiofd'] and opts[b'logiofile']:
        raise error.Abort(_(b'cannot use both --logiofd and --logiofile'))

    if opts[b'logiofd']:
        # Ideally we would be line buffered. But line buffering in binary
        # mode isn't supported and emits a warning in Python 3.8+. Disabling
        # buffering could have performance impacts. But since this isn't
        # performance critical code, it should be fine.
        try:
            logfh = os.fdopen(int(opts[b'logiofd']), 'ab', 0)
        except OSError as e:
            if e.errno != errno.ESPIPE:
                raise
            # can't seek a pipe, so `ab` mode fails on py3
            logfh = os.fdopen(int(opts[b'logiofd']), 'wb', 0)
    elif opts[b'logiofile']:
        logfh = open(opts[b'logiofile'], b'ab', 0)

    s = wireprotoserver.sshserver(ui, repo, logfh=logfh)
    s.serve_forever()

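# Illustrative sketch (hypothetical helper): the fd-opening fallback above in
# isolation. Append mode needs a seekable descriptor; a pipe raises ESPIPE,
# in which case plain unbuffered write mode is used instead.
def _open_log_fd_sketch(fd):
    """Open a file descriptor for unbuffered binary logging."""
    try:
        return os.fdopen(fd, 'ab', 0)
    except OSError as e:
        if e.errno != errno.ESPIPE:
            raise
        return os.fdopen(fd, 'wb', 0)
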
@command(b'debugsetparents', [], _(b'REV1 [REV2]'))
def debugsetparents(ui, repo, rev1, rev2=None):
    """manually set the parents of the current working directory (DANGEROUS)

    This command is not what you are looking for and should not be used. Using
    this command will most certainly result in slight corruption of the file
    level histories within your repository. DO NOT USE THIS COMMAND.

    The command updates the p1 and p2 fields in the dirstate, without touching
    anything else. This is useful for writing repository conversion tools, but
    should be used with extreme care. For example, neither the working
    directory nor the dirstate is updated, so file status may be incorrect
    after running this command. Only use it if you are one of the few people
    who deeply understand both conversion tools and file level histories. If
    you are reading this help, you are not one of those people (most of them
    sailed west from Mithlond anyway).

    So one last time DO NOT USE THIS COMMAND.

    Returns 0 on success.
    """

    node1 = scmutil.revsingle(repo, rev1).node()
    node2 = scmutil.revsingle(repo, rev2, b'null').node()

    with repo.wlock():
        repo.setparents(node1, node2)

@command(b'debugsidedata', cmdutil.debugrevlogopts, _(b'-c|-m|FILE REV'))
def debugsidedata(ui, repo, file_, rev=None, **opts):
    """dump the side data for a cl/manifest/file revision

    Use --verbose to dump the sidedata content."""
    opts = pycompat.byteskwargs(opts)
    if opts.get(b'changelog') or opts.get(b'manifest') or opts.get(b'dir'):
        if rev is not None:
            raise error.CommandError(b'debugdata', _(b'invalid arguments'))
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'debugdata', _(b'invalid arguments'))
    r = cmdutil.openstorage(repo, b'debugdata', file_, opts)
    r = getattr(r, '_revlog', r)
    try:
        sidedata = r.sidedata(r.lookup(rev))
    except KeyError:
        raise error.Abort(_(b'invalid revision identifier %s') % rev)
    if sidedata:
        sidedata = list(sidedata.items())
        sidedata.sort()
        ui.writenoi18n(b'%d sidedata entries\n' % len(sidedata))
        for key, value in sidedata:
            ui.writenoi18n(b' entry-%04o size %d\n' % (key, len(value)))
            if ui.verbose:
                ui.writenoi18n(b' %s\n' % stringutil.pprint(value))

@command(b'debugssl', [], b'[SOURCE]', optionalrepo=True)
def debugssl(ui, repo, source=None, **opts):
    """test a secure connection to a server

    This builds the certificate chain for the server on Windows, installing the
    missing intermediates and trusted root via Windows Update if necessary. It
    does nothing on other platforms.

    If SOURCE is omitted, the 'default' path will be used. If a URL is given,
    that server is used. See :hg:`help urls` for more information.

    If the update succeeds, retry the original operation. Otherwise, the cause
    of the SSL error is likely another issue.
    """
    if not pycompat.iswindows:
        raise error.Abort(
            _(b'certificate chain building is only possible on Windows')
        )

    if not source:
        if not repo:
            raise error.Abort(
                _(
                    b"there is no Mercurial repository here, and no "
                    b"server specified"
                )
            )
        source = b"default"

    source, branches = urlutil.get_unique_pull_path(
        b'debugssl', repo, ui, source
    )
    url = urlutil.url(source)

    defaultport = {b'https': 443, b'ssh': 22}
    if url.scheme in defaultport:
        try:
            addr = (url.host, int(url.port or defaultport[url.scheme]))
        except ValueError:
            raise error.Abort(_(b"malformed port number in URL"))
    else:
        raise error.Abort(_(b"only https and ssh connections are supported"))

    from . import win32

    s = ssl.wrap_socket(
        socket.socket(),
        ssl_version=ssl.PROTOCOL_TLS,
        cert_reqs=ssl.CERT_NONE,
        ca_certs=None,
    )

    try:
        s.connect(addr)
        cert = s.getpeercert(True)

        ui.status(_(b'checking the certificate chain for %s\n') % url.host)

        complete = win32.checkcertificatechain(cert, build=False)

        if not complete:
            ui.status(_(b'certificate chain is incomplete, updating... '))

            if not win32.checkcertificatechain(cert):
                ui.status(_(b'failed.\n'))
            else:
                ui.status(_(b'done.\n'))
        else:
            ui.status(_(b'full certificate chain is available\n'))
    finally:
        s.close()

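# Illustrative sketch (hypothetical helper): the address resolution in
# debugssl, reduced to its core. An explicit port wins; otherwise the
# scheme's well-known default applies.
def _resolve_addr_sketch(host, scheme, port=None):
    """Return a (host, port) pair for the https/ssh schemes."""
    defaultport = {b'https': 443, b'ssh': 22}
    if scheme not in defaultport:
        raise ValueError('only https and ssh connections are supported')
    return (host, int(port or defaultport[scheme]))
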
@command(
    b"debugbackupbundle",
    [
        (
            b"",
            b"recover",
            b"",
            b"brings the specified changeset back into the repository",
        )
    ]
    + cmdutil.logopts,
    _(b"hg debugbackupbundle [--recover HASH]"),
)
def debugbackupbundle(ui, repo, *pats, **opts):
    """lists the changesets available in backup bundles

    Without any arguments, this command prints a list of the changesets in each
    backup bundle.

    --recover takes a changeset hash and unbundles the first bundle that
    contains that hash, which puts that changeset back in your repository.

    --verbose will print the entire commit message and the bundle path for that
    backup.
    """
    backups = list(
        filter(
            os.path.isfile, glob.glob(repo.vfs.join(b"strip-backup") + b"/*.hg")
        )
    )
    backups.sort(key=lambda x: os.path.getmtime(x), reverse=True)

    opts = pycompat.byteskwargs(opts)
    opts[b"bundle"] = b""
    opts[b"force"] = None
    limit = logcmdutil.getlimit(opts)

    def display(other, chlist, displayer):
        if opts.get(b"newest_first"):
            chlist.reverse()
        count = 0
        for n in chlist:
            if limit is not None and count >= limit:
                break
            parents = [
                True for p in other.changelog.parents(n) if p != repo.nullid
            ]
            if opts.get(b"no_merges") and len(parents) == 2:
                continue
            count += 1
            displayer.show(other[n])

    recovernode = opts.get(b"recover")
    if recovernode:
        if scmutil.isrevsymbol(repo, recovernode):
            ui.warn(_(b"%s already exists in the repo\n") % recovernode)
            return
    elif backups:
        msg = _(
            b"Recover changesets using: hg debugbackupbundle --recover "
            b"<changeset hash>\n\nAvailable backup changesets:"
        )
        ui.status(msg, label=b"status.removed")
    else:
        ui.status(_(b"no backup changesets found\n"))
        return

    for backup in backups:
        # Much of this is copied from the hg incoming logic
        source = os.path.relpath(backup, encoding.getcwd())
        source, branches = urlutil.get_unique_pull_path(
            b'debugbackupbundle',
            repo,
            ui,
            source,
            default_branches=opts.get(b'branch'),
        )
        try:
            other = hg.peer(repo, opts, source)
        except error.LookupError as ex:
            msg = _(b"\nwarning: unable to open bundle %s") % source
            hint = _(b"\n(missing parent rev %s)\n") % short(ex.name)
            ui.warn(msg, hint=hint)
            continue
        revs, checkout = hg.addbranchrevs(
            repo, other, branches, opts.get(b"rev")
        )

        if revs:
            revs = [other.lookup(rev) for rev in revs]

        with ui.silent():
            try:
                other, chlist, cleanupfn = bundlerepo.getremotechanges(
                    ui, repo, other, revs, opts[b"bundle"], opts[b"force"]
                )
            except error.LookupError:
                continue

        try:
            if not chlist:
                continue
            if recovernode:
                with repo.lock(), repo.transaction(b"unbundle") as tr:
                    if scmutil.isrevsymbol(other, recovernode):
                        ui.status(_(b"Unbundling %s\n") % (recovernode))
                        f = hg.openpath(ui, source)
                        gen = exchange.readbundle(ui, f, source)
                        if isinstance(gen, bundle2.unbundle20):
                            bundle2.applybundle(
                                repo,
                                gen,
                                tr,
                                source=b"unbundle",
                                url=b"bundle:" + source,
                            )
                        else:
                            gen.apply(repo, b"unbundle", b"bundle:" + source)
                        break
            else:
                backupdate = encoding.strtolocal(
                    time.strftime(
                        "%a %H:%M, %Y-%m-%d",
                        time.localtime(os.path.getmtime(source)),
                    )
                )
                ui.status(b"\n%s\n" % (backupdate.ljust(50)))
                if ui.verbose:
                    ui.status(b"%s%s\n" % (b"bundle:".ljust(13), source))
                else:
                    opts[
                        b"template"
                    ] = b"{label('status.modified', node|short)} {desc|firstline}\n"
                    displayer = logcmdutil.changesetdisplayer(
                        ui, other, opts, False
                    )
                    display(other, chlist, displayer)
                    displayer.close()
        finally:
            cleanupfn()

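# Illustrative sketch (hypothetical helper): the backup discovery above is a
# glob over the strip-backup directory plus a newest-first sort by mtime.
def _find_backups_sketch(backupdir):
    """Return backup bundle paths under ``backupdir``, newest first."""
    candidates = glob.glob(os.path.join(backupdir, '*.hg'))
    backups = [p for p in candidates if os.path.isfile(p)]
    backups.sort(key=os.path.getmtime, reverse=True)
    return backups
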
@command(
    b'debugsub',
    [(b'r', b'rev', b'', _(b'revision to check'), _(b'REV'))],
    _(b'[-r REV] [REV]'),
)
def debugsub(ui, repo, rev=None):
    ctx = scmutil.revsingle(repo, rev, None)
    for k, v in sorted(ctx.substate.items()):
        ui.writenoi18n(b'path %s\n' % k)
        ui.writenoi18n(b' source %s\n' % v[0])
        ui.writenoi18n(b' revision %s\n' % v[1])

@command(b'debugshell', optionalrepo=True)
def debugshell(ui, repo):
    """run an interactive Python interpreter

    The local namespace is provided with a reference to the ui and
    the repo instance (if available).
    """
    import code

    imported_objects = {
        'ui': ui,
        'repo': repo,
    }

    code.interact(local=imported_objects)

@command(
    b'debugsuccessorssets',
    [(b'', b'closest', False, _(b'return closest successors sets only'))],
    _(b'[REV]'),
)
def debugsuccessorssets(ui, repo, *revs, **opts):
    """show set of successors for revision

    A successors set of changeset A is a consistent group of revisions that
    succeed A. It contains only non-obsolete changesets unless the --closest
    option is used.

    In most cases a changeset A has a single successors set containing a single
    successor (changeset A replaced by A').

    A changeset that is made obsolete with no successors is called "pruned".
    Such changesets have no successors sets at all.

    A changeset that has been "split" will have a successors set containing
    more than one successor.

    A changeset that has been rewritten in multiple different ways is called
    "divergent". Such changesets have multiple successor sets (each of which
    may also be split, i.e. have multiple successors).

    Results are displayed as follows::

        <rev1>
            <successors-1A>
        <rev2>
            <successors-2A>
            <successors-2B1> <successors-2B2> <successors-2B3>

    Here rev2 has two possible (i.e. divergent) successors sets. The first
    holds one element, whereas the second holds three (i.e. the changeset has
    been split).
    """
    # passed to successorssets caching computation from one call to another
    cache = {}
    ctx2str = bytes
    node2str = short
    for rev in scmutil.revrange(repo, revs):
        ctx = repo[rev]
        ui.write(b'%s\n' % ctx2str(ctx))
        for succsset in obsutil.successorssets(
            repo, ctx.node(), closest=opts['closest'], cache=cache
        ):
            if succsset:
                ui.write(b'    ')
                ui.write(node2str(succsset[0]))
                for node in succsset[1:]:
                    ui.write(b' ')
                    ui.write(node2str(node))
            ui.write(b'\n')


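# Illustrative sketch, not part of the original module: the same
# obsutil.successorssets() query the command above performs, reduced to a
# plain data structure. Intended for experimentation from `hg debugshell`,
# where `repo` is already available; the helper name is hypothetical.
def _demo_successorssets(repo, rev=b'.'):
    """Return the successors sets of ``rev`` as a list of lists of short
    hashes, mirroring the nested display produced by debugsuccessorssets."""
    ctx = repo[rev]
    cache = {}  # the real command reuses this across revisions for speed
    return [
        [short(node) for node in succsset]
        for succsset in obsutil.successorssets(repo, ctx.node(), cache=cache)
    ]

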
@command(b'debugtagscache', [])
def debugtagscache(ui, repo):
    """display the contents of .hg/cache/hgtagsfnodes1"""
    cache = tagsmod.hgtagsfnodescache(repo.unfiltered())
    flog = repo.file(b'.hgtags')
    for r in repo:
        node = repo[r].node()
        tagsnode = cache.getfnode(node, computemissing=False)
        if tagsnode:
            tagsnodedisplay = hex(tagsnode)
            if not flog.hasnode(tagsnode):
                tagsnodedisplay += b' (unknown node)'
        elif tagsnode is None:
            tagsnodedisplay = b'missing'
        else:
            tagsnodedisplay = b'invalid'

        ui.write(b'%d %s %s\n' % (r, hex(node), tagsnodedisplay))


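# Illustrative sketch, not part of the original module: a single-revision
# variant of the cache walk above, handy from `hg debugshell`. The helper
# name is hypothetical.
def _demo_tagsfnode(repo, rev=b'.'):
    """Return the cached .hgtags filenode for ``rev``, or None when the
    hgtagsfnodes1 cache has no entry (computemissing=False never computes)."""
    cache = tagsmod.hgtagsfnodescache(repo.unfiltered())
    return cache.getfnode(repo[rev].node(), computemissing=False)

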
@command(
    b'debugtemplate',
    [
        (b'r', b'rev', [], _(b'apply template on changesets'), _(b'REV')),
        (b'D', b'define', [], _(b'define template keyword'), _(b'KEY=VALUE')),
    ],
    _(b'[-r REV]... [-D KEY=VALUE]... TEMPLATE'),
    optionalrepo=True,
)
def debugtemplate(ui, repo, tmpl, **opts):
    """parse and apply a template

    If -r/--rev is given, the template is processed as a log template and
    applied to the given changesets. Otherwise, it is processed as a generic
    template.

    Use --verbose to print the parsed tree.
    """
    revs = None
    if opts['rev']:
        if repo is None:
            raise error.RepoError(
                _(b'there is no Mercurial repository here (.hg not found)')
            )
        revs = scmutil.revrange(repo, opts['rev'])

    props = {}
    for d in opts['define']:
        try:
            k, v = (e.strip() for e in d.split(b'=', 1))
            if not k or k == b'ui':
                raise ValueError
            props[k] = v
        except ValueError:
            raise error.Abort(_(b'malformed keyword definition: %s') % d)

    if ui.verbose:
        aliases = ui.configitems(b'templatealias')
        tree = templater.parse(tmpl)
        ui.note(templater.prettyformat(tree), b'\n')
        newtree = templater.expandaliases(tree, aliases)
        if newtree != tree:
            ui.notenoi18n(
                b"* expanded:\n", templater.prettyformat(newtree), b'\n'
            )

    if revs is None:
        tres = formatter.templateresources(ui, repo)
        t = formatter.maketemplater(ui, tmpl, resources=tres)
        if ui.verbose:
            kwds, funcs = t.symbolsuseddefault()
            ui.writenoi18n(b"* keywords: %s\n" % b', '.join(sorted(kwds)))
            ui.writenoi18n(b"* functions: %s\n" % b', '.join(sorted(funcs)))
        ui.write(t.renderdefault(props))
    else:
        displayer = logcmdutil.maketemplater(ui, repo, tmpl)
        if ui.verbose:
            kwds, funcs = displayer.t.symbolsuseddefault()
            ui.writenoi18n(b"* keywords: %s\n" % b', '.join(sorted(kwds)))
            ui.writenoi18n(b"* functions: %s\n" % b', '.join(sorted(funcs)))
        for r in revs:
            displayer.show(repo[r], **pycompat.strkwargs(props))
        displayer.close()


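# Illustrative sketch, not part of the original module: the two rendering
# paths debugtemplate chooses between, assuming `ui` and `repo` objects such
# as those provided by `hg debugshell`. The helper name is hypothetical.
def _demo_render_template(ui, repo, rev=None):
    if rev is None:
        # generic template: no changeset bound, only explicit properties
        t = formatter.maketemplater(ui, b'{word(0, "hello world")}\n')
        return t.renderdefault({})
    # log template: keywords like {node} resolve against a changeset
    displayer = logcmdutil.maketemplater(
        ui, repo, b'{node|short} {desc|firstline}\n'
    )
    displayer.show(repo[rev])
    displayer.close()

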
@command(
    b'debuguigetpass',
    [
        (b'p', b'prompt', b'', _(b'prompt text'), _(b'TEXT')),
    ],
    _(b'[-p TEXT]'),
    norepo=True,
)
def debuguigetpass(ui, prompt=b''):
    """show prompt to type password"""
    r = ui.getpass(prompt)
    if r is None:
        r = b"<default response>"
    ui.writenoi18n(b'response: %s\n' % r)


@command(
    b'debuguiprompt',
    [
        (b'p', b'prompt', b'', _(b'prompt text'), _(b'TEXT')),
    ],
    _(b'[-p TEXT]'),
    norepo=True,
)
def debuguiprompt(ui, prompt=b''):
    """show plain prompt"""
    r = ui.prompt(prompt)
    ui.writenoi18n(b'response: %s\n' % r)


@command(b'debugupdatecaches', [])
def debugupdatecaches(ui, repo, *pats, **opts):
    """warm all known caches in the repository"""
    with repo.wlock(), repo.lock():
        repo.updatecaches(caches=repository.CACHES_ALL)


@command(
    b'debugupgraderepo',
    [
        (
            b'o',
            b'optimize',
            [],
            _(b'extra optimization to perform'),
            _(b'NAME'),
        ),
        (b'', b'run', False, _(b'performs an upgrade')),
        (b'', b'backup', True, _(b'keep the old repository content around')),
        (b'', b'changelog', None, _(b'select the changelog for upgrade')),
        (b'', b'manifest', None, _(b'select the manifest for upgrade')),
        (b'', b'filelogs', None, _(b'select all filelogs for upgrade')),
    ],
)
def debugupgraderepo(ui, repo, run=False, optimize=None, backup=True, **opts):
    """upgrade a repository to use different features

    If no arguments are specified, the repository is evaluated for upgrade
    and a list of problems and potential optimizations is printed.

    With ``--run``, a repository upgrade is performed. Behavior of the upgrade
    can be influenced via additional arguments. More details will be provided
    by the command output when run without ``--run``.

    During the upgrade, the repository will be locked and no writes will be
    allowed.

    At the end of the upgrade, the repository may not be readable while new
    repository data is swapped in. This window will be as long as it takes to
    rename some directories inside the ``.hg`` directory. On most machines, this
    should complete almost instantaneously and the chances of a consumer being
    unable to access the repository should be low.

    By default, all revlogs will be upgraded. You can restrict this using flags
    such as `--manifest`:

    * `--manifest`: only optimize the manifest
    * `--no-manifest`: optimize all revlogs but the manifest
    * `--changelog`: optimize the changelog only
    * `--no-changelog --no-manifest`: optimize filelogs only
    * `--filelogs`: optimize the filelogs only
    * `--no-changelog --no-manifest --no-filelogs`: skip all revlog optimizations
    """
    return upgrade.upgraderepo(
        ui, repo, run=run, optimize=set(optimize), backup=backup, **opts
    )


@command(
    b'debugwalk', cmdutil.walkopts, _(b'[OPTION]... [FILE]...'), inferrepo=True
)
def debugwalk(ui, repo, *pats, **opts):
    """show how files match on given patterns"""
    opts = pycompat.byteskwargs(opts)
    m = scmutil.match(repo[None], pats, opts)
    if ui.verbose:
        ui.writenoi18n(b'* matcher:\n', stringutil.prettyrepr(m), b'\n')
    items = list(repo[None].walk(m))
    if not items:
        return
    f = lambda fn: fn
    if ui.configbool(b'ui', b'slash') and pycompat.ossep != b'/':
        f = lambda fn: util.normpath(fn)
    fmt = b'f  %%-%ds  %%-%ds  %%s' % (
        max([len(abs) for abs in items]),
        max([len(repo.pathto(abs)) for abs in items]),
    )
    for abs in items:
        line = fmt % (
            abs,
            f(repo.pathto(abs)),
            m.exact(abs) and b'exact' or b'',
        )
        ui.write(b"%s\n" % line.rstrip())


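# Illustrative sketch, not part of the original module: building the same
# kind of matcher debugwalk uses and classifying paths with it. Assumes a
# `repo` object (e.g. from `hg debugshell`); the helper name and the sample
# pattern are hypothetical.
def _demo_walk_patterns(repo, pats=(b'glob:**.py',)):
    m = scmutil.match(repo[None], list(pats), {})
    # m.exact(f) mirrors the 'exact' column printed by debugwalk
    return [(f, m.exact(f)) for f in repo[None].walk(m)]

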
@command(b'debugwhyunstable', [], _(b'REV'))
def debugwhyunstable(ui, repo, rev):
    """explain instabilities of a changeset"""
    for entry in obsutil.whyunstable(repo, scmutil.revsingle(repo, rev)):
        dnodes = b''
        if entry.get(b'divergentnodes'):
            dnodes = (
                b' '.join(
                    b'%s (%s)' % (ctx.hex(), ctx.phasestr())
                    for ctx in entry[b'divergentnodes']
                )
                + b' '
            )
        ui.write(
            b'%s: %s%s %s\n'
            % (entry[b'instability'], dnodes, entry[b'reason'], entry[b'node'])
        )


@command(
    b'debugwireargs',
    [
        (b'', b'three', b'', b'three'),
        (b'', b'four', b'', b'four'),
        (b'', b'five', b'', b'five'),
    ]
    + cmdutil.remoteopts,
    _(b'REPO [OPTIONS]... [ONE [TWO]]'),
    norepo=True,
)
def debugwireargs(ui, repopath, *vals, **opts):
    opts = pycompat.byteskwargs(opts)
    repo = hg.peer(ui, opts, repopath)
    try:
        for opt in cmdutil.remoteopts:
            del opts[opt[1]]
        args = {}
        for k, v in pycompat.iteritems(opts):
            if v:
                args[k] = v
        args = pycompat.strkwargs(args)
        # run twice to check that we don't mess up the stream for the next command
        res1 = repo.debugwireargs(*vals, **args)
        res2 = repo.debugwireargs(*vals, **args)
        ui.write(b"%s\n" % res1)
        if res1 != res2:
            ui.warn(b"%s\n" % res2)
    finally:
        repo.close()


def _parsewirelangblocks(fh):
    activeaction = None
    blocklines = []
    lastindent = 0

    for line in fh:
        line = line.rstrip()
        if not line:
            continue

        if line.startswith(b'#'):
            continue

        if not line.startswith(b' '):
            # New block. Flush previous one.
            if activeaction:
                yield activeaction, blocklines

            activeaction = line
            blocklines = []
            lastindent = 0
            continue

        # Else we start with an indent.

        if not activeaction:
            raise error.Abort(_(b'indented line outside of block'))

        indent = len(line) - len(line.lstrip())

        # If this line is indented more than the last line, concatenate it.
        if indent > lastindent and blocklines:
            blocklines[-1] += line.lstrip()
        else:
            blocklines.append(line)
            lastindent = indent

    # Flush last block.
    if activeaction:
        yield activeaction, blocklines


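# Illustrative sketch, not part of the original module: feeding
# _parsewirelangblocks() a hand-written script, the way debugwireproto
# feeds it ui.fin below. The helper name is hypothetical.
def _demo_parsewirelang():
    import io

    script = io.BytesIO(
        b'# comments are skipped\n'
        b'command listkeys\n'
        b'    namespace bookmarks\n'
        b'raw\n'
        b'    0000\n'
    )
    # -> [(b'command listkeys', [b'    namespace bookmarks']),
    #     (b'raw', [b'    0000'])]
    return list(_parsewirelangblocks(script))

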
@command(
    b'debugwireproto',
    [
        (b'', b'localssh', False, _(b'start an SSH server for this repo')),
        (b'', b'peer', b'', _(b'construct a specific version of the peer')),
        (
            b'',
            b'noreadstderr',
            False,
            _(b'do not read from stderr of the remote'),
        ),
        (
            b'',
            b'nologhandshake',
            False,
            _(b'do not log I/O related to the peer handshake'),
        ),
    ]
    + cmdutil.remoteopts,
    _(b'[PATH]'),
    optionalrepo=True,
)
def debugwireproto(ui, repo, path=None, **opts):
    """send wire protocol commands to a server

    This command can be used to issue wire protocol commands to remote
    peers and to debug the raw data being exchanged.

    ``--localssh`` will start an SSH server against the current repository
    and connect to that. By default, the connection will perform a handshake
    and establish an appropriate peer instance.

    ``--peer`` can be used to bypass the handshake protocol and construct a
    peer instance using the specified class type. Valid values are ``raw``,
    ``http2``, ``ssh1``, and ``ssh2``. ``raw`` instances only allow sending
    raw data payloads and don't support higher-level command actions.

    ``--noreadstderr`` can be used to disable automatic reading from stderr
    of the peer (for SSH connections only). Disabling automatic reading of
    stderr is useful for making output more deterministic.

    Commands are issued via a mini language which is specified via stdin.
    The language consists of individual actions to perform. An action is
    defined by a block. A block is defined as a line with no leading
    space followed by 0 or more lines with leading space. Blocks are
    effectively a high-level command with additional metadata.

    Lines beginning with ``#`` are ignored.

    The following sections denote available actions.

    raw
    ---

    Send raw data to the server.

    The block payload contains the raw data to send as one atomic send
    operation. The data may not actually be delivered in a single system
    call: it depends on the abilities of the transport being used.

    Each line in the block is de-indented and concatenated. Then, that
    value is evaluated as a Python b'' literal. This allows the use of
    backslash escaping, etc.

    raw+
    ----

    Behaves like ``raw`` except flushes output afterwards.

    command <X>
    -----------

    Send a request to run a named command, whose name follows the ``command``
    string.

    Arguments to the command are defined as lines in this block. The format of
    each line is ``<key> <value>``. e.g.::

        command listkeys
            namespace bookmarks

    If the value begins with ``eval:``, it will be interpreted as a Python
    literal expression. Otherwise values are interpreted as Python b'' literals.
    This allows sending complex types and encoding special byte sequences via
    backslash escaping.

    The following arguments have special meaning:

    ``PUSHFILE``
        When defined, the *push* mechanism of the peer will be used instead
        of the static request-response mechanism and the content of the
        file specified in the value of this argument will be sent as the
        command payload.

        This can be used to submit a local bundle file to the remote.

    batchbegin
    ----------

    Instruct the peer to begin a batched send.

    All ``command`` blocks are queued for execution until the next
    ``batchsubmit`` block.

    batchsubmit
    -----------

    Submit previously queued ``command`` blocks as a batch request.

    This action MUST be paired with a ``batchbegin`` action.

    httprequest <method> <path>
    ---------------------------

    (HTTP peer only)

    Send an HTTP request to the peer.

    The HTTP request line follows the ``httprequest`` action. e.g. ``GET /foo``.

    Arguments of the form ``<key>: <value>`` are interpreted as HTTP request
    headers to add to the request. e.g. ``Accept: foo``.

    The following arguments are special:

    ``BODYFILE``
        The content of the file defined as the value to this argument will be
        transferred verbatim as the HTTP request body.

    ``frame <type> <flags> <payload>``
        Send a unified protocol frame as part of the request body.

        All frames will be collected and sent as the body to the HTTP
        request.

    close
    -----

    Close the connection to the server.

    flush
    -----

    Flush data written to the server.

    readavailable
    -------------

    Close the write end of the connection and read all available data from
    the server.

    If the connection to the server encompasses multiple pipes, we poll both
    pipes and read available data.

    readline
    --------

    Read a line of output from the server. If there are multiple output
    pipes, reads only the main pipe.

    ereadline
    ---------

    Like ``readline``, but read from the stderr pipe, if available.

    read <X>
    --------

    ``read()`` N bytes from the server's main output pipe.

    eread <X>
    ---------

    ``read()`` N bytes from the server's stderr pipe, if available.

    Specifying Unified Frame-Based Protocol Frames
    ----------------------------------------------

    It is possible to emit *Unified Frame-Based Protocol* frames by using
    special syntax.

    A frame is composed as a type, flags, and payload. These can be parsed
    from a string of the form::

        <request-id> <stream-id> <stream-flags> <type> <flags> <payload>

    ``request-id`` and ``stream-id`` are integers defining the request and
    stream identifiers.

    ``type`` can be an integer value for the frame type or the string name
    of the type. The strings are defined in ``wireprotoframing.py``. e.g.
    ``command-name``.

    ``stream-flags`` and ``flags`` are a ``|`` delimited list of flag
    components. Each component (and there can be just one) can be an integer
    or a flag name for stream flags or frame flags, respectively. Values are
    resolved to integers and then bitwise OR'd together.

    ``payload`` represents the raw frame payload. If it begins with
    ``cbor:``, the following string is evaluated as Python code and the
    resulting object is fed into a CBOR encoder. Otherwise it is interpreted
    as a Python byte string literal.
    """
    opts = pycompat.byteskwargs(opts)

    if opts[b'localssh'] and not repo:
        raise error.Abort(_(b'--localssh requires a repository'))

    if opts[b'peer'] and opts[b'peer'] not in (
        b'raw',
        b'http2',
        b'ssh1',
        b'ssh2',
    ):
        raise error.Abort(
            _(b'invalid value for --peer'),
            hint=_(b'valid values are "raw", "http2", "ssh1", and "ssh2"'),
        )

    if path and opts[b'localssh']:
        raise error.Abort(_(b'cannot specify --localssh with an explicit path'))

    if ui.interactive():
        ui.write(_(b'(waiting for commands on stdin)\n'))

    blocks = list(_parsewirelangblocks(ui.fin))

    proc = None
    stdin = None
    stdout = None
    stderr = None
    opener = None

    if opts[b'localssh']:
        # We start the SSH server in its own process so there is process
        # separation. This prevents a whole class of potential bugs around
        # shared state from interfering with server operation.
        args = procutil.hgcmd() + [
            b'-R',
            repo.root,
            b'debugserve',
            b'--sshstdio',
        ]
        proc = subprocess.Popen(
            pycompat.rapply(procutil.tonativestr, args),
            stdin=subprocess.PIPE,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            bufsize=0,
        )

        stdin = proc.stdin
        stdout = proc.stdout
        stderr = proc.stderr

        # We turn the pipes into observers so we can log I/O.
        if ui.verbose or opts[b'peer'] == b'raw':
            stdin = util.makeloggingfileobject(
                ui, proc.stdin, b'i', logdata=True
            )
            stdout = util.makeloggingfileobject(
                ui, proc.stdout, b'o', logdata=True
            )
            stderr = util.makeloggingfileobject(
                ui, proc.stderr, b'e', logdata=True
            )

        # --localssh also implies the peer connection settings.

        url = b'ssh://localserver'
        autoreadstderr = not opts[b'noreadstderr']

        if opts[b'peer'] == b'ssh1':
            ui.write(_(b'creating ssh peer for wire protocol version 1\n'))
            peer = sshpeer.sshv1peer(
                ui,
                url,
                proc,
                stdin,
                stdout,
                stderr,
                None,
                autoreadstderr=autoreadstderr,
            )
        elif opts[b'peer'] == b'ssh2':
            ui.write(_(b'creating ssh peer for wire protocol version 2\n'))
            peer = sshpeer.sshv2peer(
                ui,
                url,
                proc,
                stdin,
                stdout,
                stderr,
                None,
                autoreadstderr=autoreadstderr,
            )
        elif opts[b'peer'] == b'raw':
            ui.write(_(b'using raw connection to peer\n'))
            peer = None
        else:
            ui.write(_(b'creating ssh peer from handshake results\n'))
            peer = sshpeer.makepeer(
                ui,
                url,
                proc,
                stdin,
                stdout,
                stderr,
                autoreadstderr=autoreadstderr,
            )

    elif path:
        # We bypass hg.peer() so we can proxy the sockets.
        # TODO consider not doing this because we skip
        # ``hg.wirepeersetupfuncs`` and potentially other useful functionality.
        u = urlutil.url(path)
        if u.scheme != b'http':
            raise error.Abort(_(b'only http:// paths are currently supported'))

        url, authinfo = u.authinfo()
        openerargs = {
            'useragent': b'Mercurial debugwireproto',
        }

        # Turn pipes/sockets into observers so we can log I/O.
        if ui.verbose:
            openerargs.update(
                {
                    'loggingfh': ui,
                    'loggingname': b's',
                    'loggingopts': {
                        'logdata': True,
                        'logdataapis': False,
                    },
                }
            )

        if ui.debugflag:
            openerargs['loggingopts']['logdataapis'] = True

        # Don't send default headers when in raw mode. This allows us to
        # bypass most of the behavior of our URL handling code so we can
        # have near complete control over what's sent on the wire.
        if opts[b'peer'] == b'raw':
            openerargs['sendaccept'] = False

        opener = urlmod.opener(ui, authinfo, **openerargs)

        if opts[b'peer'] == b'http2':
            ui.write(_(b'creating http peer for wire protocol version 2\n'))
            # We go through makepeer() because we need an API descriptor for
            # the peer instance to be useful.
            maybe_silent = (
                ui.silent()
                if opts[b'nologhandshake']
                else util.nullcontextmanager()
            )
            with maybe_silent, ui.configoverride(
                {(b'experimental', b'httppeer.advertise-v2'): True}
            ):
                peer = httppeer.makepeer(ui, path, opener=opener)

            if not isinstance(peer, httppeer.httpv2peer):
                raise error.Abort(
                    _(
                        b'could not instantiate HTTP peer for '
                        b'wire protocol version 2'
                    ),
                    hint=_(
                        b'the server may not have the feature '
                        b'enabled or is not allowing this '
                        b'client version'
                    ),
                )

        elif opts[b'peer'] == b'raw':
            ui.write(_(b'using raw connection to peer\n'))
            peer = None
        elif opts[b'peer']:
            raise error.Abort(
                _(b'--peer %s not supported with HTTP peers') % opts[b'peer']
            )
        else:
            peer = httppeer.makepeer(ui, path, opener=opener)

        # We /could/ populate stdin/stdout with sock.makefile()...
    else:
        raise error.Abort(_(b'unsupported connection configuration'))

    batchedcommands = None

    # Now perform actions based on the parsed wire language instructions.
    for action, lines in blocks:
        if action in (b'raw', b'raw+'):
            if not stdin:
                raise error.Abort(_(b'cannot call raw/raw+ on this peer'))

            # Concatenate the data together.
            data = b''.join(l.lstrip() for l in lines)
            data = stringutil.unescapestr(data)
            stdin.write(data)

            if action == b'raw+':
                stdin.flush()
        elif action == b'flush':
            if not stdin:
                raise error.Abort(_(b'cannot call flush on this peer'))
            stdin.flush()
        elif action.startswith(b'command'):
            if not peer:
                raise error.Abort(
                    _(
                        b'cannot send commands unless peer instance '
                        b'is available'
                    )
                )

            command = action.split(b' ', 1)[1]

            args = {}
            for line in lines:
                # We need to allow empty values.
                fields = line.lstrip().split(b' ', 1)
                if len(fields) == 1:
                    key = fields[0]
                    value = b''
                else:
                    key, value = fields

                if value.startswith(b'eval:'):
                    value = stringutil.evalpythonliteral(value[5:])
                else:
                    value = stringutil.unescapestr(value)

                args[key] = value

            if batchedcommands is not None:
                batchedcommands.append((command, args))
                continue

            ui.status(_(b'sending %s command\n') % command)

            if b'PUSHFILE' in args:
                with open(args[b'PUSHFILE'], 'rb') as fh:
                    del args[b'PUSHFILE']
                    res, output = peer._callpush(
                        command, fh, **pycompat.strkwargs(args)
                    )
                ui.status(_(b'result: %s\n') % stringutil.escapestr(res))
                ui.status(
                    _(b'remote output: %s\n') % stringutil.escapestr(output)
                )
            else:
                with peer.commandexecutor() as e:
                    res = e.callcommand(command, args).result()

                if isinstance(res, wireprotov2peer.commandresponse):
                    val = res.objects()
                    ui.status(
                        _(b'response: %s\n')
                        % stringutil.pprint(val, bprefix=True, indent=2)
                    )
                else:
                    ui.status(
                        _(b'response: %s\n')
                        % stringutil.pprint(res, bprefix=True, indent=2)
                    )

        elif action == b'batchbegin':
            if batchedcommands is not None:
                raise error.Abort(_(b'nested batchbegin not allowed'))

            batchedcommands = []
        elif action == b'batchsubmit':
            # There is a batching API we could go through. But it would be
            # difficult to normalize requests into function calls. It is easier
            # to bypass this layer and normalize to commands + args.
            ui.status(
                _(b'sending batch with %d sub-commands\n')
                % len(batchedcommands)
            )
            assert peer is not None
            for i, chunk in enumerate(peer._submitbatch(batchedcommands)):
                ui.status(
                    _(b'response #%d: %s\n') % (i, stringutil.escapestr(chunk))
                )

            batchedcommands = None

        elif action.startswith(b'httprequest '):
            if not opener:
                raise error.Abort(
                    _(b'cannot use httprequest without an HTTP peer')
                )

            request = action.split(b' ', 2)
            if len(request) != 3:
                raise error.Abort(
                    _(
                        b'invalid httprequest: expected format is '
                        b'"httprequest <method> <path>"'
                    )
                )

            method, httppath = request[1:]
            headers = {}
            body = None
            frames = []
            for line in lines:
                line = line.lstrip()
                m = re.match(b'^([a-zA-Z0-9_-]+): (.*)$', line)
                if m:
                    # Headers need to use native strings.
                    key = pycompat.strurl(m.group(1))
                    value = pycompat.strurl(m.group(2))
                    headers[key] = value
                    continue

                if line.startswith(b'BODYFILE '):
                    # The filename follows the ``BODYFILE `` prefix.
                    with open(line.split(b' ', 1)[1], 'rb') as fh:
                        body = fh.read()
                elif line.startswith(b'frame '):
                    frame = wireprotoframing.makeframefromhumanstring(
                        line[len(b'frame ') :]
                    )

                    frames.append(frame)
                else:
                    raise error.Abort(
                        _(b'unknown argument to httprequest: %s') % line
                    )

            url = path + httppath

            if frames:
                body = b''.join(bytes(f) for f in frames)

            req = urlmod.urlreq.request(pycompat.strurl(url), body, headers)

            # urllib.Request insists on using has_data() as a proxy for
            # determining the request method. Override that to use our
            # explicitly requested method.
            req.get_method = lambda: pycompat.sysstr(method)

            try:
                res = opener.open(req)
                body = res.read()
            except util.urlerr.urlerror as e:
                # read() method must be called, but only exists in Python 2
                getattr(e, 'read', lambda: None)()
                continue

            ct = res.headers.get('Content-Type')
            if ct == 'application/mercurial-cbor':
                ui.write(
                    _(b'cbor> %s\n')
                    % stringutil.pprint(
                        cborutil.decodeall(body), bprefix=True, indent=2
                    )
                )

        elif action == b'close':
            assert peer is not None
            peer.close()
        elif action == b'readavailable':
            if not stdout or not stderr:
                raise error.Abort(
                    _(b'readavailable not available on this peer')
                )

            stdin.close()
            stdout.read()
            stderr.read()

        elif action == b'readline':
            if not stdout:
                raise error.Abort(_(b'readline not available on this peer'))
            stdout.readline()
        elif action == b'ereadline':
            if not stderr:
                raise error.Abort(_(b'ereadline not available on this peer'))
            stderr.readline()
        elif action.startswith(b'read '):
            count = int(action.split(b' ', 1)[1])
            if not stdout:
                raise error.Abort(_(b'read not available on this peer'))
            stdout.read(count)
        elif action.startswith(b'eread '):
            count = int(action.split(b' ', 1)[1])
            if not stderr:
                raise error.Abort(_(b'eread not available on this peer'))
            stderr.read(count)
        else:
            raise error.Abort(_(b'unknown action: %s') % action)

    if batchedcommands is not None:
        raise error.Abort(_(b'unclosed "batchbegin" request'))

    if peer:
        peer.close()

    if proc:
        proc.kill()
@@ -1,750 +1,751 b''
1 # dirstatemap.py
1 # dirstatemap.py
2 #
2 #
3 # This software may be used and distributed according to the terms of the
3 # This software may be used and distributed according to the terms of the
4 # GNU General Public License version 2 or any later version.
4 # GNU General Public License version 2 or any later version.
5
5
6 from __future__ import absolute_import
6 from __future__ import absolute_import
7
7
8 import errno
8 import errno
9
9
10 from .i18n import _
10 from .i18n import _
11
11
12 from . import (
12 from . import (
13 error,
13 error,
14 pathutil,
14 pathutil,
15 policy,
15 policy,
16 pycompat,
16 pycompat,
17 txnutil,
17 txnutil,
18 util,
18 util,
19 )
19 )
20
20
21 from .dirstateutils import (
21 from .dirstateutils import (
22 docket as docketmod,
22 docket as docketmod,
23 )
23 )
24
24
25 parsers = policy.importmod('parsers')
25 parsers = policy.importmod('parsers')
26 rustmod = policy.importrust('dirstate')
26 rustmod = policy.importrust('dirstate')
27
27
28 propertycache = util.propertycache
28 propertycache = util.propertycache
29
29
30 DirstateItem = parsers.DirstateItem
30 DirstateItem = parsers.DirstateItem
31
31
32
32
33 # a special value used internally for `size` if the file comes from the other parent
33 # a special value used internally for `size` if the file comes from the other parent
34 FROM_P2 = -2
34 FROM_P2 = -2
35
35
36 # a special value used internally for `size` if the file is modified/merged/added
36 # a special value used internally for `size` if the file is modified/merged/added
37 NONNORMAL = -1
37 NONNORMAL = -1
38
38
39 # a special value used internally for `time` if the time is ambiguous
39 # a special value used internally for `time` if the time is ambiguous
40 AMBIGUOUS_TIME = -1
40 AMBIGUOUS_TIME = -1
41
41
42 rangemask = 0x7FFFFFFF
42 rangemask = 0x7FFFFFFF
43
43
44
44
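Editor's note: a quick illustration (not part of the patch) of how `rangemask` is used below. Legitimate `size`/`mtime` values are truncated to 31 bits so they fit the v1 on-disk format, while the negative sentinels above are stored unmasked:

    rangemask = 0x7FFFFFFF  # same constant as above
    assert (5_000_000_000 & rangemask) == 705_032_704  # truncated into 31 bits
    assert ((2 ** 31 - 1) & rangemask) == 2 ** 31 - 1  # in-range values unchanged
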
45 class dirstatemap(object):
45 class dirstatemap(object):
46 """Map encapsulating the dirstate's contents.
46 """Map encapsulating the dirstate's contents.
47
47
48 The dirstate contains the following state:
48 The dirstate contains the following state:
49
49
50 - `identity` is the identity of the dirstate file, which can be used to
50 - `identity` is the identity of the dirstate file, which can be used to
51 detect when changes have occurred to the dirstate file.
51 detect when changes have occurred to the dirstate file.
52
52
53 - `parents` is a pair containing the parents of the working copy. The
53 - `parents` is a pair containing the parents of the working copy. The
54 parents are updated by calling `setparents`.
54 parents are updated by calling `setparents`.
55
55
56 - the state map maps filenames to tuples of (state, mode, size, mtime),
56 - the state map maps filenames to tuples of (state, mode, size, mtime),
57 where state is a single character representing 'normal', 'added',
57 where state is a single character representing 'normal', 'added',
58 'removed', or 'merged'. It is read by treating the dirstate as a
58 'removed', or 'merged'. It is read by treating the dirstate as a
59 dict. File state is updated by calling the `addfile`, `removefile` and
59 dict. File state is updated by calling the `addfile`, `removefile` and
60 `dropfile` methods.
60 `dropfile` methods.
61
61
62 - `copymap` maps destination filenames to their source filename.
62 - `copymap` maps destination filenames to their source filename.
63
63
64 The dirstate also provides the following views onto the state:
64 The dirstate also provides the following views onto the state:
65
65
66 - `nonnormalset` is a set of the filenames that have state other
66 - `nonnormalset` is a set of the filenames that have state other
67 than 'normal', or are normal but have an mtime of -1 ('normallookup').
67 than 'normal', or are normal but have an mtime of -1 ('normallookup').
68
68
69 - `otherparentset` is a set of the filenames that are marked as coming
69 - `otherparentset` is a set of the filenames that are marked as coming
70 from the second parent when the dirstate is currently being merged.
70 from the second parent when the dirstate is currently being merged.
71
71
72 - `filefoldmap` is a dict mapping normalized filenames to the denormalized
72 - `filefoldmap` is a dict mapping normalized filenames to the denormalized
73 form in which they appear in the dirstate.
73 form in which they appear in the dirstate.
74
74
75 - `dirfoldmap` is a dict mapping normalized directory names to the
75 - `dirfoldmap` is a dict mapping normalized directory names to the
76 denormalized form in which they appear in the dirstate.
76 denormalized form in which they appear in the dirstate.
77 """
77 """
78
78
79 def __init__(self, ui, opener, root, nodeconstants, use_dirstate_v2):
79 def __init__(self, ui, opener, root, nodeconstants, use_dirstate_v2):
80 self._ui = ui
80 self._ui = ui
81 self._opener = opener
81 self._opener = opener
82 self._root = root
82 self._root = root
83 self._filename = b'dirstate'
83 self._filename = b'dirstate'
84 self._nodelen = 20
84 self._nodelen = 20
85 self._nodeconstants = nodeconstants
85 self._nodeconstants = nodeconstants
86 assert (
86 assert (
87 not use_dirstate_v2
87 not use_dirstate_v2
88 ), "should have detected unsupported requirement"
88 ), "should have detected unsupported requirement"
89
89
90 self._parents = None
90 self._parents = None
91 self._dirtyparents = False
91 self._dirtyparents = False
92
92
93 # for consistent view between _pl() and _read() invocations
93 # for consistent view between _pl() and _read() invocations
94 self._pendingmode = None
94 self._pendingmode = None
95
95
96 @propertycache
96 @propertycache
97 def _map(self):
97 def _map(self):
98 self._map = {}
98 self._map = {}
99 self.read()
99 self.read()
100 return self._map
100 return self._map
101
101
102 @propertycache
102 @propertycache
103 def copymap(self):
103 def copymap(self):
104 self.copymap = {}
104 self.copymap = {}
105 self._map
105 self._map
106 return self.copymap
106 return self.copymap
107
107
108 def directories(self):
108 def directories(self):
109 # Rust / dirstate-v2 only
109 # Rust / dirstate-v2 only
110 return []
110 return []
111
111
112 def clear(self):
112 def clear(self):
113 self._map.clear()
113 self._map.clear()
114 self.copymap.clear()
114 self.copymap.clear()
115 self.setparents(self._nodeconstants.nullid, self._nodeconstants.nullid)
115 self.setparents(self._nodeconstants.nullid, self._nodeconstants.nullid)
116 util.clearcachedproperty(self, b"_dirs")
116 util.clearcachedproperty(self, b"_dirs")
117 util.clearcachedproperty(self, b"_alldirs")
117 util.clearcachedproperty(self, b"_alldirs")
118 util.clearcachedproperty(self, b"filefoldmap")
118 util.clearcachedproperty(self, b"filefoldmap")
119 util.clearcachedproperty(self, b"dirfoldmap")
119 util.clearcachedproperty(self, b"dirfoldmap")
120 util.clearcachedproperty(self, b"nonnormalset")
120 util.clearcachedproperty(self, b"nonnormalset")
121 util.clearcachedproperty(self, b"otherparentset")
121 util.clearcachedproperty(self, b"otherparentset")
122
122
123 def items(self):
123 def items(self):
124 return pycompat.iteritems(self._map)
124 return pycompat.iteritems(self._map)
125
125
126 # forward for python2,3 compat
126 # forward for python2,3 compat
127 iteritems = items
127 iteritems = items
128
128
129 def __len__(self):
129 def __len__(self):
130 return len(self._map)
130 return len(self._map)
131
131
132 def __iter__(self):
132 def __iter__(self):
133 return iter(self._map)
133 return iter(self._map)
134
134
135 def get(self, key, default=None):
135 def get(self, key, default=None):
136 return self._map.get(key, default)
136 return self._map.get(key, default)
137
137
138 def __contains__(self, key):
138 def __contains__(self, key):
139 return key in self._map
139 return key in self._map
140
140
141 def __getitem__(self, key):
141 def __getitem__(self, key):
142 return self._map[key]
142 return self._map[key]
143
143
144 def keys(self):
144 def keys(self):
145 return self._map.keys()
145 return self._map.keys()
146
146
147 def preload(self):
147 def preload(self):
148 """Loads the underlying data, if it's not already loaded"""
148 """Loads the underlying data, if it's not already loaded"""
149 self._map
149 self._map
150
150
151 def addfile(
151 def addfile(
152 self,
152 self,
153 f,
153 f,
154 mode=0,
154 mode=0,
155 size=None,
155 size=None,
156 mtime=None,
156 mtime=None,
157 added=False,
157 added=False,
158 merged=False,
158 merged=False,
159 from_p2=False,
159 from_p2=False,
160 possibly_dirty=False,
160 possibly_dirty=False,
161 ):
161 ):
162 """Add a tracked file to the dirstate."""
162 """Add a tracked file to the dirstate."""
163 if added:
163 if added:
164 assert not merged
164 assert not merged
165 assert not possibly_dirty
165 assert not possibly_dirty
166 assert not from_p2
166 assert not from_p2
167 state = b'a'
167 state = b'a'
168 size = NONNORMAL
168 size = NONNORMAL
169 mtime = AMBIGUOUS_TIME
169 mtime = AMBIGUOUS_TIME
170 elif merged:
170 elif merged:
171 assert not possibly_dirty
171 assert not possibly_dirty
172 assert not from_p2
172 assert not from_p2
173 state = b'm'
173 state = b'm'
174 size = FROM_P2
174 size = FROM_P2
175 mtime = AMBIGUOUS_TIME
175 mtime = AMBIGUOUS_TIME
176 elif from_p2:
176 elif from_p2:
177 assert not possibly_dirty
177 assert not possibly_dirty
178 state = b'n'
178 state = b'n'
179 size = FROM_P2
179 size = FROM_P2
180 mtime = AMBIGUOUS_TIME
180 mtime = AMBIGUOUS_TIME
181 elif possibly_dirty:
181 elif possibly_dirty:
182 state = b'n'
182 state = b'n'
183 size = NONNORMAL
183 size = NONNORMAL
184 mtime = AMBIGUOUS_TIME
184 mtime = AMBIGUOUS_TIME
185 else:
185 else:
186 assert size != FROM_P2
186 assert size != FROM_P2
187 assert size != NONNORMAL
187 assert size != NONNORMAL
188 state = b'n'
188 state = b'n'
189 size = size & rangemask
189 size = size & rangemask
190 mtime = mtime & rangemask
190 mtime = mtime & rangemask
191 assert state is not None
191 assert state is not None
192 assert size is not None
192 assert size is not None
193 assert mtime is not None
193 assert mtime is not None
194 old_entry = self.get(f)
194 old_entry = self.get(f)
195 if (
195 if (
196 old_entry is None or old_entry.removed
196 old_entry is None or old_entry.removed
197 ) and "_dirs" in self.__dict__:
197 ) and "_dirs" in self.__dict__:
198 self._dirs.addpath(f)
198 self._dirs.addpath(f)
199 if old_entry is None and "_alldirs" in self.__dict__:
199 if old_entry is None and "_alldirs" in self.__dict__:
200 self._alldirs.addpath(f)
200 self._alldirs.addpath(f)
201 self._map[f] = DirstateItem(state, mode, size, mtime)
201 self._map[f] = DirstateItem(state, mode, size, mtime)
202 if state != b'n' or mtime == AMBIGUOUS_TIME:
202 if state != b'n' or mtime == AMBIGUOUS_TIME:
203 self.nonnormalset.add(f)
203 self.nonnormalset.add(f)
204 if size == FROM_P2:
204 if size == FROM_P2:
205 self.otherparentset.add(f)
205 self.otherparentset.add(f)
206
206
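For review convenience, a condensed restatement (not part of the patch; the dict literal and its keys are invented for illustration) of the flag-to-sentinel mapping that the branches of `addfile()` above implement:

    ADDFILE_SENTINELS = {
        # flag             (state, size,      mtime)
        'added':           (b'a',  NONNORMAL, AMBIGUOUS_TIME),
        'merged':          (b'm',  FROM_P2,   AMBIGUOUS_TIME),
        'from_p2':         (b'n',  FROM_P2,   AMBIGUOUS_TIME),
        'possibly_dirty':  (b'n',  NONNORMAL, AMBIGUOUS_TIME),
        # default: (b'n', size & rangemask, mtime & rangemask)
    }
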
207 def removefile(self, f, in_merge=False):
207 def removefile(self, f, in_merge=False):
208 """
208 """
209 Mark a file as removed in the dirstate.
209 Mark a file as removed in the dirstate.
210
210
211 The stored `size` field is used to hold sentinel values that indicate
211 The stored `size` field is used to hold sentinel values that indicate
212 the file's previous state. In the future, we should refactor this
212 the file's previous state. In the future, we should refactor this
213 to be more explicit about what that state is.
213 to be more explicit about what that state is.
214 """
214 """
215 entry = self.get(f)
215 entry = self.get(f)
216 size = 0
216 size = 0
217 if in_merge:
217 if in_merge:
218 # XXX we should not be able to have 'm' state and 'FROM_P2' if not
218 # XXX we should not be able to have 'm' state and 'FROM_P2' if not
219 # during a merge. So I (marmoute) am not sure we need the
219 # during a merge. So I (marmoute) am not sure we need the
220 # conditional at all. Double-checking this with an assert
220 # conditional at all. Double-checking this with an assert
221 # would be nice.
221 # would be nice.
222 if entry is not None:
222 if entry is not None:
223 # backup the previous state
223 # backup the previous state
224 if entry.merged: # merge
224 if entry.merged: # merge
225 size = NONNORMAL
225 size = NONNORMAL
226 elif entry.from_p2:
226 elif entry.from_p2:
227 size = FROM_P2
227 size = FROM_P2
228 self.otherparentset.add(f)
228 self.otherparentset.add(f)
229 if entry is not None and not (entry.merged or entry.from_p2):
229 if entry is not None and not (entry.merged or entry.from_p2):
230 self.copymap.pop(f, None)
230 self.copymap.pop(f, None)
231
231
232 if entry is not None and not entry.removed and "_dirs" in self.__dict__:
232 if entry is not None and not entry.removed and "_dirs" in self.__dict__:
233 self._dirs.delpath(f)
233 self._dirs.delpath(f)
234 if entry is None and "_alldirs" in self.__dict__:
234 if entry is None and "_alldirs" in self.__dict__:
235 self._alldirs.addpath(f)
235 self._alldirs.addpath(f)
236 if "filefoldmap" in self.__dict__:
236 if "filefoldmap" in self.__dict__:
237 normed = util.normcase(f)
237 normed = util.normcase(f)
238 self.filefoldmap.pop(normed, None)
238 self.filefoldmap.pop(normed, None)
239 self._map[f] = DirstateItem(b'r', 0, size, 0)
239 self._map[f] = DirstateItem(b'r', 0, size, 0)
240 self.nonnormalset.add(f)
240 self.nonnormalset.add(f)
241
241
242 def dropfile(self, f):
242 def dropfile(self, f):
243 """
243 """
244 Remove a file from the dirstate. Returns True if the file was
244 Remove a file from the dirstate. Returns True if the file was
245 previously recorded.
245 previously recorded.
246 """
246 """
247 old_entry = self._map.pop(f, None)
247 old_entry = self._map.pop(f, None)
248 exists = False
248 exists = False
249 oldstate = b'?'
249 oldstate = b'?'
250 if old_entry is not None:
250 if old_entry is not None:
251 exists = True
251 exists = True
252 oldstate = old_entry.state
252 oldstate = old_entry.state
253 if exists:
253 if exists:
254 if oldstate != b"r" and "_dirs" in self.__dict__:
254 if oldstate != b"r" and "_dirs" in self.__dict__:
255 self._dirs.delpath(f)
255 self._dirs.delpath(f)
256 if "_alldirs" in self.__dict__:
256 if "_alldirs" in self.__dict__:
257 self._alldirs.delpath(f)
257 self._alldirs.delpath(f)
258 if "filefoldmap" in self.__dict__:
258 if "filefoldmap" in self.__dict__:
259 normed = util.normcase(f)
259 normed = util.normcase(f)
260 self.filefoldmap.pop(normed, None)
260 self.filefoldmap.pop(normed, None)
261 self.nonnormalset.discard(f)
261 self.nonnormalset.discard(f)
262 return exists
262 return exists
263
263
264 def clearambiguoustimes(self, files, now):
264 def clearambiguoustimes(self, files, now):
265 for f in files:
265 for f in files:
266 e = self.get(f)
266 e = self.get(f)
267 if e is not None and e.need_delay(now):
267 if e is not None and e.need_delay(now):
268 e.set_possibly_dirty()
268 e.set_possibly_dirty()
269 self.nonnormalset.add(f)
269 self.nonnormalset.add(f)
270
270
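A simplified model (an assumption, not the real implementation) of the `need_delay()` check used above: an entry recorded as clean during the same second as `now` cannot be trusted, because a second write within that second leaves the mtime unchanged.

    def need_delay(recorded_state, recorded_mtime, now):
        # A (mode, size, mtime) comparison cannot see a rewrite that
        # lands in the same clock tick, so such entries are demoted to
        # possibly-dirty and re-checked by content.
        return recorded_state == b'n' and recorded_mtime == now
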
271 def nonnormalentries(self):
271 def nonnormalentries(self):
272 '''Compute the nonnormal dirstate entries from the dmap'''
272 '''Compute the nonnormal dirstate entries from the dmap'''
273 try:
273 try:
274 return parsers.nonnormalotherparententries(self._map)
274 return parsers.nonnormalotherparententries(self._map)
275 except AttributeError:
275 except AttributeError:
276 nonnorm = set()
276 nonnorm = set()
277 otherparent = set()
277 otherparent = set()
278 for fname, e in pycompat.iteritems(self._map):
278 for fname, e in pycompat.iteritems(self._map):
279 if e.state != b'n' or e.mtime == AMBIGUOUS_TIME:
279 if e.state != b'n' or e.mtime == AMBIGUOUS_TIME:
280 nonnorm.add(fname)
280 nonnorm.add(fname)
281 if e.from_p2:
281 if e.from_p2:
282 otherparent.add(fname)
282 otherparent.add(fname)
283 return nonnorm, otherparent
283 return nonnorm, otherparent
284
284
285 @propertycache
285 @propertycache
286 def filefoldmap(self):
286 def filefoldmap(self):
287 """Returns a dictionary mapping normalized case paths to their
287 """Returns a dictionary mapping normalized case paths to their
288 non-normalized versions.
288 non-normalized versions.
289 """
289 """
290 try:
290 try:
291 makefilefoldmap = parsers.make_file_foldmap
291 makefilefoldmap = parsers.make_file_foldmap
292 except AttributeError:
292 except AttributeError:
293 pass
293 pass
294 else:
294 else:
295 return makefilefoldmap(
295 return makefilefoldmap(
296 self._map, util.normcasespec, util.normcasefallback
296 self._map, util.normcasespec, util.normcasefallback
297 )
297 )
298
298
299 f = {}
299 f = {}
300 normcase = util.normcase
300 normcase = util.normcase
301 for name, s in pycompat.iteritems(self._map):
301 for name, s in pycompat.iteritems(self._map):
302 if not s.removed:
302 if not s.removed:
303 f[normcase(name)] = name
303 f[normcase(name)] = name
304 f[b'.'] = b'.' # prevents useless util.fspath() invocation
304 f[b'.'] = b'.' # prevents useless util.fspath() invocation
305 return f
305 return f
306
306
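A toy run (assuming a lowercasing `normcase`, as on case-insensitive filesystems) of the fallback loop above:

    f = {}
    for name in (b'README.txt', b'src/Main.py'):
        f[name.lower()] = name  # stand-in for util.normcase
    assert f[b'readme.txt'] == b'README.txt'
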
307 def hastrackeddir(self, d):
307 def hastrackeddir(self, d):
308 """
308 """
309 Returns True if the dirstate contains a tracked (not removed) file
309 Returns True if the dirstate contains a tracked (not removed) file
310 in this directory.
310 in this directory.
311 """
311 """
312 return d in self._dirs
312 return d in self._dirs
313
313
314 def hasdir(self, d):
314 def hasdir(self, d):
315 """
315 """
316 Returns True if the dirstate contains a file (tracked or removed)
316 Returns True if the dirstate contains a file (tracked or removed)
317 in this directory.
317 in this directory.
318 """
318 """
319 return d in self._alldirs
319 return d in self._alldirs
320
320
321 @propertycache
321 @propertycache
322 def _dirs(self):
322 def _dirs(self):
323 return pathutil.dirs(self._map, b'r')
323 return pathutil.dirs(self._map, b'r')
324
324
325 @propertycache
325 @propertycache
326 def _alldirs(self):
326 def _alldirs(self):
327 return pathutil.dirs(self._map)
327 return pathutil.dirs(self._map)
328
328
329 def _opendirstatefile(self):
329 def _opendirstatefile(self):
330 fp, mode = txnutil.trypending(self._root, self._opener, self._filename)
330 fp, mode = txnutil.trypending(self._root, self._opener, self._filename)
331 if self._pendingmode is not None and self._pendingmode != mode:
331 if self._pendingmode is not None and self._pendingmode != mode:
332 fp.close()
332 fp.close()
333 raise error.Abort(
333 raise error.Abort(
334 _(b'working directory state may be changed in parallel')
334 _(b'working directory state may be changed in parallel')
335 )
335 )
336 self._pendingmode = mode
336 self._pendingmode = mode
337 return fp
337 return fp
338
338
339 def parents(self):
339 def parents(self):
340 if not self._parents:
340 if not self._parents:
341 try:
341 try:
342 fp = self._opendirstatefile()
342 fp = self._opendirstatefile()
343 st = fp.read(2 * self._nodelen)
343 st = fp.read(2 * self._nodelen)
344 fp.close()
344 fp.close()
345 except IOError as err:
345 except IOError as err:
346 if err.errno != errno.ENOENT:
346 if err.errno != errno.ENOENT:
347 raise
347 raise
348 # File doesn't exist, so the current state is empty
348 # File doesn't exist, so the current state is empty
349 st = b''
349 st = b''
350
350
351 l = len(st)
351 l = len(st)
352 if l == self._nodelen * 2:
352 if l == self._nodelen * 2:
353 self._parents = (
353 self._parents = (
354 st[: self._nodelen],
354 st[: self._nodelen],
355 st[self._nodelen : 2 * self._nodelen],
355 st[self._nodelen : 2 * self._nodelen],
356 )
356 )
357 elif l == 0:
357 elif l == 0:
358 self._parents = (
358 self._parents = (
359 self._nodeconstants.nullid,
359 self._nodeconstants.nullid,
360 self._nodeconstants.nullid,
360 self._nodeconstants.nullid,
361 )
361 )
362 else:
362 else:
363 raise error.Abort(
363 raise error.Abort(
364 _(b'working directory state appears damaged!')
364 _(b'working directory state appears damaged!')
365 )
365 )
366
366
367 return self._parents
367 return self._parents
368
368
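The byte layout assumed by the slicing above, spelled out: a v1 dirstate file begins with the two 20-byte parent node IDs, back to back.

    st = b'\x11' * 20 + b'\x22' * 20
    p1, p2 = st[:20], st[20:40]
    assert (p1, p2) == (b'\x11' * 20, b'\x22' * 20)
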
369 def setparents(self, p1, p2):
369 def setparents(self, p1, p2):
370 self._parents = (p1, p2)
370 self._parents = (p1, p2)
371 self._dirtyparents = True
371 self._dirtyparents = True
372
372
373 def read(self):
373 def read(self):
374 # ignore HG_PENDING because identity is used only for writing
374 # ignore HG_PENDING because identity is used only for writing
375 self.identity = util.filestat.frompath(
375 self.identity = util.filestat.frompath(
376 self._opener.join(self._filename)
376 self._opener.join(self._filename)
377 )
377 )
378
378
379 try:
379 try:
380 fp = self._opendirstatefile()
380 fp = self._opendirstatefile()
381 try:
381 try:
382 st = fp.read()
382 st = fp.read()
383 finally:
383 finally:
384 fp.close()
384 fp.close()
385 except IOError as err:
385 except IOError as err:
386 if err.errno != errno.ENOENT:
386 if err.errno != errno.ENOENT:
387 raise
387 raise
388 return
388 return
389 if not st:
389 if not st:
390 return
390 return
391
391
392 if util.safehasattr(parsers, b'dict_new_presized'):
392 if util.safehasattr(parsers, b'dict_new_presized'):
393 # Make an estimate of the number of files in the dirstate based on
393 # Make an estimate of the number of files in the dirstate based on
394 # its size. This trades wasting some memory for avoiding costly
394 # its size. This trades wasting some memory for avoiding costly
395 # resizes. Each entry has a prefix of 17 bytes followed by one or
395 # resizes. Each entry has a prefix of 17 bytes followed by one or
396 # two path names. Studies of various large-scale real-world repositories
396 # two path names. Studies of various large-scale real-world repositories
397 # found 54 bytes to be a reasonable upper limit for average path names.
397 # found 54 bytes to be a reasonable upper limit for average path names.
398 # Copy entries are ignored for the sake of this estimate.
398 # Copy entries are ignored for the sake of this estimate.
399 self._map = parsers.dict_new_presized(len(st) // 71)
399 self._map = parsers.dict_new_presized(len(st) // 71)
400
400
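The arithmetic behind the `// 71` above, spelled out (numbers taken from the comment; this only affects the presizing heuristic):

    ENTRY_PREFIX_BYTES = 17  # fixed per-entry header in dirstate-v1
    AVG_PATH_BYTES = 54      # empirical upper bound cited above
    assert ENTRY_PREFIX_BYTES + AVG_PATH_BYTES == 71
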
401 # Python's garbage collector triggers a GC each time a certain number
401 # Python's garbage collector triggers a GC each time a certain number
402 # of container objects (the number being defined by
402 # of container objects (the number being defined by
403 # gc.get_threshold()) are allocated. parse_dirstate creates a tuple
403 # gc.get_threshold()) are allocated. parse_dirstate creates a tuple
404 # for each file in the dirstate. The C version then immediately marks
404 # for each file in the dirstate. The C version then immediately marks
405 # them as not to be tracked by the collector. However, this has no
405 # them as not to be tracked by the collector. However, this has no
406 # effect on when GCs are triggered, only on what objects the GC looks
406 # effect on when GCs are triggered, only on what objects the GC looks
407 # into. This means that O(number of files) GCs are unavoidable.
407 # into. This means that O(number of files) GCs are unavoidable.
408 # Depending on when in the process's lifetime the dirstate is parsed,
408 # Depending on when in the process's lifetime the dirstate is parsed,
409 # this can get very expensive. As a workaround, disable GC while
409 # this can get very expensive. As a workaround, disable GC while
410 # parsing the dirstate.
410 # parsing the dirstate.
411 #
411 #
412 # (we cannot decorate the function directly since it is in a C module)
412 # (we cannot decorate the function directly since it is in a C module)
413 parse_dirstate = util.nogc(parsers.parse_dirstate)
413 parse_dirstate = util.nogc(parsers.parse_dirstate)
414 p = parse_dirstate(self._map, self.copymap, st)
414 p = parse_dirstate(self._map, self.copymap, st)
415 if not self._dirtyparents:
415 if not self._dirtyparents:
416 self.setparents(*p)
416 self.setparents(*p)
417
417
418 # Avoid excess attribute lookups by fast pathing certain checks
418 # Avoid excess attribute lookups by fast pathing certain checks
419 self.__contains__ = self._map.__contains__
419 self.__contains__ = self._map.__contains__
420 self.__getitem__ = self._map.__getitem__
420 self.__getitem__ = self._map.__getitem__
421 self.get = self._map.get
421 self.get = self._map.get
422
422
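A minimal sketch of the disable-GC-while-parsing idea described in the comment above; this is a hypothetical stand-in, and the real `util.nogc` differs in detail:

    import functools
    import gc

    def nogc(func):
        # Run `func` with the cyclic garbage collector disabled,
        # restoring its previous state afterwards.
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            was_enabled = gc.isenabled()
            gc.disable()
            try:
                return func(*args, **kwargs)
            finally:
                if was_enabled:
                    gc.enable()
        return wrapper
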
423 def write(self, _tr, st, now):
423 def write(self, _tr, st, now):
424 st.write(
424 st.write(
425 parsers.pack_dirstate(self._map, self.copymap, self.parents(), now)
425 parsers.pack_dirstate(self._map, self.copymap, self.parents(), now)
426 )
426 )
427 st.close()
427 st.close()
428 self._dirtyparents = False
428 self._dirtyparents = False
429 self.nonnormalset, self.otherparentset = self.nonnormalentries()
429 self.nonnormalset, self.otherparentset = self.nonnormalentries()
430
430
431 @propertycache
431 @propertycache
432 def nonnormalset(self):
432 def nonnormalset(self):
433 nonnorm, otherparents = self.nonnormalentries()
433 nonnorm, otherparents = self.nonnormalentries()
434 self.otherparentset = otherparents
434 self.otherparentset = otherparents
435 return nonnorm
435 return nonnorm
436
436
437 @propertycache
437 @propertycache
438 def otherparentset(self):
438 def otherparentset(self):
439 nonnorm, otherparents = self.nonnormalentries()
439 nonnorm, otherparents = self.nonnormalentries()
440 self.nonnormalset = nonnorm
440 self.nonnormalset = nonnorm
441 return otherparents
441 return otherparents
442
442
443 def non_normal_or_other_parent_paths(self):
443 def non_normal_or_other_parent_paths(self):
444 return self.nonnormalset.union(self.otherparentset)
444 return self.nonnormalset.union(self.otherparentset)
445
445
446 @propertycache
446 @propertycache
447 def identity(self):
447 def identity(self):
448 self._map
448 self._map
449 return self.identity
449 return self.identity
450
450
451 @propertycache
451 @propertycache
452 def dirfoldmap(self):
452 def dirfoldmap(self):
453 f = {}
453 f = {}
454 normcase = util.normcase
454 normcase = util.normcase
455 for name in self._dirs:
455 for name in self._dirs:
456 f[normcase(name)] = name
456 f[normcase(name)] = name
457 return f
457 return f
458
458
459
459
460 if rustmod is not None:
460 if rustmod is not None:
461
461
462 class dirstatemap(object):
462 class dirstatemap(object):
463 def __init__(self, ui, opener, root, nodeconstants, use_dirstate_v2):
463 def __init__(self, ui, opener, root, nodeconstants, use_dirstate_v2):
464 self._use_dirstate_v2 = use_dirstate_v2
464 self._use_dirstate_v2 = use_dirstate_v2
465 self._nodeconstants = nodeconstants
465 self._nodeconstants = nodeconstants
466 self._ui = ui
466 self._ui = ui
467 self._opener = opener
467 self._opener = opener
468 self._root = root
468 self._root = root
469 self._filename = b'dirstate'
469 self._filename = b'dirstate'
470 self._nodelen = 20 # Also update Rust code when changing this!
470 self._nodelen = 20 # Also update Rust code when changing this!
471 self._parents = None
471 self._parents = None
472 self._dirtyparents = False
472 self._dirtyparents = False
473 self._docket = None
473 self._docket = None
474
474
475 # for consistent view between _pl() and _read() invocations
475 # for consistent view between _pl() and _read() invocations
476 self._pendingmode = None
476 self._pendingmode = None
477
477
478 self._use_dirstate_tree = self._ui.configbool(
478 self._use_dirstate_tree = self._ui.configbool(
479 b"experimental",
479 b"experimental",
480 b"dirstate-tree.in-memory",
480 b"dirstate-tree.in-memory",
481 False,
481 False,
482 )
482 )
483
483
484 def addfile(
484 def addfile(
485 self,
485 self,
486 f,
486 f,
487 mode=0,
487 mode=0,
488 size=None,
488 size=None,
489 mtime=None,
489 mtime=None,
490 added=False,
490 added=False,
491 merged=False,
491 merged=False,
492 from_p2=False,
492 from_p2=False,
493 possibly_dirty=False,
493 possibly_dirty=False,
494 ):
494 ):
495 return self._rustmap.addfile(
495 return self._rustmap.addfile(
496 f,
496 f,
497 mode,
497 mode,
498 size,
498 size,
499 mtime,
499 mtime,
500 added,
500 added,
501 merged,
501 merged,
502 from_p2,
502 from_p2,
503 possibly_dirty,
503 possibly_dirty,
504 )
504 )
505
505
506 def removefile(self, *args, **kwargs):
506 def removefile(self, *args, **kwargs):
507 return self._rustmap.removefile(*args, **kwargs)
507 return self._rustmap.removefile(*args, **kwargs)
508
508
509 def dropfile(self, *args, **kwargs):
509 def dropfile(self, *args, **kwargs):
510 return self._rustmap.dropfile(*args, **kwargs)
510 return self._rustmap.dropfile(*args, **kwargs)
511
511
512 def clearambiguoustimes(self, *args, **kwargs):
512 def clearambiguoustimes(self, *args, **kwargs):
513 return self._rustmap.clearambiguoustimes(*args, **kwargs)
513 return self._rustmap.clearambiguoustimes(*args, **kwargs)
514
514
515 def nonnormalentries(self):
515 def nonnormalentries(self):
516 return self._rustmap.nonnormalentries()
516 return self._rustmap.nonnormalentries()
517
517
518 def get(self, *args, **kwargs):
518 def get(self, *args, **kwargs):
519 return self._rustmap.get(*args, **kwargs)
519 return self._rustmap.get(*args, **kwargs)
520
520
521 @property
521 @property
522 def copymap(self):
522 def copymap(self):
523 return self._rustmap.copymap()
523 return self._rustmap.copymap()
524
524
525 def directories(self):
525 def directories(self):
526 return self._rustmap.directories()
526 return self._rustmap.directories()
527
527
528 def preload(self):
528 def preload(self):
529 self._rustmap
529 self._rustmap
530
530
531 def clear(self):
531 def clear(self):
532 self._rustmap.clear()
532 self._rustmap.clear()
533 self.setparents(
533 self.setparents(
534 self._nodeconstants.nullid, self._nodeconstants.nullid
534 self._nodeconstants.nullid, self._nodeconstants.nullid
535 )
535 )
536 util.clearcachedproperty(self, b"_dirs")
536 util.clearcachedproperty(self, b"_dirs")
537 util.clearcachedproperty(self, b"_alldirs")
537 util.clearcachedproperty(self, b"_alldirs")
538 util.clearcachedproperty(self, b"dirfoldmap")
538 util.clearcachedproperty(self, b"dirfoldmap")
539
539
540 def items(self):
540 def items(self):
541 return self._rustmap.items()
541 return self._rustmap.items()
542
542
543 def keys(self):
543 def keys(self):
544 return iter(self._rustmap)
544 return iter(self._rustmap)
545
545
546 def __contains__(self, key):
546 def __contains__(self, key):
547 return key in self._rustmap
547 return key in self._rustmap
548
548
549 def __getitem__(self, item):
549 def __getitem__(self, item):
550 return self._rustmap[item]
550 return self._rustmap[item]
551
551
552 def __len__(self):
552 def __len__(self):
553 return len(self._rustmap)
553 return len(self._rustmap)
554
554
555 def __iter__(self):
555 def __iter__(self):
556 return iter(self._rustmap)
556 return iter(self._rustmap)
557
557
558 # forward for python2,3 compat
558 # forward for python2,3 compat
559 iteritems = items
559 iteritems = items
560
560
561 def _opendirstatefile(self):
561 def _opendirstatefile(self):
562 fp, mode = txnutil.trypending(
562 fp, mode = txnutil.trypending(
563 self._root, self._opener, self._filename
563 self._root, self._opener, self._filename
564 )
564 )
565 if self._pendingmode is not None and self._pendingmode != mode:
565 if self._pendingmode is not None and self._pendingmode != mode:
566 fp.close()
566 fp.close()
567 raise error.Abort(
567 raise error.Abort(
568 _(b'working directory state may be changed in parallel')
568 _(b'working directory state may be changed in parallel')
569 )
569 )
570 self._pendingmode = mode
570 self._pendingmode = mode
571 return fp
571 return fp
572
572
573 def _readdirstatefile(self, size=-1):
573 def _readdirstatefile(self, size=-1):
574 try:
574 try:
575 with self._opendirstatefile() as fp:
575 with self._opendirstatefile() as fp:
576 return fp.read(size)
576 return fp.read(size)
577 except IOError as err:
577 except IOError as err:
578 if err.errno != errno.ENOENT:
578 if err.errno != errno.ENOENT:
579 raise
579 raise
580 # File doesn't exist, so the current state is empty
580 # File doesn't exist, so the current state is empty
581 return b''
581 return b''
582
582
583 def setparents(self, p1, p2):
583 def setparents(self, p1, p2):
584 self._parents = (p1, p2)
584 self._parents = (p1, p2)
585 self._dirtyparents = True
585 self._dirtyparents = True
586
586
587 def parents(self):
587 def parents(self):
588 if not self._parents:
588 if not self._parents:
589 if self._use_dirstate_v2:
589 if self._use_dirstate_v2:
590 self._parents = self.docket.parents
590 self._parents = self.docket.parents
591 else:
591 else:
592 read_len = self._nodelen * 2
592 read_len = self._nodelen * 2
593 st = self._readdirstatefile(read_len)
593 st = self._readdirstatefile(read_len)
594 l = len(st)
594 l = len(st)
595 if l == read_len:
595 if l == read_len:
596 self._parents = (
596 self._parents = (
597 st[: self._nodelen],
597 st[: self._nodelen],
598 st[self._nodelen : 2 * self._nodelen],
598 st[self._nodelen : 2 * self._nodelen],
599 )
599 )
600 elif l == 0:
600 elif l == 0:
601 self._parents = (
601 self._parents = (
602 self._nodeconstants.nullid,
602 self._nodeconstants.nullid,
603 self._nodeconstants.nullid,
603 self._nodeconstants.nullid,
604 )
604 )
605 else:
605 else:
606 raise error.Abort(
606 raise error.Abort(
607 _(b'working directory state appears damaged!')
607 _(b'working directory state appears damaged!')
608 )
608 )
609
609
610 return self._parents
610 return self._parents
611
611
612 @property
612 @property
613 def docket(self):
613 def docket(self):
614 if not self._docket:
614 if not self._docket:
615 if not self._use_dirstate_v2:
615 if not self._use_dirstate_v2:
616 raise error.ProgrammingError(
616 raise error.ProgrammingError(
617 b'dirstate only has a docket in v2 format'
617 b'dirstate only has a docket in v2 format'
618 )
618 )
619 self._docket = docketmod.DirstateDocket.parse(
619 self._docket = docketmod.DirstateDocket.parse(
620 self._readdirstatefile(), self._nodeconstants
620 self._readdirstatefile(), self._nodeconstants
621 )
621 )
622 return self._docket
622 return self._docket
623
623
624 @propertycache
624 @propertycache
625 def _rustmap(self):
625 def _rustmap(self):
626 """
626 """
627 Fills the DirstateMap when called.
627 Fills the DirstateMap when called.
628 """
628 """
629 # ignore HG_PENDING because identity is used only for writing
629 # ignore HG_PENDING because identity is used only for writing
630 self.identity = util.filestat.frompath(
630 self.identity = util.filestat.frompath(
631 self._opener.join(self._filename)
631 self._opener.join(self._filename)
632 )
632 )
633
633
634 if self._use_dirstate_v2:
634 if self._use_dirstate_v2:
635 if self.docket.uuid:
635 if self.docket.uuid:
636 # TODO: use mmap when possible
636 # TODO: use mmap when possible
637 data = self._opener.read(self.docket.data_filename())
637 data = self._opener.read(self.docket.data_filename())
638 else:
638 else:
639 data = b''
639 data = b''
640 self._rustmap = rustmod.DirstateMap.new_v2(
640 self._rustmap = rustmod.DirstateMap.new_v2(
641 data, self.docket.data_size
641 data, self.docket.data_size, self.docket.tree_metadata
642 )
642 )
643 parents = self.docket.parents
643 parents = self.docket.parents
644 else:
644 else:
645 self._rustmap, parents = rustmod.DirstateMap.new_v1(
645 self._rustmap, parents = rustmod.DirstateMap.new_v1(
646 self._use_dirstate_tree, self._readdirstatefile()
646 self._use_dirstate_tree, self._readdirstatefile()
647 )
647 )
648
648
649 if parents and not self._dirtyparents:
649 if parents and not self._dirtyparents:
650 self.setparents(*parents)
650 self.setparents(*parents)
651
651
652 self.__contains__ = self._rustmap.__contains__
652 self.__contains__ = self._rustmap.__contains__
653 self.__getitem__ = self._rustmap.__getitem__
653 self.__getitem__ = self._rustmap.__getitem__
654 self.get = self._rustmap.get
654 self.get = self._rustmap.get
655 return self._rustmap
655 return self._rustmap
656
656
657 def write(self, tr, st, now):
657 def write(self, tr, st, now):
658 if not self._use_dirstate_v2:
658 if not self._use_dirstate_v2:
659 p1, p2 = self.parents()
659 p1, p2 = self.parents()
660 packed = self._rustmap.write_v1(p1, p2, now)
660 packed = self._rustmap.write_v1(p1, p2, now)
661 st.write(packed)
661 st.write(packed)
662 st.close()
662 st.close()
663 self._dirtyparents = False
663 self._dirtyparents = False
664 return
664 return
665
665
666 # We can only append to an existing data file if there is one
666 # We can only append to an existing data file if there is one
667 can_append = self.docket.uuid is not None
667 can_append = self.docket.uuid is not None
668 packed, append = self._rustmap.write_v2(now, can_append)
668 packed, meta, append = self._rustmap.write_v2(now, can_append)
669 if append:
669 if append:
670 docket = self.docket
670 docket = self.docket
671 data_filename = docket.data_filename()
671 data_filename = docket.data_filename()
672 if tr:
672 if tr:
673 tr.add(data_filename, docket.data_size)
673 tr.add(data_filename, docket.data_size)
674 with self._opener(data_filename, b'r+b') as fp:
674 with self._opener(data_filename, b'r+b') as fp:
675 fp.seek(docket.data_size)
675 fp.seek(docket.data_size)
676 assert fp.tell() == docket.data_size
676 assert fp.tell() == docket.data_size
677 written = fp.write(packed)
677 written = fp.write(packed)
678 if written is not None: # py2 may return None
678 if written is not None: # py2 may return None
679 assert written == len(packed), (written, len(packed))
679 assert written == len(packed), (written, len(packed))
680 docket.data_size += len(packed)
680 docket.data_size += len(packed)
681 docket.parents = self.parents()
681 docket.parents = self.parents()
682 docket.tree_metadata = meta
682 st.write(docket.serialize())
683 st.write(docket.serialize())
683 st.close()
684 st.close()
684 else:
685 else:
685 old_docket = self.docket
686 old_docket = self.docket
686 new_docket = docketmod.DirstateDocket.with_new_uuid(
687 new_docket = docketmod.DirstateDocket.with_new_uuid(
687 self.parents(), len(packed)
688 self.parents(), len(packed), meta
688 )
689 )
689 data_filename = new_docket.data_filename()
690 data_filename = new_docket.data_filename()
690 if tr:
691 if tr:
691 tr.add(data_filename, 0)
692 tr.add(data_filename, 0)
692 self._opener.write(data_filename, packed)
693 self._opener.write(data_filename, packed)
693 # Write the new docket after the new data file has been
694 # Write the new docket after the new data file has been
694 # written. Because `st` was opened with `atomictemp=True`,
695 # written. Because `st` was opened with `atomictemp=True`,
695 # the actual `.hg/dirstate` file is only affected on close.
696 # the actual `.hg/dirstate` file is only affected on close.
696 st.write(new_docket.serialize())
697 st.write(new_docket.serialize())
697 st.close()
698 st.close()
698 # Remove the old data file after the new docket pointing to
699 # Remove the old data file after the new docket pointing to
699 # the new data file was written.
700 # the new data file was written.
700 if old_docket.uuid:
701 if old_docket.uuid:
701 data_filename = old_docket.data_filename()
702 data_filename = old_docket.data_filename()
702 unlink = lambda _tr=None: self._opener.unlink(data_filename)
703 unlink = lambda _tr=None: self._opener.unlink(data_filename)
703 if tr:
704 if tr:
704 category = b"dirstate-v2-clean-" + old_docket.uuid
705 category = b"dirstate-v2-clean-" + old_docket.uuid
705 tr.addpostclose(category, unlink)
706 tr.addpostclose(category, unlink)
706 else:
707 else:
707 unlink()
708 unlink()
708 self._docket = new_docket
709 self._docket = new_docket
709 # Reload from the newly-written file
710 # Reload from the newly-written file
710 util.clearcachedproperty(self, b"_rustmap")
711 util.clearcachedproperty(self, b"_rustmap")
711 self._dirtyparents = False
712 self._dirtyparents = False
712
713
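A hypothetical helper (names invented) mirroring the ordering that the no-append branch above relies on: write the new data file first, then publish the docket that points at it, and only then delete the data file the old docket referenced. The real code additionally writes the docket through an `atomictemp` file and defers the unlink to the transaction:

    def rotate_data_file(opener, old_docket, new_docket, packed):
        opener.write(new_docket.data_filename(), packed)   # 1. new data
        opener.write(b'dirstate', new_docket.serialize())  # 2. new docket
        if old_docket.uuid:
            opener.unlink(old_docket.data_filename())      # 3. old data
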
713 @propertycache
714 @propertycache
714 def filefoldmap(self):
715 def filefoldmap(self):
715 """Returns a dictionary mapping normalized case paths to their
716 """Returns a dictionary mapping normalized case paths to their
716 non-normalized versions.
717 non-normalized versions.
717 """
718 """
718 return self._rustmap.filefoldmapasdict()
719 return self._rustmap.filefoldmapasdict()
719
720
720 def hastrackeddir(self, d):
721 def hastrackeddir(self, d):
721 return self._rustmap.hastrackeddir(d)
722 return self._rustmap.hastrackeddir(d)
722
723
723 def hasdir(self, d):
724 def hasdir(self, d):
724 return self._rustmap.hasdir(d)
725 return self._rustmap.hasdir(d)
725
726
726 @propertycache
727 @propertycache
727 def identity(self):
728 def identity(self):
728 self._rustmap
729 self._rustmap
729 return self.identity
730 return self.identity
730
731
731 @property
732 @property
732 def nonnormalset(self):
733 def nonnormalset(self):
733 nonnorm = self._rustmap.non_normal_entries()
734 nonnorm = self._rustmap.non_normal_entries()
734 return nonnorm
735 return nonnorm
735
736
736 @propertycache
737 @propertycache
737 def otherparentset(self):
738 def otherparentset(self):
738 otherparents = self._rustmap.other_parent_entries()
739 otherparents = self._rustmap.other_parent_entries()
739 return otherparents
740 return otherparents
740
741
741 def non_normal_or_other_parent_paths(self):
742 def non_normal_or_other_parent_paths(self):
742 return self._rustmap.non_normal_or_other_parent_paths()
743 return self._rustmap.non_normal_or_other_parent_paths()
743
744
744 @propertycache
745 @propertycache
745 def dirfoldmap(self):
746 def dirfoldmap(self):
746 f = {}
747 f = {}
747 normcase = util.normcase
748 normcase = util.normcase
748 for name, _pseudo_entry in self.directories():
749 for name, _pseudo_entry in self.directories():
749 f[normcase(name)] = name
750 f[normcase(name)] = name
750 return f
751 return f
@@ -1,62 +1,75 b''
1 # dirstatedocket.py - docket file for dirstate-v2
1 # dirstatedocket.py - docket file for dirstate-v2
2 #
2 #
3 # Copyright Mercurial Contributors
3 # Copyright Mercurial Contributors
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import struct
10 import struct
11
11
12 from ..revlogutils import docket as docket_mod
12 from ..revlogutils import docket as docket_mod
13
13
14
14
15 V2_FORMAT_MARKER = b"dirstate-v2\n"
15 V2_FORMAT_MARKER = b"dirstate-v2\n"
16
16
17 # Must match the constant of the same name in
18 # `rust/hg-core/src/dirstate_tree/on_disk.rs`
19 TREE_METADATA_SIZE = 40
20
17 # * 12 bytes: format marker
21 # * 12 bytes: format marker
18 # * 32 bytes: node ID of the working directory's first parent
22 # * 32 bytes: node ID of the working directory's first parent
19 # * 32 bytes: node ID of the working directory's second parent
23 # * 32 bytes: node ID of the working directory's second parent
20 # * 4 bytes: big-endian used size of the data file
24 # * 4 bytes: big-endian used size of the data file
25 # * {TREE_METADATA_SIZE} bytes: tree metadata, parsed separately
21 # * 1 byte: length of the data file's UUID
26 # * 1 byte: length of the data file's UUID
22 # * variable: data file's UUID
27 # * variable: data file's UUID
23 #
28 #
24 # Node IDs are null-padded if shorter than 32 bytes.
29 # Node IDs are null-padded if shorter than 32 bytes.
25 # A data file shorter than the specified used size is corrupted (truncated)
30 # A data file shorter than the specified used size is corrupted (truncated)
26 HEADER = struct.Struct(">{}s32s32sLB".format(len(V2_FORMAT_MARKER)))
31 HEADER = struct.Struct(
32 ">{}s32s32sL{}sB".format(len(V2_FORMAT_MARKER), TREE_METADATA_SIZE)
33 )
27
34
28
35
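As a sanity check on the widened header, its fixed size works out as follows (a sketch reusing the constants above):

    import struct

    V2_FORMAT_MARKER = b"dirstate-v2\n"
    TREE_METADATA_SIZE = 40
    HEADER = struct.Struct(
        ">{}s32s32sL{}sB".format(len(V2_FORMAT_MARKER), TREE_METADATA_SIZE)
    )
    # 12 + 32 + 32 + 4 + 40 + 1 fixed bytes, then the variable-length UUID
    assert HEADER.size == 121
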
29 class DirstateDocket(object):
36 class DirstateDocket(object):
30 data_filename_pattern = b'dirstate.%s.d'
37 data_filename_pattern = b'dirstate.%s.d'
31
38
32 def __init__(self, parents, data_size, uuid):
39 def __init__(self, parents, data_size, tree_metadata, uuid):
33 self.parents = parents
40 self.parents = parents
34 self.data_size = data_size
41 self.data_size = data_size
42 self.tree_metadata = tree_metadata
35 self.uuid = uuid
43 self.uuid = uuid
36
44
37 @classmethod
45 @classmethod
38 def with_new_uuid(cls, parents, data):
46 def with_new_uuid(cls, parents, data_size, tree_metadata):
39 return cls(parents, data, docket_mod.make_uid())
47 return cls(parents, data_size, tree_metadata, docket_mod.make_uid())
40
48
41 @classmethod
49 @classmethod
42 def parse(cls, data, nodeconstants):
50 def parse(cls, data, nodeconstants):
43 if not data:
51 if not data:
44 parents = (nodeconstants.nullid, nodeconstants.nullid)
52 parents = (nodeconstants.nullid, nodeconstants.nullid)
45 return cls(parents, 0, None)
53 return cls(parents, 0, b'', None)
46 marker, p1, p2, data_size, uuid_size = HEADER.unpack_from(data)
54 marker, p1, p2, data_size, meta, uuid_size = HEADER.unpack_from(data)
47 if marker != V2_FORMAT_MARKER:
55 if marker != V2_FORMAT_MARKER:
48 raise ValueError("expected dirstate-v2 marker")
56 raise ValueError("expected dirstate-v2 marker")
49 uuid = data[HEADER.size : HEADER.size + uuid_size]
57 uuid = data[HEADER.size : HEADER.size + uuid_size]
50 p1 = p1[: nodeconstants.nodelen]
58 p1 = p1[: nodeconstants.nodelen]
51 p2 = p2[: nodeconstants.nodelen]
59 p2 = p2[: nodeconstants.nodelen]
52 return cls((p1, p2), data_size, uuid)
60 return cls((p1, p2), data_size, meta, uuid)
53
61
54 def serialize(self):
62 def serialize(self):
55 p1, p2 = self.parents
63 p1, p2 = self.parents
56 header = HEADER.pack(
64 header = HEADER.pack(
57 V2_FORMAT_MARKER, p1, p2, self.data_size, len(self.uuid)
65 V2_FORMAT_MARKER,
66 p1,
67 p2,
68 self.data_size,
69 self.tree_metadata,
70 len(self.uuid),
58 )
71 )
59 return header + self.uuid
72 return header + self.uuid
60
73
61 def data_filename(self):
74 def data_filename(self):
62 return self.data_filename_pattern % self.uuid
75 return self.data_filename_pattern % self.uuid
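A hypothetical round-trip check of the widened layout; `FakeNodeConstants` is invented for the example (the real node constants come from Mercurial proper):

    class FakeNodeConstants:
        nodelen = 20
        nullid = b'\x00' * 20

    docket = DirstateDocket(
        (b'\x11' * 20, b'\x22' * 20),  # parents
        1024,                          # data_size
        b'\x00' * TREE_METADATA_SIZE,  # tree_metadata
        b'abcdef',                     # uuid
    )
    parsed = DirstateDocket.parse(docket.serialize(), FakeNodeConstants)
    assert parsed.parents == docket.parents
    assert parsed.data_size == docket.data_size
    assert parsed.tree_metadata == docket.tree_metadata
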
@@ -1,1270 +1,1272 b''
1 use bytes_cast::BytesCast;
1 use bytes_cast::BytesCast;
2 use micro_timer::timed;
2 use micro_timer::timed;
3 use std::borrow::Cow;
3 use std::borrow::Cow;
4 use std::convert::TryInto;
4 use std::convert::TryInto;
5 use std::path::PathBuf;
5 use std::path::PathBuf;
6
6
7 use super::on_disk;
7 use super::on_disk;
8 use super::on_disk::DirstateV2ParseError;
8 use super::on_disk::DirstateV2ParseError;
9 use super::path_with_basename::WithBasename;
9 use super::path_with_basename::WithBasename;
10 use crate::dirstate::parsers::pack_entry;
10 use crate::dirstate::parsers::pack_entry;
11 use crate::dirstate::parsers::packed_entry_size;
11 use crate::dirstate::parsers::packed_entry_size;
12 use crate::dirstate::parsers::parse_dirstate_entries;
12 use crate::dirstate::parsers::parse_dirstate_entries;
13 use crate::dirstate::parsers::Timestamp;
13 use crate::dirstate::parsers::Timestamp;
14 use crate::dirstate::MTIME_UNSET;
14 use crate::dirstate::MTIME_UNSET;
15 use crate::dirstate::SIZE_FROM_OTHER_PARENT;
15 use crate::dirstate::SIZE_FROM_OTHER_PARENT;
16 use crate::dirstate::SIZE_NON_NORMAL;
16 use crate::dirstate::SIZE_NON_NORMAL;
17 use crate::dirstate::V1_RANGEMASK;
17 use crate::dirstate::V1_RANGEMASK;
18 use crate::matchers::Matcher;
18 use crate::matchers::Matcher;
19 use crate::utils::hg_path::{HgPath, HgPathBuf};
19 use crate::utils::hg_path::{HgPath, HgPathBuf};
20 use crate::CopyMapIter;
20 use crate::CopyMapIter;
21 use crate::DirstateEntry;
21 use crate::DirstateEntry;
22 use crate::DirstateError;
22 use crate::DirstateError;
23 use crate::DirstateParents;
23 use crate::DirstateParents;
24 use crate::DirstateStatus;
24 use crate::DirstateStatus;
25 use crate::EntryState;
25 use crate::EntryState;
26 use crate::FastHashMap;
26 use crate::FastHashMap;
27 use crate::PatternFileWarning;
27 use crate::PatternFileWarning;
28 use crate::StateMapIter;
28 use crate::StateMapIter;
29 use crate::StatusError;
29 use crate::StatusError;
30 use crate::StatusOptions;
30 use crate::StatusOptions;
31
31
32 /// Append to an existing data file if the amount of unreachable data (not used
32 /// Append to an existing data file if the amount of unreachable data (not used
33 /// anymore) is less than this fraction of the total amount of existing data.
33 /// anymore) is less than this fraction of the total amount of existing data.
34 const ACCEPTABLE_UNREACHABLE_BYTES_RATIO: f32 = 0.5;
34 const ACCEPTABLE_UNREACHABLE_BYTES_RATIO: f32 = 0.5;
35
35
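Restated in Python to match the other sketches in this review (semantics assumed from the doc comment above): appending remains acceptable while unreachable bytes stay under half of the existing data.

    ACCEPTABLE_UNREACHABLE_BYTES_RATIO = 0.5

    def should_append(unreachable_bytes, total_bytes):
        if total_bytes == 0:
            return False  # nothing to append to yet
        return unreachable_bytes / total_bytes < ACCEPTABLE_UNREACHABLE_BYTES_RATIO
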
36 pub struct DirstateMap<'on_disk> {
36 pub struct DirstateMap<'on_disk> {
37 /// Contents of the `.hg/dirstate` file
37 /// Contents of the `.hg/dirstate` file
38 pub(super) on_disk: &'on_disk [u8],
38 pub(super) on_disk: &'on_disk [u8],
39
39
40 pub(super) root: ChildNodes<'on_disk>,
40 pub(super) root: ChildNodes<'on_disk>,
41
41
42 /// Number of nodes anywhere in the tree that have `.entry.is_some()`.
42 /// Number of nodes anywhere in the tree that have `.entry.is_some()`.
43 pub(super) nodes_with_entry_count: u32,
43 pub(super) nodes_with_entry_count: u32,
44
44
45 /// Number of nodes anywhere in the tree that have
45 /// Number of nodes anywhere in the tree that have
46 /// `.copy_source.is_some()`.
46 /// `.copy_source.is_some()`.
47 pub(super) nodes_with_copy_source_count: u32,
47 pub(super) nodes_with_copy_source_count: u32,
48
48
49 /// See on_disk::Header
49 /// See on_disk::Header
50 pub(super) ignore_patterns_hash: on_disk::IgnorePatternsHash,
50 pub(super) ignore_patterns_hash: on_disk::IgnorePatternsHash,
51
51
52 /// How many bytes of `on_disk` are not used anymore
52 /// How many bytes of `on_disk` are not used anymore
53 pub(super) unreachable_bytes: u32,
53 pub(super) unreachable_bytes: u32,
54 }
54 }
55
55
56 /// Using a plain `HgPathBuf` of the full path from the repository root as a
56 /// Using a plain `HgPathBuf` of the full path from the repository root as a
57 /// map key would also work: all paths in a given map have the same parent
57 /// map key would also work: all paths in a given map have the same parent
58 /// path, so comparing full paths gives the same result as comparing base
58 /// path, so comparing full paths gives the same result as comparing base
59 /// names. However `HashMap` would waste time always re-hashing the same
59 /// names. However `HashMap` would waste time always re-hashing the same
60 /// string prefix.
60 /// string prefix.
61 pub(super) type NodeKey<'on_disk> = WithBasename<Cow<'on_disk, HgPath>>;
61 pub(super) type NodeKey<'on_disk> = WithBasename<Cow<'on_disk, HgPath>>;
62
62
63 /// Similar to `&'tree Cow<'on_disk, HgPath>`, but can also be returned
63 /// Similar to `&'tree Cow<'on_disk, HgPath>`, but can also be returned
64 /// for on-disk nodes that don’t actually have a `Cow` to borrow.
64 /// for on-disk nodes that don’t actually have a `Cow` to borrow.
65 pub(super) enum BorrowedPath<'tree, 'on_disk> {
65 pub(super) enum BorrowedPath<'tree, 'on_disk> {
66 InMemory(&'tree HgPathBuf),
66 InMemory(&'tree HgPathBuf),
67 OnDisk(&'on_disk HgPath),
67 OnDisk(&'on_disk HgPath),
68 }
68 }
69
69
70 pub(super) enum ChildNodes<'on_disk> {
70 pub(super) enum ChildNodes<'on_disk> {
71 InMemory(FastHashMap<NodeKey<'on_disk>, Node<'on_disk>>),
71 InMemory(FastHashMap<NodeKey<'on_disk>, Node<'on_disk>>),
72 OnDisk(&'on_disk [on_disk::Node]),
72 OnDisk(&'on_disk [on_disk::Node]),
73 }
73 }
74
74
75 pub(super) enum ChildNodesRef<'tree, 'on_disk> {
75 pub(super) enum ChildNodesRef<'tree, 'on_disk> {
76 InMemory(&'tree FastHashMap<NodeKey<'on_disk>, Node<'on_disk>>),
76 InMemory(&'tree FastHashMap<NodeKey<'on_disk>, Node<'on_disk>>),
77 OnDisk(&'on_disk [on_disk::Node]),
77 OnDisk(&'on_disk [on_disk::Node]),
78 }
78 }
79
79
80 pub(super) enum NodeRef<'tree, 'on_disk> {
80 pub(super) enum NodeRef<'tree, 'on_disk> {
81 InMemory(&'tree NodeKey<'on_disk>, &'tree Node<'on_disk>),
81 InMemory(&'tree NodeKey<'on_disk>, &'tree Node<'on_disk>),
82 OnDisk(&'on_disk on_disk::Node),
82 OnDisk(&'on_disk on_disk::Node),
83 }
83 }
84
84
85 impl<'tree, 'on_disk> BorrowedPath<'tree, 'on_disk> {
85 impl<'tree, 'on_disk> BorrowedPath<'tree, 'on_disk> {
86 pub fn detach_from_tree(&self) -> Cow<'on_disk, HgPath> {
86 pub fn detach_from_tree(&self) -> Cow<'on_disk, HgPath> {
87 match *self {
87 match *self {
88 BorrowedPath::InMemory(in_memory) => Cow::Owned(in_memory.clone()),
88 BorrowedPath::InMemory(in_memory) => Cow::Owned(in_memory.clone()),
89 BorrowedPath::OnDisk(on_disk) => Cow::Borrowed(on_disk),
89 BorrowedPath::OnDisk(on_disk) => Cow::Borrowed(on_disk),
90 }
90 }
91 }
91 }
}

impl<'tree, 'on_disk> std::ops::Deref for BorrowedPath<'tree, 'on_disk> {
    type Target = HgPath;

    fn deref(&self) -> &HgPath {
        match *self {
            BorrowedPath::InMemory(in_memory) => in_memory,
            BorrowedPath::OnDisk(on_disk) => on_disk,
        }
    }
}

impl Default for ChildNodes<'_> {
    fn default() -> Self {
        ChildNodes::InMemory(Default::default())
    }
}

impl<'on_disk> ChildNodes<'on_disk> {
    pub(super) fn as_ref<'tree>(
        &'tree self,
    ) -> ChildNodesRef<'tree, 'on_disk> {
        match self {
            ChildNodes::InMemory(nodes) => ChildNodesRef::InMemory(nodes),
            ChildNodes::OnDisk(nodes) => ChildNodesRef::OnDisk(nodes),
        }
    }

    pub(super) fn is_empty(&self) -> bool {
        match self {
            ChildNodes::InMemory(nodes) => nodes.is_empty(),
            ChildNodes::OnDisk(nodes) => nodes.is_empty(),
        }
    }

    fn make_mut(
        &mut self,
        on_disk: &'on_disk [u8],
        unreachable_bytes: &mut u32,
    ) -> Result<
        &mut FastHashMap<NodeKey<'on_disk>, Node<'on_disk>>,
        DirstateV2ParseError,
    > {
        match self {
            ChildNodes::InMemory(nodes) => Ok(nodes),
            ChildNodes::OnDisk(nodes) => {
                *unreachable_bytes +=
                    std::mem::size_of_val::<[on_disk::Node]>(nodes) as u32;
                let nodes = nodes
                    .iter()
                    .map(|node| {
                        Ok((
                            node.path(on_disk)?,
                            node.to_in_memory_node(on_disk)?,
                        ))
                    })
                    .collect::<Result<_, _>>()?;
                *self = ChildNodes::InMemory(nodes);
                match self {
                    ChildNodes::InMemory(nodes) => Ok(nodes),
                    ChildNodes::OnDisk(_) => unreachable!(),
                }
            }
        }
    }
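    // `make_mut` is the copy-on-write pivot of this module: a borrowed,
    // sorted `&[on_disk::Node]` slice is promoted to an owned `FastHashMap`
    // the first time someone needs to mutate it. The bytes of the on-disk
    // block that stop being referenced are added to `unreachable_bytes`,
    // which `write_should_append` later uses to decide between appending to
    // the data file and rewriting it from scratch.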
}

impl<'tree, 'on_disk> ChildNodesRef<'tree, 'on_disk> {
    pub(super) fn get(
        &self,
        base_name: &HgPath,
        on_disk: &'on_disk [u8],
    ) -> Result<Option<NodeRef<'tree, 'on_disk>>, DirstateV2ParseError> {
        match self {
            ChildNodesRef::InMemory(nodes) => Ok(nodes
                .get_key_value(base_name)
                .map(|(k, v)| NodeRef::InMemory(k, v))),
            ChildNodesRef::OnDisk(nodes) => {
                let mut parse_result = Ok(());
                let search_result = nodes.binary_search_by(|node| {
                    match node.base_name(on_disk) {
                        Ok(node_base_name) => node_base_name.cmp(base_name),
                        Err(e) => {
                            parse_result = Err(e);
                            // Dummy comparison result, `search_result` won’t
                            // be used since `parse_result` is an error
                            std::cmp::Ordering::Equal
                        }
                    }
                });
                parse_result.map(|()| {
                    search_result.ok().map(|i| NodeRef::OnDisk(&nodes[i]))
                })
            }
        }
    }
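    // Lookup cost mirrors the representation: a hash-map probe for the
    // in-memory variant, a binary search for the on-disk variant (valid
    // because on-disk nodes are stored sorted by base name, which `sorted`
    // below also relies on). Since the comparator passed to
    // `binary_search_by` cannot return a `Result`, parse errors are smuggled
    // out through `parse_result` and checked afterwards.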

    /// Iterate in undefined order
    pub(super) fn iter(
        &self,
    ) -> impl Iterator<Item = NodeRef<'tree, 'on_disk>> {
        match self {
            ChildNodesRef::InMemory(nodes) => itertools::Either::Left(
                nodes.iter().map(|(k, v)| NodeRef::InMemory(k, v)),
            ),
            ChildNodesRef::OnDisk(nodes) => {
                itertools::Either::Right(nodes.iter().map(NodeRef::OnDisk))
            }
        }
    }

    /// Iterate in parallel in undefined order
    pub(super) fn par_iter(
        &self,
    ) -> impl rayon::iter::ParallelIterator<Item = NodeRef<'tree, 'on_disk>>
    {
        use rayon::prelude::*;
        match self {
            ChildNodesRef::InMemory(nodes) => rayon::iter::Either::Left(
                nodes.par_iter().map(|(k, v)| NodeRef::InMemory(k, v)),
            ),
            ChildNodesRef::OnDisk(nodes) => rayon::iter::Either::Right(
                nodes.par_iter().map(NodeRef::OnDisk),
            ),
        }
    }

    pub(super) fn sorted(&self) -> Vec<NodeRef<'tree, 'on_disk>> {
        match self {
            ChildNodesRef::InMemory(nodes) => {
                let mut vec: Vec<_> = nodes
                    .iter()
                    .map(|(k, v)| NodeRef::InMemory(k, v))
                    .collect();
                fn sort_key<'a>(node: &'a NodeRef) -> &'a HgPath {
                    match node {
                        NodeRef::InMemory(path, _node) => path.base_name(),
                        NodeRef::OnDisk(_) => unreachable!(),
                    }
                }
                // `sort_unstable_by_key` doesn’t allow keys borrowing from the
                // value: https://github.com/rust-lang/rust/issues/34162
                vec.sort_unstable_by(|a, b| sort_key(a).cmp(sort_key(b)));
                vec
            }
            ChildNodesRef::OnDisk(nodes) => {
                // Nodes on disk are already sorted
                nodes.iter().map(NodeRef::OnDisk).collect()
            }
        }
    }
}

impl<'tree, 'on_disk> NodeRef<'tree, 'on_disk> {
    pub(super) fn full_path(
        &self,
        on_disk: &'on_disk [u8],
    ) -> Result<&'tree HgPath, DirstateV2ParseError> {
        match self {
            NodeRef::InMemory(path, _node) => Ok(path.full_path()),
            NodeRef::OnDisk(node) => node.full_path(on_disk),
        }
    }

    /// Returns a `BorrowedPath`, which can be turned into a `Cow<'on_disk,
    /// HgPath>` detached from `'tree`
    pub(super) fn full_path_borrowed(
        &self,
        on_disk: &'on_disk [u8],
    ) -> Result<BorrowedPath<'tree, 'on_disk>, DirstateV2ParseError> {
        match self {
            NodeRef::InMemory(path, _node) => match path.full_path() {
                Cow::Borrowed(on_disk) => Ok(BorrowedPath::OnDisk(on_disk)),
                Cow::Owned(in_memory) => Ok(BorrowedPath::InMemory(in_memory)),
            },
            NodeRef::OnDisk(node) => {
                Ok(BorrowedPath::OnDisk(node.full_path(on_disk)?))
            }
        }
    }

    pub(super) fn base_name(
        &self,
        on_disk: &'on_disk [u8],
    ) -> Result<&'tree HgPath, DirstateV2ParseError> {
        match self {
            NodeRef::InMemory(path, _node) => Ok(path.base_name()),
            NodeRef::OnDisk(node) => node.base_name(on_disk),
        }
    }

    pub(super) fn children(
        &self,
        on_disk: &'on_disk [u8],
    ) -> Result<ChildNodesRef<'tree, 'on_disk>, DirstateV2ParseError> {
        match self {
            NodeRef::InMemory(_path, node) => Ok(node.children.as_ref()),
            NodeRef::OnDisk(node) => {
                Ok(ChildNodesRef::OnDisk(node.children(on_disk)?))
            }
        }
    }

    pub(super) fn has_copy_source(&self) -> bool {
        match self {
            NodeRef::InMemory(_path, node) => node.copy_source.is_some(),
            NodeRef::OnDisk(node) => node.has_copy_source(),
        }
    }

    pub(super) fn copy_source(
        &self,
        on_disk: &'on_disk [u8],
    ) -> Result<Option<&'tree HgPath>, DirstateV2ParseError> {
        match self {
            NodeRef::InMemory(_path, node) => {
                Ok(node.copy_source.as_ref().map(|s| &**s))
            }
            NodeRef::OnDisk(node) => node.copy_source(on_disk),
        }
    }

    pub(super) fn entry(
        &self,
    ) -> Result<Option<DirstateEntry>, DirstateV2ParseError> {
        match self {
            NodeRef::InMemory(_path, node) => {
                Ok(node.data.as_entry().copied())
            }
            NodeRef::OnDisk(node) => node.entry(),
        }
    }

    pub(super) fn state(
        &self,
    ) -> Result<Option<EntryState>, DirstateV2ParseError> {
        match self {
            NodeRef::InMemory(_path, node) => {
                Ok(node.data.as_entry().map(|entry| entry.state))
            }
            NodeRef::OnDisk(node) => node.state(),
        }
    }

    pub(super) fn cached_directory_mtime(
        &self,
    ) -> Option<&'tree on_disk::Timestamp> {
        match self {
            NodeRef::InMemory(_path, node) => match &node.data {
                NodeData::CachedDirectory { mtime } => Some(mtime),
                _ => None,
            },
            NodeRef::OnDisk(node) => node.cached_directory_mtime(),
        }
    }

    pub(super) fn descendants_with_entry_count(&self) -> u32 {
        match self {
            NodeRef::InMemory(_path, node) => {
                node.descendants_with_entry_count
            }
            NodeRef::OnDisk(node) => node.descendants_with_entry_count.get(),
        }
    }

    pub(super) fn tracked_descendants_count(&self) -> u32 {
        match self {
            NodeRef::InMemory(_path, node) => node.tracked_descendants_count,
            NodeRef::OnDisk(node) => node.tracked_descendants_count.get(),
        }
    }
}

/// Represents a file or a directory
#[derive(Default)]
pub(super) struct Node<'on_disk> {
    pub(super) data: NodeData,

    pub(super) copy_source: Option<Cow<'on_disk, HgPath>>,

    pub(super) children: ChildNodes<'on_disk>,

    /// How many (non-inclusive) descendants of this node have an entry.
    pub(super) descendants_with_entry_count: u32,

    /// How many (non-inclusive) descendants of this node have an entry whose
    /// state is "tracked".
    pub(super) tracked_descendants_count: u32,
}

pub(super) enum NodeData {
    Entry(DirstateEntry),
    CachedDirectory { mtime: on_disk::Timestamp },
    None,
}

impl Default for NodeData {
    fn default() -> Self {
        NodeData::None
    }
}

impl NodeData {
    fn has_entry(&self) -> bool {
        match self {
            NodeData::Entry(_) => true,
            _ => false,
        }
    }

    fn as_entry(&self) -> Option<&DirstateEntry> {
        match self {
            NodeData::Entry(entry) => Some(entry),
            _ => None,
        }
    }
}

impl<'on_disk> DirstateMap<'on_disk> {
    pub(super) fn empty(on_disk: &'on_disk [u8]) -> Self {
        Self {
            on_disk,
            root: ChildNodes::default(),
            nodes_with_entry_count: 0,
            nodes_with_copy_source_count: 0,
            ignore_patterns_hash: [0; on_disk::IGNORE_PATTERNS_HASH_LEN],
            unreachable_bytes: 0,
        }
    }

    #[timed]
    pub fn new_v2(
        on_disk: &'on_disk [u8],
        data_size: usize,
+        metadata: &[u8],
    ) -> Result<Self, DirstateError> {
        if let Some(data) = on_disk.get(..data_size) {
-            Ok(on_disk::read(data)?)
+            Ok(on_disk::read(data, metadata)?)
        } else {
            Err(DirstateV2ParseError.into())
        }
    }
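    // The fixed-size tree metadata (root nodes pointer, node counts, and
    // similar) now lives in the `.hg/dirstate` docket rather than at the
    // start of the data file, which is why `new_v2` takes a separate
    // `metadata` byte slice alongside the data. `data_size` also comes from
    // the docket: only that prefix of the data file is meaningful, since the
    // file itself may be longer than what the docket describes. A sketch of
    // the expected call shape (the docket accessor names here are
    // assumptions, not necessarily the real API):
    //
    // ```
    // let map = DirstateMap::new_v2(
    //     &data_file_bytes,
    //     docket.data_size(),
    //     docket.tree_metadata(),
    // )?;
    // ```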

    #[timed]
    pub fn new_v1(
        on_disk: &'on_disk [u8],
    ) -> Result<(Self, Option<DirstateParents>), DirstateError> {
        let mut map = Self::empty(on_disk);
        if map.on_disk.is_empty() {
            return Ok((map, None));
        }

        let parents = parse_dirstate_entries(
            map.on_disk,
            |path, entry, copy_source| {
                let tracked = entry.state.is_tracked();
                let node = Self::get_or_insert_node(
                    map.on_disk,
                    &mut map.unreachable_bytes,
                    &mut map.root,
                    path,
                    WithBasename::to_cow_borrowed,
                    |ancestor| {
                        if tracked {
                            ancestor.tracked_descendants_count += 1
                        }
                        ancestor.descendants_with_entry_count += 1
                    },
                )?;
                assert!(
                    !node.data.has_entry(),
                    "duplicate dirstate entry in read"
                );
                assert!(
                    node.copy_source.is_none(),
                    "duplicate dirstate entry in read"
                );
                node.data = NodeData::Entry(*entry);
                node.copy_source = copy_source.map(Cow::Borrowed);
                map.nodes_with_entry_count += 1;
                if copy_source.is_some() {
                    map.nodes_with_copy_source_count += 1
                }
                Ok(())
            },
        )?;
        let parents = Some(parents.clone());

        Ok((map, parents))
    }

    /// Assuming dirstate-v2 format, returns whether the next write should
    /// append to the existing data file that contains `self.on_disk` (true),
    /// or create a new data file from scratch (false).
    pub(super) fn write_should_append(&self) -> bool {
        let ratio = self.unreachable_bytes as f32 / self.on_disk.len() as f32;
        ratio < ACCEPTABLE_UNREACHABLE_BYTES_RATIO
    }
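    // Worked example of the append heuristic, assuming
    // `ACCEPTABLE_UNREACHABLE_BYTES_RATIO` is 0.5 (the actual value is
    // defined elsewhere in this module): with a 10_000-byte data file of
    // which 4_000 bytes have become unreachable, the ratio is 0.4 and the
    // next write appends; once more than half of the file is garbage, the
    // next write produces a compact file from scratch instead.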

    fn get_node<'tree>(
        &'tree self,
        path: &HgPath,
    ) -> Result<Option<NodeRef<'tree, 'on_disk>>, DirstateV2ParseError> {
        let mut children = self.root.as_ref();
        let mut components = path.components();
        let mut component =
            components.next().expect("expected at least one component");
        loop {
            if let Some(child) = children.get(component, self.on_disk)? {
                if let Some(next_component) = components.next() {
                    component = next_component;
                    children = child.children(self.on_disk)?;
                } else {
                    return Ok(Some(child));
                }
            } else {
                return Ok(None);
            }
        }
    }

    /// Returns a mutable reference to the node at `path` if it exists
    ///
    /// This takes `root` instead of `&mut self` so that callers can mutate
    /// other fields while the returned borrow is still valid
    fn get_node_mut<'tree>(
        on_disk: &'on_disk [u8],
        unreachable_bytes: &mut u32,
        root: &'tree mut ChildNodes<'on_disk>,
        path: &HgPath,
    ) -> Result<Option<&'tree mut Node<'on_disk>>, DirstateV2ParseError> {
        let mut children = root;
        let mut components = path.components();
        let mut component =
            components.next().expect("expected at least one component");
        loop {
            if let Some(child) = children
                .make_mut(on_disk, unreachable_bytes)?
                .get_mut(component)
            {
                if let Some(next_component) = components.next() {
                    component = next_component;
                    children = &mut child.children;
                } else {
                    return Ok(Some(child));
                }
            } else {
                return Ok(None);
            }
        }
    }

    pub(super) fn get_or_insert<'tree, 'path>(
        &'tree mut self,
        path: &HgPath,
    ) -> Result<&'tree mut Node<'on_disk>, DirstateV2ParseError> {
        Self::get_or_insert_node(
            self.on_disk,
            &mut self.unreachable_bytes,
            &mut self.root,
            path,
            WithBasename::to_cow_owned,
            |_| {},
        )
    }

    fn get_or_insert_node<'tree, 'path>(
        on_disk: &'on_disk [u8],
        unreachable_bytes: &mut u32,
        root: &'tree mut ChildNodes<'on_disk>,
        path: &'path HgPath,
        to_cow: impl Fn(
            WithBasename<&'path HgPath>,
        ) -> WithBasename<Cow<'on_disk, HgPath>>,
        mut each_ancestor: impl FnMut(&mut Node),
    ) -> Result<&'tree mut Node<'on_disk>, DirstateV2ParseError> {
        let mut child_nodes = root;
        let mut inclusive_ancestor_paths =
            WithBasename::inclusive_ancestors_of(path);
        let mut ancestor_path = inclusive_ancestor_paths
            .next()
            .expect("expected at least one inclusive ancestor");
        loop {
            // TODO: can we avoid allocating an owned key in cases where the
            // map already contains that key, without introducing double
            // lookup?
            let child_node = child_nodes
                .make_mut(on_disk, unreachable_bytes)?
                .entry(to_cow(ancestor_path))
                .or_default();
            if let Some(next) = inclusive_ancestor_paths.next() {
                each_ancestor(child_node);
                ancestor_path = next;
                child_nodes = &mut child_node.children;
            } else {
                return Ok(child_node);
            }
        }
    }
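    // `each_ancestor` runs once per ancestor directory created or traversed
    // on the way to `path` (but not for the final node itself). This is how
    // `new_v1` and `add_or_remove_file` keep `descendants_with_entry_count`
    // and `tracked_descendants_count` consistent without a separate pass
    // over the tree.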

    fn add_or_remove_file(
        &mut self,
        path: &HgPath,
        old_state: EntryState,
        new_entry: DirstateEntry,
    ) -> Result<(), DirstateV2ParseError> {
        let had_entry = old_state != EntryState::Unknown;
        let tracked_count_increment =
            match (old_state.is_tracked(), new_entry.state.is_tracked()) {
                (false, true) => 1,
                (true, false) => -1,
                _ => 0,
            };

        let node = Self::get_or_insert_node(
            self.on_disk,
            &mut self.unreachable_bytes,
            &mut self.root,
            path,
            WithBasename::to_cow_owned,
            |ancestor| {
                if !had_entry {
                    ancestor.descendants_with_entry_count += 1;
                }

                // We can’t use `+= increment` because the counter is unsigned,
                // and we want debug builds to detect accidental underflow
                // through zero
                match tracked_count_increment {
                    1 => ancestor.tracked_descendants_count += 1,
                    -1 => ancestor.tracked_descendants_count -= 1,
                    _ => {}
                }
            },
        )?;
        if !had_entry {
            self.nodes_with_entry_count += 1
        }
        node.data = NodeData::Entry(new_entry);
        Ok(())
    }

    fn iter_nodes<'tree>(
        &'tree self,
    ) -> impl Iterator<
        Item = Result<NodeRef<'tree, 'on_disk>, DirstateV2ParseError>,
    > + 'tree {
        // Depth first tree traversal.
        //
        // If we could afford internal iteration and recursion,
        // this would look like:
        //
        // ```
        // fn traverse_children(
        //     children: &ChildNodes,
        //     each: &mut impl FnMut(&Node),
        // ) {
        //     for child in children.values() {
        //         traverse_children(&child.children, each);
        //         each(child);
        //     }
        // }
        // ```
        //
        // However we want an external iterator and therefore can’t use the
        // call stack. Use an explicit stack instead:
        let mut stack = Vec::new();
        let mut iter = self.root.as_ref().iter();
        std::iter::from_fn(move || {
            while let Some(child_node) = iter.next() {
                let children = match child_node.children(self.on_disk) {
                    Ok(children) => children,
                    Err(error) => return Some(Err(error)),
                };
                // Pseudo-recursion
                let new_iter = children.iter();
                let old_iter = std::mem::replace(&mut iter, new_iter);
                stack.push((child_node, old_iter));
            }
            // Found the end of a `children.iter()` iterator.
            if let Some((child_node, next_iter)) = stack.pop() {
                // "Return" from pseudo-recursion by restoring state from the
                // explicit stack
                iter = next_iter;

                Some(Ok(child_node))
            } else {
                // Reached the bottom of the stack, we’re done
                None
            }
        })
    }

    fn clear_known_ambiguous_mtimes(
        &mut self,
        paths: &[impl AsRef<HgPath>],
    ) -> Result<(), DirstateV2ParseError> {
        for path in paths {
            if let Some(node) = Self::get_node_mut(
                self.on_disk,
                &mut self.unreachable_bytes,
                &mut self.root,
                path.as_ref(),
            )? {
                if let NodeData::Entry(entry) = &mut node.data {
                    entry.clear_mtime();
                }
            }
        }
        Ok(())
    }

    /// Return a fallible iterator of full paths of nodes that have an
    /// `entry` for which the given `predicate` returns true.
    ///
    /// Fallibility means that each iterator item is a `Result`, which may
    /// indicate a parse error of the on-disk dirstate-v2 format. Such errors
    /// should only happen if Mercurial is buggy or a repository is corrupted.
    fn filter_full_paths<'tree>(
        &'tree self,
        predicate: impl Fn(&DirstateEntry) -> bool + 'tree,
    ) -> impl Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + 'tree
    {
        filter_map_results(self.iter_nodes(), move |node| {
            if let Some(entry) = node.entry()? {
                if predicate(&entry) {
                    return Ok(Some(node.full_path(self.on_disk)?));
                }
            }
            Ok(None)
        })
    }

    fn count_dropped_path(unreachable_bytes: &mut u32, path: &Cow<HgPath>) {
        if let Cow::Borrowed(path) = path {
            *unreachable_bytes += path.len() as u32
        }
    }
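    // Only `Cow::Borrowed` paths point into the on-disk data file; owned
    // paths were never part of it, so dropping them frees memory but does
    // not create garbage in the file. Hence only borrowed paths count
    // toward `unreachable_bytes`.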
}

/// Like `Iterator::filter_map`, but over a fallible iterator of `Result`s.
///
/// The callback is only called for incoming `Ok` values. Errors are passed
/// through as-is. In order to let it use the `?` operator the callback is
/// expected to return a `Result` of `Option`, instead of an `Option` of
/// `Result`.
fn filter_map_results<'a, I, F, A, B, E>(
    iter: I,
    f: F,
) -> impl Iterator<Item = Result<B, E>> + 'a
where
    I: Iterator<Item = Result<A, E>> + 'a,
    F: Fn(A) -> Result<Option<B>, E> + 'a,
{
    iter.filter_map(move |result| match result {
        Ok(node) => f(node).transpose(),
        Err(e) => Some(Err(e)),
    })
}
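// For illustration, this is how `filter_map_results` composes with the `?`
// operator (a sketch along the lines of `copy_map_iter` below, not extra
// API surface):
//
// ```
// let copies = filter_map_results(map.iter_nodes(), |node| {
//     Ok(match node.copy_source(map.on_disk)? {
//         Some(source) => Some((node.full_path(map.on_disk)?, source)),
//         None => None,
//     })
// });
// ```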

impl<'on_disk> super::dispatch::DirstateMapMethods for DirstateMap<'on_disk> {
    fn clear(&mut self) {
        self.root = Default::default();
        self.nodes_with_entry_count = 0;
        self.nodes_with_copy_source_count = 0;
    }

    fn add_file(
        &mut self,
        filename: &HgPath,
        entry: DirstateEntry,
        added: bool,
        merged: bool,
        from_p2: bool,
        possibly_dirty: bool,
    ) -> Result<(), DirstateError> {
        let mut entry = entry;
        if added {
            assert!(!possibly_dirty);
            assert!(!from_p2);
            entry.state = EntryState::Added;
            entry.size = SIZE_NON_NORMAL;
            entry.mtime = MTIME_UNSET;
        } else if merged {
            assert!(!possibly_dirty);
            assert!(!from_p2);
            entry.state = EntryState::Merged;
            entry.size = SIZE_FROM_OTHER_PARENT;
            entry.mtime = MTIME_UNSET;
        } else if from_p2 {
            assert!(!possibly_dirty);
            entry.state = EntryState::Normal;
            entry.size = SIZE_FROM_OTHER_PARENT;
            entry.mtime = MTIME_UNSET;
        } else if possibly_dirty {
            entry.state = EntryState::Normal;
            entry.size = SIZE_NON_NORMAL;
            entry.mtime = MTIME_UNSET;
        } else {
            entry.state = EntryState::Normal;
            entry.size = entry.size & V1_RANGEMASK;
            entry.mtime = entry.mtime & V1_RANGEMASK;
        }

        let old_state = match self.get(filename)? {
            Some(e) => e.state,
            None => EntryState::Unknown,
        };

        Ok(self.add_or_remove_file(filename, old_state, entry)?)
    }

    fn remove_file(
        &mut self,
        filename: &HgPath,
        in_merge: bool,
    ) -> Result<(), DirstateError> {
        let old_entry_opt = self.get(filename)?;
        let old_state = match old_entry_opt {
            Some(e) => e.state,
            None => EntryState::Unknown,
        };
        let mut size = 0;
        if in_merge {
            // XXX we should not be able to have 'm' state and 'FROM_P2' if not
            // during a merge. So I (marmoute) am not sure we need the
            // conditional at all. Double-checking this with an assert
            // would be nice.
            if let Some(old_entry) = old_entry_opt {
                // backup the previous state
                if old_entry.state == EntryState::Merged {
                    size = SIZE_NON_NORMAL;
                } else if old_entry.state == EntryState::Normal
                    && old_entry.size == SIZE_FROM_OTHER_PARENT
                {
                    // other parent
                    size = SIZE_FROM_OTHER_PARENT;
                }
            }
        }
        if size == 0 {
            self.copy_map_remove(filename)?;
        }
        let entry = DirstateEntry {
            state: EntryState::Removed,
            mode: 0,
            size,
            mtime: 0,
        };
        Ok(self.add_or_remove_file(filename, old_state, entry)?)
    }

    fn drop_file(&mut self, filename: &HgPath) -> Result<bool, DirstateError> {
        let old_state = match self.get(filename)? {
            Some(e) => e.state,
            None => EntryState::Unknown,
        };
        struct Dropped {
            was_tracked: bool,
            had_entry: bool,
            had_copy_source: bool,
        }

        /// If this returns `Ok(Some((dropped, removed)))`, then
        ///
        /// * `dropped` is about the leaf node that was at `filename`
        /// * `removed` is whether this particular level of recursion just
        ///   removed a node in `nodes`.
        fn recur<'on_disk>(
            on_disk: &'on_disk [u8],
            unreachable_bytes: &mut u32,
            nodes: &mut ChildNodes<'on_disk>,
            path: &HgPath,
        ) -> Result<Option<(Dropped, bool)>, DirstateV2ParseError> {
            let (first_path_component, rest_of_path) =
                path.split_first_component();
            let nodes = nodes.make_mut(on_disk, unreachable_bytes)?;
            let node = if let Some(node) = nodes.get_mut(first_path_component)
            {
                node
            } else {
                return Ok(None);
            };
            let dropped;
            if let Some(rest) = rest_of_path {
                if let Some((d, removed)) = recur(
                    on_disk,
                    unreachable_bytes,
                    &mut node.children,
                    rest,
                )? {
                    dropped = d;
                    if dropped.had_entry {
                        node.descendants_with_entry_count -= 1;
                    }
                    if dropped.was_tracked {
                        node.tracked_descendants_count -= 1;
                    }

                    // Directory caches must be invalidated when removing a
                    // child node
                    if removed {
                        if let NodeData::CachedDirectory { .. } = &node.data {
                            node.data = NodeData::None
                        }
                    }
                } else {
                    return Ok(None);
                }
            } else {
                let had_entry = node.data.has_entry();
                // Read the tracked state before clearing `node.data` below,
                // otherwise the entry (and with it the tracked state) is
                // already gone and `was_tracked` would always be false.
                let was_tracked = node
                    .data
                    .as_entry()
                    .map_or(false, |entry| entry.state.is_tracked());
                if had_entry {
                    node.data = NodeData::None
                }
                if let Some(source) = &node.copy_source {
                    DirstateMap::count_dropped_path(unreachable_bytes, source)
                }
                dropped = Dropped {
                    was_tracked,
                    had_entry,
                    had_copy_source: node.copy_source.take().is_some(),
                };
            }
            // After recursion, for both leaf (rest_of_path is None) nodes and
            // parent nodes, remove a node if it just became empty.
            let remove = !node.data.has_entry()
                && node.copy_source.is_none()
                && node.children.is_empty();
            if remove {
                let (key, _) =
                    nodes.remove_entry(first_path_component).unwrap();
                DirstateMap::count_dropped_path(
                    unreachable_bytes,
                    key.full_path(),
                )
            }
            Ok(Some((dropped, remove)))
        }

        if let Some((dropped, _removed)) = recur(
            self.on_disk,
            &mut self.unreachable_bytes,
            &mut self.root,
            filename,
        )? {
            if dropped.had_entry {
                self.nodes_with_entry_count -= 1
            }
            if dropped.had_copy_source {
                self.nodes_with_copy_source_count -= 1
            }
            Ok(dropped.had_entry)
        } else {
            debug_assert!(!old_state.is_tracked());
            Ok(false)
        }
    }

    fn clear_ambiguous_times(
        &mut self,
        filenames: Vec<HgPathBuf>,
        now: i32,
    ) -> Result<(), DirstateV2ParseError> {
        for filename in filenames {
            if let Some(node) = Self::get_node_mut(
                self.on_disk,
                &mut self.unreachable_bytes,
                &mut self.root,
                &filename,
            )? {
                if let NodeData::Entry(entry) = &mut node.data {
                    entry.clear_ambiguous_mtime(now);
                }
            }
        }
        Ok(())
    }

    fn non_normal_entries_contains(
        &mut self,
        key: &HgPath,
    ) -> Result<bool, DirstateV2ParseError> {
        Ok(if let Some(node) = self.get_node(key)? {
            node.entry()?.map_or(false, |entry| entry.is_non_normal())
        } else {
            false
        })
    }

    fn non_normal_entries_remove(&mut self, _key: &HgPath) {
        // Do nothing, this `DirstateMap` does not have a separate "non normal
        // entries" set that needs to be kept up to date
    }

    fn non_normal_or_other_parent_paths(
        &mut self,
    ) -> Box<dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + '_>
    {
        Box::new(self.filter_full_paths(|entry| {
            entry.is_non_normal() || entry.is_from_other_parent()
        }))
    }

    fn set_non_normal_other_parent_entries(&mut self, _force: bool) {
        // Do nothing, this `DirstateMap` does not have separate "non normal
        // entries" and "from other parent" sets that need to be recomputed
    }

    fn iter_non_normal_paths(
        &mut self,
    ) -> Box<
        dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + Send + '_,
    > {
        self.iter_non_normal_paths_panic()
    }

    fn iter_non_normal_paths_panic(
        &self,
    ) -> Box<
        dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + Send + '_,
    > {
        Box::new(self.filter_full_paths(|entry| entry.is_non_normal()))
    }

    fn iter_other_parent_paths(
        &mut self,
    ) -> Box<
        dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + Send + '_,
    > {
        Box::new(self.filter_full_paths(|entry| entry.is_from_other_parent()))
    }

    fn has_tracked_dir(
        &mut self,
        directory: &HgPath,
    ) -> Result<bool, DirstateError> {
        if let Some(node) = self.get_node(directory)? {
            // A node without a `DirstateEntry` was created to hold child
            // nodes, and is therefore a directory.
            let state = node.state()?;
            Ok(state.is_none() && node.tracked_descendants_count() > 0)
        } else {
            Ok(false)
        }
    }

    fn has_dir(&mut self, directory: &HgPath) -> Result<bool, DirstateError> {
        if let Some(node) = self.get_node(directory)? {
            // A node without a `DirstateEntry` was created to hold child
            // nodes, and is therefore a directory.
            let state = node.state()?;
            Ok(state.is_none() && node.descendants_with_entry_count() > 0)
        } else {
            Ok(false)
        }
    }
1052
1052 #[timed]
1053 #[timed]
1053 fn pack_v1(
1054 fn pack_v1(
1054 &mut self,
1055 &mut self,
1055 parents: DirstateParents,
1056 parents: DirstateParents,
1056 now: Timestamp,
1057 now: Timestamp,
1057 ) -> Result<Vec<u8>, DirstateError> {
1058 ) -> Result<Vec<u8>, DirstateError> {
1058 let now: i32 = now.0.try_into().expect("time overflow");
1059 let now: i32 = now.0.try_into().expect("time overflow");
1059 let mut ambiguous_mtimes = Vec::new();
1060 let mut ambiguous_mtimes = Vec::new();
1060 // Optizimation (to be measured?): pre-compute size to avoid `Vec`
1061 // Optizimation (to be measured?): pre-compute size to avoid `Vec`
1061 // reallocations
1062 // reallocations
1062 let mut size = parents.as_bytes().len();
1063 let mut size = parents.as_bytes().len();
1063 for node in self.iter_nodes() {
1064 for node in self.iter_nodes() {
1064 let node = node?;
1065 let node = node?;
1065 if let Some(entry) = node.entry()? {
1066 if let Some(entry) = node.entry()? {
1066 size += packed_entry_size(
1067 size += packed_entry_size(
1067 node.full_path(self.on_disk)?,
1068 node.full_path(self.on_disk)?,
1068 node.copy_source(self.on_disk)?,
1069 node.copy_source(self.on_disk)?,
1069 );
1070 );
1070 if entry.mtime_is_ambiguous(now) {
1071 if entry.mtime_is_ambiguous(now) {
1071 ambiguous_mtimes.push(
1072 ambiguous_mtimes.push(
1072 node.full_path_borrowed(self.on_disk)?
1073 node.full_path_borrowed(self.on_disk)?
1073 .detach_from_tree(),
1074 .detach_from_tree(),
1074 )
1075 )
1075 }
1076 }
1076 }
1077 }
1077 }
1078 }
1078 self.clear_known_ambiguous_mtimes(&ambiguous_mtimes)?;
1079 self.clear_known_ambiguous_mtimes(&ambiguous_mtimes)?;
1079
1080
1080 let mut packed = Vec::with_capacity(size);
1081 let mut packed = Vec::with_capacity(size);
1081 packed.extend(parents.as_bytes());
1082 packed.extend(parents.as_bytes());
1082
1083
1083 for node in self.iter_nodes() {
1084 for node in self.iter_nodes() {
1084 let node = node?;
1085 let node = node?;
1085 if let Some(entry) = node.entry()? {
1086 if let Some(entry) = node.entry()? {
1086 pack_entry(
1087 pack_entry(
1087 node.full_path(self.on_disk)?,
1088 node.full_path(self.on_disk)?,
1088 &entry,
1089 &entry,
1089 node.copy_source(self.on_disk)?,
1090 node.copy_source(self.on_disk)?,
1090 &mut packed,
1091 &mut packed,
1091 );
1092 );
1092 }
1093 }
1093 }
1094 }
1094 Ok(packed)
1095 Ok(packed)
1095 }
1096 }
1096
1097
1097 /// Returns new data together with whether that data should be appended to
1098 /// Returns new data and metadata together with whether that data should be
1098 /// the existing data file whose content is at `self.on_disk` (true),
1099 /// appended to the existing data file whose content is at
1099 /// instead of written to a new data file (false).
1100 /// `self.on_disk` (true), instead of written to a new data file
1101 /// (false).
1100 #[timed]
1102 #[timed]
1101 fn pack_v2(
1103 fn pack_v2(
1102 &mut self,
1104 &mut self,
1103 now: Timestamp,
1105 now: Timestamp,
1104 can_append: bool,
1106 can_append: bool,
1105 ) -> Result<(Vec<u8>, bool), DirstateError> {
1107 ) -> Result<(Vec<u8>, Vec<u8>, bool), DirstateError> {
1106 // TODO: how do we want to handle this in 2038?
1108 // TODO: how do we want to handle this in 2038?
1107 let now: i32 = now.0.try_into().expect("time overflow");
1109 let now: i32 = now.0.try_into().expect("time overflow");
1108 let mut paths = Vec::new();
1110 let mut paths = Vec::new();
1109 for node in self.iter_nodes() {
1111 for node in self.iter_nodes() {
1110 let node = node?;
1112 let node = node?;
1111 if let Some(entry) = node.entry()? {
1113 if let Some(entry) = node.entry()? {
1112 if entry.mtime_is_ambiguous(now) {
1114 if entry.mtime_is_ambiguous(now) {
1113 paths.push(
1115 paths.push(
1114 node.full_path_borrowed(self.on_disk)?
1116 node.full_path_borrowed(self.on_disk)?
1115 .detach_from_tree(),
1117 .detach_from_tree(),
1116 )
1118 )
1117 }
1119 }
1118 }
1120 }
1119 }
1121 }
1120 // Borrow of `self` ends here since we collect cloned paths
1122 // Borrow of `self` ends here since we collect cloned paths
1121
1123
1122 self.clear_known_ambiguous_mtimes(&paths)?;
1124 self.clear_known_ambiguous_mtimes(&paths)?;
1123
1125
1124 on_disk::write(self, can_append)
1126 on_disk::write(self, can_append)
1125 }
1127 }
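    // With tree metadata moved into the docket, a v2 write now yields three
    // values: the bytes for the data file, the serialized fixed-size tree
    // metadata destined for the docket (the extra `Vec<u8>` in the return
    // type), and the append-vs-rewrite flag. Presumably the caller persists
    // the data file before updating the docket, so that a docket never
    // refers to data that is not yet on disk; the exact sequencing lives on
    // the calling side, not here.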

    fn status<'a>(
        &'a mut self,
        matcher: &'a (dyn Matcher + Sync),
        root_dir: PathBuf,
        ignore_files: Vec<PathBuf>,
        options: StatusOptions,
    ) -> Result<(DirstateStatus<'a>, Vec<PatternFileWarning>), StatusError>
    {
        super::status::status(self, matcher, root_dir, ignore_files, options)
    }

    fn copy_map_len(&self) -> usize {
        self.nodes_with_copy_source_count as usize
    }

    fn copy_map_iter(&self) -> CopyMapIter<'_> {
        Box::new(filter_map_results(self.iter_nodes(), move |node| {
            Ok(if let Some(source) = node.copy_source(self.on_disk)? {
                Some((node.full_path(self.on_disk)?, source))
            } else {
                None
            })
        }))
    }

    fn copy_map_contains_key(
        &self,
        key: &HgPath,
    ) -> Result<bool, DirstateV2ParseError> {
        Ok(if let Some(node) = self.get_node(key)? {
            node.has_copy_source()
        } else {
            false
        })
    }

    fn copy_map_get(
        &self,
        key: &HgPath,
    ) -> Result<Option<&HgPath>, DirstateV2ParseError> {
        if let Some(node) = self.get_node(key)? {
            if let Some(source) = node.copy_source(self.on_disk)? {
                return Ok(Some(source));
            }
        }
        Ok(None)
    }

    fn copy_map_remove(
        &mut self,
        key: &HgPath,
    ) -> Result<Option<HgPathBuf>, DirstateV2ParseError> {
        let count = &mut self.nodes_with_copy_source_count;
        let unreachable_bytes = &mut self.unreachable_bytes;
        Ok(Self::get_node_mut(
            self.on_disk,
            unreachable_bytes,
            &mut self.root,
            key,
        )?
        .and_then(|node| {
            if let Some(source) = &node.copy_source {
                *count -= 1;
                Self::count_dropped_path(unreachable_bytes, source);
            }
            node.copy_source.take().map(Cow::into_owned)
        }))
    }

    fn copy_map_insert(
        &mut self,
        key: HgPathBuf,
        value: HgPathBuf,
    ) -> Result<Option<HgPathBuf>, DirstateV2ParseError> {
        let node = Self::get_or_insert_node(
            self.on_disk,
            &mut self.unreachable_bytes,
            &mut self.root,
            &key,
            WithBasename::to_cow_owned,
            |_ancestor| {},
        )?;
        if node.copy_source.is_none() {
            self.nodes_with_copy_source_count += 1
        }
        Ok(node.copy_source.replace(value.into()).map(Cow::into_owned))
    }

    fn len(&self) -> usize {
        self.nodes_with_entry_count as usize
    }

    fn contains_key(
        &self,
        key: &HgPath,
    ) -> Result<bool, DirstateV2ParseError> {
        Ok(self.get(key)?.is_some())
    }

    fn get(
        &self,
        key: &HgPath,
    ) -> Result<Option<DirstateEntry>, DirstateV2ParseError> {
        Ok(if let Some(node) = self.get_node(key)? {
            node.entry()?
        } else {
            None
        })
    }

    fn iter(&self) -> StateMapIter<'_> {
        Box::new(filter_map_results(self.iter_nodes(), move |node| {
1240 Box::new(filter_map_results(self.iter_nodes(), move |node| {
1239 Ok(if let Some(entry) = node.entry()? {
1241 Ok(if let Some(entry) = node.entry()? {
1240 Some((node.full_path(self.on_disk)?, entry))
1242 Some((node.full_path(self.on_disk)?, entry))
1241 } else {
1243 } else {
1242 None
1244 None
1243 })
1245 })
1244 }))
1246 }))
1245 }
1247 }
1246
1248
1247 fn iter_directories(
1249 fn iter_directories(
1248 &self,
1250 &self,
1249 ) -> Box<
1251 ) -> Box<
1250 dyn Iterator<
1252 dyn Iterator<
1251 Item = Result<
1253 Item = Result<
1252 (&HgPath, Option<Timestamp>),
1254 (&HgPath, Option<Timestamp>),
1253 DirstateV2ParseError,
1255 DirstateV2ParseError,
1254 >,
1256 >,
1255 > + Send
1257 > + Send
1256 + '_,
1258 + '_,
1257 > {
1259 > {
1258 Box::new(filter_map_results(self.iter_nodes(), move |node| {
1260 Box::new(filter_map_results(self.iter_nodes(), move |node| {
1259 Ok(if node.state()?.is_none() {
1261 Ok(if node.state()?.is_none() {
1260 Some((
1262 Some((
1261 node.full_path(self.on_disk)?,
1263 node.full_path(self.on_disk)?,
1262 node.cached_directory_mtime()
1264 node.cached_directory_mtime()
1263 .map(|mtime| Timestamp(mtime.seconds())),
1265 .map(|mtime| Timestamp(mtime.seconds())),
1264 ))
1266 ))
1265 } else {
1267 } else {
1266 None
1268 None
1267 })
1269 })
1268 }))
1270 }))
1269 }
1271 }
1270 }
1272 }
@@ -1,491 +1,492 b''
1 use std::path::PathBuf;
1 use std::path::PathBuf;
2
2
3 use crate::dirstate::parsers::Timestamp;
3 use crate::dirstate::parsers::Timestamp;
4 use crate::dirstate_tree::on_disk::DirstateV2ParseError;
4 use crate::dirstate_tree::on_disk::DirstateV2ParseError;
5 use crate::matchers::Matcher;
5 use crate::matchers::Matcher;
6 use crate::utils::hg_path::{HgPath, HgPathBuf};
6 use crate::utils::hg_path::{HgPath, HgPathBuf};
7 use crate::CopyMapIter;
7 use crate::CopyMapIter;
8 use crate::DirstateEntry;
8 use crate::DirstateEntry;
9 use crate::DirstateError;
9 use crate::DirstateError;
10 use crate::DirstateMap;
10 use crate::DirstateMap;
11 use crate::DirstateParents;
11 use crate::DirstateParents;
12 use crate::DirstateStatus;
12 use crate::DirstateStatus;
13 use crate::PatternFileWarning;
13 use crate::PatternFileWarning;
14 use crate::StateMapIter;
14 use crate::StateMapIter;
15 use crate::StatusError;
15 use crate::StatusError;
16 use crate::StatusOptions;
16 use crate::StatusOptions;
17
17
18 /// `rust/hg-cpython/src/dirstate/dirstate_map.rs` implements in Rust a
18 /// `rust/hg-cpython/src/dirstate/dirstate_map.rs` implements in Rust a
19 /// `DirstateMap` Python class that wraps `Box<dyn DirstateMapMethods + Send>`,
19 /// `DirstateMap` Python class that wraps `Box<dyn DirstateMapMethods + Send>`,
20 /// a trait object of this trait. Except for constructors, this trait defines
20 /// a trait object of this trait. Except for constructors, this trait defines
21 /// all APIs that the class needs to interact with its inner dirstate map.
21 /// all APIs that the class needs to interact with its inner dirstate map.
22 ///
22 ///
23 /// A trait object is used to support two different concrete types:
23 /// A trait object is used to support two different concrete types:
24 ///
24 ///
25 /// * `rust/hg-core/src/dirstate/dirstate_map.rs` defines the "flat dirstate
25 /// * `rust/hg-core/src/dirstate/dirstate_map.rs` defines the "flat dirstate
26 /// map" which is based on a few large `HgPath`-keyed `HashMap` and `HashSet`
26 /// map" which is based on a few large `HgPath`-keyed `HashMap` and `HashSet`
27 /// fields.
27 /// fields.
28 /// * `rust/hg-core/src/dirstate_tree/dirstate_map.rs` defines the "tree
28 /// * `rust/hg-core/src/dirstate_tree/dirstate_map.rs` defines the "tree
29 /// dirstate map" based on a tree data struture with nodes for directories
29 /// dirstate map" based on a tree data struture with nodes for directories
30 /// containing child nodes for their files and sub-directories. This tree
30 /// containing child nodes for their files and sub-directories. This tree
31 /// enables a more efficient algorithm for `hg status`, but its details are
31 /// enables a more efficient algorithm for `hg status`, but its details are
32 /// abstracted in this trait.
32 /// abstracted in this trait.
33 ///
33 ///
34 /// The dirstate map associates paths of files in the working directory to
34 /// The dirstate map associates paths of files in the working directory to
35 /// various information about the state of those files.
35 /// various information about the state of those files.
36 pub trait DirstateMapMethods {
36 pub trait DirstateMapMethods {
37 /// Remove information about all files in this map
37 /// Remove information about all files in this map
38 fn clear(&mut self);
38 fn clear(&mut self);
39
39
40 /// Add or change the information associated to a given file.
40 /// Add or change the information associated to a given file.
41 ///
41 ///
42 /// `old_state` is the state in the entry that `get` would have returned
42 /// `old_state` is the state in the entry that `get` would have returned
43 /// before this call, or `EntryState::Unknown` if there was no such entry.
43 /// before this call, or `EntryState::Unknown` if there was no such entry.
44 ///
44 ///
45 /// `entry.state` should never be `EntryState::Unknown`.
45 /// `entry.state` should never be `EntryState::Unknown`.
46 fn add_file(
46 fn add_file(
47 &mut self,
47 &mut self,
48 filename: &HgPath,
48 filename: &HgPath,
49 entry: DirstateEntry,
49 entry: DirstateEntry,
50 added: bool,
50 added: bool,
51 merged: bool,
51 merged: bool,
52 from_p2: bool,
52 from_p2: bool,
53 possibly_dirty: bool,
53 possibly_dirty: bool,
54 ) -> Result<(), DirstateError>;
54 ) -> Result<(), DirstateError>;
55
55
56 /// Mark a file as "removed" (as in `hg rm`).
56 /// Mark a file as "removed" (as in `hg rm`).
57 ///
57 ///
58 /// `old_state` is the state in the entry that `get` would have returned
58 /// `old_state` is the state in the entry that `get` would have returned
59 /// before this call, or `EntryState::Unknown` if there was no such entry.
59 /// before this call, or `EntryState::Unknown` if there was no such entry.
60 ///
60 ///
61 /// `size` is not actually a size but the 0 or -1 or -2 value that would be
61 /// `size` is not actually a size but the 0 or -1 or -2 value that would be
62 /// put in the size field in the dirstate-v1 format.
62 /// put in the size field in the dirstate-v1 format.
63 fn remove_file(
63 fn remove_file(
64 &mut self,
64 &mut self,
65 filename: &HgPath,
65 filename: &HgPath,
66 in_merge: bool,
66 in_merge: bool,
67 ) -> Result<(), DirstateError>;
67 ) -> Result<(), DirstateError>;
68
68
69 /// Drop information about this file from the map if any, and return
69 /// Drop information about this file from the map if any, and return
70 /// whether there was any.
70 /// whether there was any.
71 ///
71 ///
72 /// `get` will now return `None` for this filename.
72 /// `get` will now return `None` for this filename.
73 ///
73 ///
74 /// `old_state` is the state in the entry that `get` would have returned
74 /// `old_state` is the state in the entry that `get` would have returned
75 /// before this call, or `EntryState::Unknown` if there was no such entry.
75 /// before this call, or `EntryState::Unknown` if there was no such entry.
76 fn drop_file(&mut self, filename: &HgPath) -> Result<bool, DirstateError>;
76 fn drop_file(&mut self, filename: &HgPath) -> Result<bool, DirstateError>;
77
77
78 /// Among the given files, mark the stored `mtime` as ambiguous if it is
78 /// Among the given files, mark the stored `mtime` as ambiguous if it is
79 /// (for entries with `state == EntryState::Normal`) equal to the given current Unix
79 /// (for entries with `state == EntryState::Normal`) equal to the given current Unix
80 /// timestamp.
80 /// timestamp.
81 fn clear_ambiguous_times(
81 fn clear_ambiguous_times(
82 &mut self,
82 &mut self,
83 filenames: Vec<HgPathBuf>,
83 filenames: Vec<HgPathBuf>,
84 now: i32,
84 now: i32,
85 ) -> Result<(), DirstateV2ParseError>;
85 ) -> Result<(), DirstateV2ParseError>;
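
The ambiguity being guarded against: when a file's recorded `mtime` falls in the same second as the dirstate write itself, a later modification within that same second would leave the `mtime` unchanged, so a cached "clean" state could not be trusted. A sketch of the check, using a simplified stand-in for the real `DirstateEntry`:

// Illustration only; the real type is `DirstateEntry`, with accessors.
struct SimpleEntry {
    state: char, // 'n' = Normal, 'a' = Added, 'r' = Removed, 'm' = Merged
    mtime: i32,  // seconds since the Unix epoch, truncated
}

fn mtime_is_ambiguous(entry: &SimpleEntry, now: i32) -> bool {
    // Only Normal entries rely on mtime comparisons, and only an mtime in
    // the same second as `now` can hide a later same-second write.
    entry.state == 'n' && entry.mtime == now
}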
86
86
87 /// Return whether the map has a "non-normal" entry for the given
87 /// Return whether the map has a "non-normal" entry for the given
88 /// filename. That is, any entry with a `state` other than
88 /// filename. That is, any entry with a `state` other than
89 /// `EntryState::Normal` or with an ambiguous `mtime`.
89 /// `EntryState::Normal` or with an ambiguous `mtime`.
90 fn non_normal_entries_contains(
90 fn non_normal_entries_contains(
91 &mut self,
91 &mut self,
92 key: &HgPath,
92 key: &HgPath,
93 ) -> Result<bool, DirstateV2ParseError>;
93 ) -> Result<bool, DirstateV2ParseError>;
94
94
95 /// Mark the given path as "normal" file. This is only relevant in the flat
95 /// Mark the given path as "normal" file. This is only relevant in the flat
96 /// dirstate map where there is a separate `HashSet` that needs to be kept
96 /// dirstate map where there is a separate `HashSet` that needs to be kept
97 /// up to date.
97 /// up to date.
98 fn non_normal_entries_remove(&mut self, key: &HgPath);
98 fn non_normal_entries_remove(&mut self, key: &HgPath);
99
99
100 /// Return an iterator of paths whose respective entries are either
100 /// Return an iterator of paths whose respective entries are either
101 /// "non-normal" (see `non_normal_entries_contains`) or "from other
101 /// "non-normal" (see `non_normal_entries_contains`) or "from other
102 /// parent".
102 /// parent".
103 ///
103 ///
104 /// If that information is cached, create the cache as needed.
104 /// If that information is cached, create the cache as needed.
105 ///
105 ///
106 /// "From other parent" is defined as `state == Normal && size == -2`.
106 /// "From other parent" is defined as `state == Normal && size == -2`.
107 ///
107 ///
108 /// Because parse errors can happen during iteration, the iterated items
108 /// Because parse errors can happen during iteration, the iterated items
109 /// are `Result`s.
109 /// are `Result`s.
110 fn non_normal_or_other_parent_paths(
110 fn non_normal_or_other_parent_paths(
111 &mut self,
111 &mut self,
112 ) -> Box<dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + '_>;
112 ) -> Box<dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + '_>;
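
Both path sets have short definitions, spelled out in the doc comments around this method; as simple predicates over v1-style fields (illustrative only, since the real checks are methods on the entry types):

// "Non-normal": any non-Normal state, or an mtime already cleared to the
// ambiguous marker (-1 in the v1 encoding).
fn is_non_normal(state: char, mtime: i32) -> bool {
    state != 'n' || mtime == -1
}

// "From other parent": a Normal entry whose size field holds the special
// marker -2 instead of an actual file size.
fn is_from_other_parent(state: char, size: i32) -> bool {
    state == 'n' && size == -2
}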
113
113
114 /// Create the cache for `non_normal_or_other_parent_paths` if needed.
114 /// Create the cache for `non_normal_or_other_parent_paths` if needed.
115 ///
115 ///
116 /// If `force` is true, the cache is re-created even if it already exists.
116 /// If `force` is true, the cache is re-created even if it already exists.
117 fn set_non_normal_other_parent_entries(&mut self, force: bool);
117 fn set_non_normal_other_parent_entries(&mut self, force: bool);
118
118
119 /// Return an iterator of paths whose respective entries are "non-normal"
119 /// Return an iterator of paths whose respective entries are "non-normal"
120 /// (see `non_normal_entries_contains`).
120 /// (see `non_normal_entries_contains`).
121 ///
121 ///
122 /// If that information is cached, create the cache as needed.
122 /// If that information is cached, create the cache as needed.
123 ///
123 ///
124 /// Because parse errors can happen during iteration, the iterated items
124 /// Because parse errors can happen during iteration, the iterated items
125 /// are `Result`s.
125 /// are `Result`s.
126 fn iter_non_normal_paths(
126 fn iter_non_normal_paths(
127 &mut self,
127 &mut self,
128 ) -> Box<
128 ) -> Box<
129 dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + Send + '_,
129 dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + Send + '_,
130 >;
130 >;
131
131
132 /// Same as `iter_non_normal_paths`, but takes `&self` instead of `&mut
132 /// Same as `iter_non_normal_paths`, but takes `&self` instead of `&mut
133 /// self`.
133 /// self`.
134 ///
134 ///
135 /// Panics if a cache is necessary but does not exist yet.
135 /// Panics if a cache is necessary but does not exist yet.
136 fn iter_non_normal_paths_panic(
136 fn iter_non_normal_paths_panic(
137 &self,
137 &self,
138 ) -> Box<
138 ) -> Box<
139 dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + Send + '_,
139 dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + Send + '_,
140 >;
140 >;
141
141
142 /// Return an iterator of paths whose respective entries are "from other
142 /// Return an iterator of paths whose respective entries are "from other
143 /// parent".
143 /// parent".
144 ///
144 ///
145 /// If that information is cached, create the cache as needed.
145 /// If that information is cached, create the cache as needed.
146 ///
146 ///
147 /// "From other parent" is defined as `state == Normal && size == -2`.
147 /// "From other parent" is defined as `state == Normal && size == -2`.
148 ///
148 ///
149 /// Because parse errors can happen during iteration, the iterated items
149 /// Because parse errors can happen during iteration, the iterated items
150 /// are `Result`s.
150 /// are `Result`s.
151 fn iter_other_parent_paths(
151 fn iter_other_parent_paths(
152 &mut self,
152 &mut self,
153 ) -> Box<
153 ) -> Box<
154 dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + Send + '_,
154 dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + Send + '_,
155 >;
155 >;
156
156
157 /// Returns whether the sub-tree rooted at the given directory contains any
157 /// Returns whether the sub-tree rooted at the given directory contains any
158 /// tracked file.
158 /// tracked file.
159 ///
159 ///
160 /// A file is tracked if it has a `state` other than `EntryState::Removed`.
160 /// A file is tracked if it has a `state` other than `EntryState::Removed`.
161 fn has_tracked_dir(
161 fn has_tracked_dir(
162 &mut self,
162 &mut self,
163 directory: &HgPath,
163 directory: &HgPath,
164 ) -> Result<bool, DirstateError>;
164 ) -> Result<bool, DirstateError>;
165
165
166 /// Returns whether the sub-tree rooted at the given directory contains any
166 /// Returns whether the sub-tree rooted at the given directory contains any
167 /// file with a dirstate entry.
167 /// file with a dirstate entry.
168 fn has_dir(&mut self, directory: &HgPath) -> Result<bool, DirstateError>;
168 fn has_dir(&mut self, directory: &HgPath) -> Result<bool, DirstateError>;
169
169
170 /// Clear mtimes that are ambiguous with `now` (similar to
170 /// Clear mtimes that are ambiguous with `now` (similar to
171 /// `clear_ambiguous_times` but for all files in the dirstate map), and
171 /// `clear_ambiguous_times` but for all files in the dirstate map), and
172 /// serialize bytes to write the `.hg/dirstate` file to disk in dirstate-v1
172 /// serialize bytes to write the `.hg/dirstate` file to disk in dirstate-v1
173 /// format.
173 /// format.
174 fn pack_v1(
174 fn pack_v1(
175 &mut self,
175 &mut self,
176 parents: DirstateParents,
176 parents: DirstateParents,
177 now: Timestamp,
177 now: Timestamp,
178 ) -> Result<Vec<u8>, DirstateError>;
178 ) -> Result<Vec<u8>, DirstateError>;
179
179
180 /// Clear mtimes that are ambiguous with `now` (similar to
180 /// Clear mtimes that are ambiguous with `now` (similar to
181 /// `clear_ambiguous_times` but for all files in the dirstate map), and
181 /// `clear_ambiguous_times` but for all files in the dirstate map), and
182 /// serialize bytes to write a dirstate data file to disk in dirstate-v2
182 /// serialize bytes to write a dirstate data file to disk in dirstate-v2
183 /// format.
183 /// format.
184 ///
184 ///
185 /// Returns new data together with whether that data should be appended to
185 /// Returns new data and metadata together with whether that data should be
186 /// the existing data file whose content is at `self.on_disk` (true),
186 /// appended to the existing data file whose content is at
187 /// instead of written to a new data file (false).
187 /// `self.on_disk` (true), instead of written to a new data file
188 /// (false).
188 ///
189 ///
189 /// Note: this is only supported by the tree dirstate map.
190 /// Note: this is only supported by the tree dirstate map.
190 fn pack_v2(
191 fn pack_v2(
191 &mut self,
192 &mut self,
192 now: Timestamp,
193 now: Timestamp,
193 can_append: bool,
194 can_append: bool,
194 ) -> Result<(Vec<u8>, bool), DirstateError>;
195 ) -> Result<(Vec<u8>, Vec<u8>, bool), DirstateError>;
195
196
196 /// Run the status algorithm.
197 /// Run the status algorithm.
197 ///
198 ///
198 /// This is not semantically a method of the dirstate map, but a different
199 /// This is not semantically a method of the dirstate map, but a different
199 /// algorithm is used for the flat vs. tree dirstate map, so having it in
200 /// algorithm is used for the flat vs. tree dirstate map, so having it in
200 /// this trait enables the same dynamic dispatch as with other methods.
201 /// this trait enables the same dynamic dispatch as with other methods.
201 fn status<'a>(
202 fn status<'a>(
202 &'a mut self,
203 &'a mut self,
203 matcher: &'a (dyn Matcher + Sync),
204 matcher: &'a (dyn Matcher + Sync),
204 root_dir: PathBuf,
205 root_dir: PathBuf,
205 ignore_files: Vec<PathBuf>,
206 ignore_files: Vec<PathBuf>,
206 options: StatusOptions,
207 options: StatusOptions,
207 ) -> Result<(DirstateStatus<'a>, Vec<PatternFileWarning>), StatusError>;
208 ) -> Result<(DirstateStatus<'a>, Vec<PatternFileWarning>), StatusError>;
208
209
209 /// Returns how many files in the dirstate map have a recorded copy source.
210 /// Returns how many files in the dirstate map have a recorded copy source.
210 fn copy_map_len(&self) -> usize;
211 fn copy_map_len(&self) -> usize;
211
212
212 /// Returns an iterator of `(path, copy_source)` for all files that have a
213 /// Returns an iterator of `(path, copy_source)` for all files that have a
213 /// copy source.
214 /// copy source.
214 fn copy_map_iter(&self) -> CopyMapIter<'_>;
215 fn copy_map_iter(&self) -> CopyMapIter<'_>;
215
216
216 /// Returns whether the given file has a copy source.
217 /// Returns whether the given file has a copy source.
217 fn copy_map_contains_key(
218 fn copy_map_contains_key(
218 &self,
219 &self,
219 key: &HgPath,
220 key: &HgPath,
220 ) -> Result<bool, DirstateV2ParseError>;
221 ) -> Result<bool, DirstateV2ParseError>;
221
222
222 /// Returns the copy source for the given file.
223 /// Returns the copy source for the given file.
223 fn copy_map_get(
224 fn copy_map_get(
224 &self,
225 &self,
225 key: &HgPath,
226 key: &HgPath,
226 ) -> Result<Option<&HgPath>, DirstateV2ParseError>;
227 ) -> Result<Option<&HgPath>, DirstateV2ParseError>;
227
228
228 /// Removes the recorded copy source if any for the given file, and returns
229 /// Removes the recorded copy source if any for the given file, and returns
229 /// it.
230 /// it.
230 fn copy_map_remove(
231 fn copy_map_remove(
231 &mut self,
232 &mut self,
232 key: &HgPath,
233 key: &HgPath,
233 ) -> Result<Option<HgPathBuf>, DirstateV2ParseError>;
234 ) -> Result<Option<HgPathBuf>, DirstateV2ParseError>;
234
235
235 /// Set the given `value` copy source for the given `key` file.
236 /// Set the given `value` copy source for the given `key` file.
236 fn copy_map_insert(
237 fn copy_map_insert(
237 &mut self,
238 &mut self,
238 key: HgPathBuf,
239 key: HgPathBuf,
239 value: HgPathBuf,
240 value: HgPathBuf,
240 ) -> Result<Option<HgPathBuf>, DirstateV2ParseError>;
241 ) -> Result<Option<HgPathBuf>, DirstateV2ParseError>;
241
242
242 /// Returns the number of files that have an entry.
243 /// Returns the number of files that have an entry.
243 fn len(&self) -> usize;
244 fn len(&self) -> usize;
244
245
245 /// Returns whether the given file has an entry.
246 /// Returns whether the given file has an entry.
246 fn contains_key(&self, key: &HgPath)
247 fn contains_key(&self, key: &HgPath)
247 -> Result<bool, DirstateV2ParseError>;
248 -> Result<bool, DirstateV2ParseError>;
248
249
249 /// Returns the entry, if any, for the given file.
250 /// Returns the entry, if any, for the given file.
250 fn get(
251 fn get(
251 &self,
252 &self,
252 key: &HgPath,
253 key: &HgPath,
253 ) -> Result<Option<DirstateEntry>, DirstateV2ParseError>;
254 ) -> Result<Option<DirstateEntry>, DirstateV2ParseError>;
254
255
255 /// Returns a `(path, entry)` iterator of files that have an entry.
256 /// Returns a `(path, entry)` iterator of files that have an entry.
256 ///
257 ///
257 /// Because parse errors can happen during iteration, the iterated items
258 /// Because parse errors can happen during iteration, the iterated items
258 /// are `Result`s.
259 /// are `Result`s.
259 fn iter(&self) -> StateMapIter<'_>;
260 fn iter(&self) -> StateMapIter<'_>;
260
261
261 /// In the tree dirstate, return an iterator of "directory" (entry-less)
262 /// In the tree dirstate, return an iterator of "directory" (entry-less)
262 /// nodes with the data stored for them. This is for `hg debugdirstate
263 /// nodes with the data stored for them. This is for `hg debugdirstate
263 /// --dirs`.
264 /// --dirs`.
264 ///
265 ///
265 /// In the flat dirstate, returns an empty iterator.
266 /// In the flat dirstate, returns an empty iterator.
266 ///
267 ///
267 /// Because parse errors can happen during iteration, the iterated items
268 /// Because parse errors can happen during iteration, the iterated items
268 /// are `Result`s.
269 /// are `Result`s.
269 fn iter_directories(
270 fn iter_directories(
270 &self,
271 &self,
271 ) -> Box<
272 ) -> Box<
272 dyn Iterator<
273 dyn Iterator<
273 Item = Result<
274 Item = Result<
274 (&HgPath, Option<Timestamp>),
275 (&HgPath, Option<Timestamp>),
275 DirstateV2ParseError,
276 DirstateV2ParseError,
276 >,
277 >,
277 > + Send
278 > + Send
278 + '_,
279 + '_,
279 >;
280 >;
280 }
281 }
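
Since both concrete maps implement this trait, code in the binding layer can be written once against a trait object. A small sketch of such dynamic dispatch (the function names are invented for illustration; only trait methods defined above are used):

fn summarize(map: &dyn DirstateMapMethods) -> (usize, usize) {
    // Works identically for the flat and the tree dirstate map.
    (map.len(), map.copy_map_len())
}

fn forget_everything(map: &mut dyn DirstateMapMethods) {
    map.clear()
}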
281
282
282 impl DirstateMapMethods for DirstateMap {
283 impl DirstateMapMethods for DirstateMap {
283 fn clear(&mut self) {
284 fn clear(&mut self) {
284 self.clear()
285 self.clear()
285 }
286 }
286
287
287 fn add_file(
288 fn add_file(
288 &mut self,
289 &mut self,
289 filename: &HgPath,
290 filename: &HgPath,
290 entry: DirstateEntry,
291 entry: DirstateEntry,
291 added: bool,
292 added: bool,
292 merged: bool,
293 merged: bool,
293 from_p2: bool,
294 from_p2: bool,
294 possibly_dirty: bool,
295 possibly_dirty: bool,
295 ) -> Result<(), DirstateError> {
296 ) -> Result<(), DirstateError> {
296 self.add_file(filename, entry, added, merged, from_p2, possibly_dirty)
297 self.add_file(filename, entry, added, merged, from_p2, possibly_dirty)
297 }
298 }
298
299
299 fn remove_file(
300 fn remove_file(
300 &mut self,
301 &mut self,
301 filename: &HgPath,
302 filename: &HgPath,
302 in_merge: bool,
303 in_merge: bool,
303 ) -> Result<(), DirstateError> {
304 ) -> Result<(), DirstateError> {
304 self.remove_file(filename, in_merge)
305 self.remove_file(filename, in_merge)
305 }
306 }
306
307
307 fn drop_file(&mut self, filename: &HgPath) -> Result<bool, DirstateError> {
308 fn drop_file(&mut self, filename: &HgPath) -> Result<bool, DirstateError> {
308 self.drop_file(filename)
309 self.drop_file(filename)
309 }
310 }
310
311
311 fn clear_ambiguous_times(
312 fn clear_ambiguous_times(
312 &mut self,
313 &mut self,
313 filenames: Vec<HgPathBuf>,
314 filenames: Vec<HgPathBuf>,
314 now: i32,
315 now: i32,
315 ) -> Result<(), DirstateV2ParseError> {
316 ) -> Result<(), DirstateV2ParseError> {
316 Ok(self.clear_ambiguous_times(filenames, now))
317 Ok(self.clear_ambiguous_times(filenames, now))
317 }
318 }
318
319
319 fn non_normal_entries_contains(
320 fn non_normal_entries_contains(
320 &mut self,
321 &mut self,
321 key: &HgPath,
322 key: &HgPath,
322 ) -> Result<bool, DirstateV2ParseError> {
323 ) -> Result<bool, DirstateV2ParseError> {
323 let (non_normal, _other_parent) =
324 let (non_normal, _other_parent) =
324 self.get_non_normal_other_parent_entries();
325 self.get_non_normal_other_parent_entries();
325 Ok(non_normal.contains(key))
326 Ok(non_normal.contains(key))
326 }
327 }
327
328
328 fn non_normal_entries_remove(&mut self, key: &HgPath) {
329 fn non_normal_entries_remove(&mut self, key: &HgPath) {
329 self.non_normal_entries_remove(key)
330 self.non_normal_entries_remove(key)
330 }
331 }
331
332
332 fn non_normal_or_other_parent_paths(
333 fn non_normal_or_other_parent_paths(
333 &mut self,
334 &mut self,
334 ) -> Box<dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + '_>
335 ) -> Box<dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + '_>
335 {
336 {
336 let (non_normal, other_parent) =
337 let (non_normal, other_parent) =
337 self.get_non_normal_other_parent_entries();
338 self.get_non_normal_other_parent_entries();
338 Box::new(non_normal.union(other_parent).map(|p| Ok(&**p)))
339 Box::new(non_normal.union(other_parent).map(|p| Ok(&**p)))
339 }
340 }
340
341
341 fn set_non_normal_other_parent_entries(&mut self, force: bool) {
342 fn set_non_normal_other_parent_entries(&mut self, force: bool) {
342 self.set_non_normal_other_parent_entries(force)
343 self.set_non_normal_other_parent_entries(force)
343 }
344 }
344
345
345 fn iter_non_normal_paths(
346 fn iter_non_normal_paths(
346 &mut self,
347 &mut self,
347 ) -> Box<
348 ) -> Box<
348 dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + Send + '_,
349 dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + Send + '_,
349 > {
350 > {
350 let (non_normal, _other_parent) =
351 let (non_normal, _other_parent) =
351 self.get_non_normal_other_parent_entries();
352 self.get_non_normal_other_parent_entries();
352 Box::new(non_normal.iter().map(|p| Ok(&**p)))
353 Box::new(non_normal.iter().map(|p| Ok(&**p)))
353 }
354 }
354
355
355 fn iter_non_normal_paths_panic(
356 fn iter_non_normal_paths_panic(
356 &self,
357 &self,
357 ) -> Box<
358 ) -> Box<
358 dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + Send + '_,
359 dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + Send + '_,
359 > {
360 > {
360 let (non_normal, _other_parent) =
361 let (non_normal, _other_parent) =
361 self.get_non_normal_other_parent_entries_panic();
362 self.get_non_normal_other_parent_entries_panic();
362 Box::new(non_normal.iter().map(|p| Ok(&**p)))
363 Box::new(non_normal.iter().map(|p| Ok(&**p)))
363 }
364 }
364
365
365 fn iter_other_parent_paths(
366 fn iter_other_parent_paths(
366 &mut self,
367 &mut self,
367 ) -> Box<
368 ) -> Box<
368 dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + Send + '_,
369 dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + Send + '_,
369 > {
370 > {
370 let (_non_normal, other_parent) =
371 let (_non_normal, other_parent) =
371 self.get_non_normal_other_parent_entries();
372 self.get_non_normal_other_parent_entries();
372 Box::new(other_parent.iter().map(|p| Ok(&**p)))
373 Box::new(other_parent.iter().map(|p| Ok(&**p)))
373 }
374 }
374
375
375 fn has_tracked_dir(
376 fn has_tracked_dir(
376 &mut self,
377 &mut self,
377 directory: &HgPath,
378 directory: &HgPath,
378 ) -> Result<bool, DirstateError> {
379 ) -> Result<bool, DirstateError> {
379 self.has_tracked_dir(directory)
380 self.has_tracked_dir(directory)
380 }
381 }
381
382
382 fn has_dir(&mut self, directory: &HgPath) -> Result<bool, DirstateError> {
383 fn has_dir(&mut self, directory: &HgPath) -> Result<bool, DirstateError> {
383 self.has_dir(directory)
384 self.has_dir(directory)
384 }
385 }
385
386
386 fn pack_v1(
387 fn pack_v1(
387 &mut self,
388 &mut self,
388 parents: DirstateParents,
389 parents: DirstateParents,
389 now: Timestamp,
390 now: Timestamp,
390 ) -> Result<Vec<u8>, DirstateError> {
391 ) -> Result<Vec<u8>, DirstateError> {
391 self.pack(parents, now)
392 self.pack(parents, now)
392 }
393 }
393
394
394 fn pack_v2(
395 fn pack_v2(
395 &mut self,
396 &mut self,
396 _now: Timestamp,
397 _now: Timestamp,
397 _can_append: bool,
398 _can_append: bool,
398 ) -> Result<(Vec<u8>, bool), DirstateError> {
399 ) -> Result<(Vec<u8>, Vec<u8>, bool), DirstateError> {
399 panic!(
400 panic!(
400 "should have used dirstate_tree::DirstateMap to use the v2 format"
401 "should have used dirstate_tree::DirstateMap to use the v2 format"
401 )
402 )
402 }
403 }
403
404
404 fn status<'a>(
405 fn status<'a>(
405 &'a mut self,
406 &'a mut self,
406 matcher: &'a (dyn Matcher + Sync),
407 matcher: &'a (dyn Matcher + Sync),
407 root_dir: PathBuf,
408 root_dir: PathBuf,
408 ignore_files: Vec<PathBuf>,
409 ignore_files: Vec<PathBuf>,
409 options: StatusOptions,
410 options: StatusOptions,
410 ) -> Result<(DirstateStatus<'a>, Vec<PatternFileWarning>), StatusError>
411 ) -> Result<(DirstateStatus<'a>, Vec<PatternFileWarning>), StatusError>
411 {
412 {
412 crate::status(self, matcher, root_dir, ignore_files, options)
413 crate::status(self, matcher, root_dir, ignore_files, options)
413 }
414 }
414
415
415 fn copy_map_len(&self) -> usize {
416 fn copy_map_len(&self) -> usize {
416 self.copy_map.len()
417 self.copy_map.len()
417 }
418 }
418
419
419 fn copy_map_iter(&self) -> CopyMapIter<'_> {
420 fn copy_map_iter(&self) -> CopyMapIter<'_> {
420 Box::new(
421 Box::new(
421 self.copy_map
422 self.copy_map
422 .iter()
423 .iter()
423 .map(|(key, value)| Ok((&**key, &**value))),
424 .map(|(key, value)| Ok((&**key, &**value))),
424 )
425 )
425 }
426 }
426
427
427 fn copy_map_contains_key(
428 fn copy_map_contains_key(
428 &self,
429 &self,
429 key: &HgPath,
430 key: &HgPath,
430 ) -> Result<bool, DirstateV2ParseError> {
431 ) -> Result<bool, DirstateV2ParseError> {
431 Ok(self.copy_map.contains_key(key))
432 Ok(self.copy_map.contains_key(key))
432 }
433 }
433
434
434 fn copy_map_get(
435 fn copy_map_get(
435 &self,
436 &self,
436 key: &HgPath,
437 key: &HgPath,
437 ) -> Result<Option<&HgPath>, DirstateV2ParseError> {
438 ) -> Result<Option<&HgPath>, DirstateV2ParseError> {
438 Ok(self.copy_map.get(key).map(|p| &**p))
439 Ok(self.copy_map.get(key).map(|p| &**p))
439 }
440 }
440
441
441 fn copy_map_remove(
442 fn copy_map_remove(
442 &mut self,
443 &mut self,
443 key: &HgPath,
444 key: &HgPath,
444 ) -> Result<Option<HgPathBuf>, DirstateV2ParseError> {
445 ) -> Result<Option<HgPathBuf>, DirstateV2ParseError> {
445 Ok(self.copy_map.remove(key))
446 Ok(self.copy_map.remove(key))
446 }
447 }
447
448
448 fn copy_map_insert(
449 fn copy_map_insert(
449 &mut self,
450 &mut self,
450 key: HgPathBuf,
451 key: HgPathBuf,
451 value: HgPathBuf,
452 value: HgPathBuf,
452 ) -> Result<Option<HgPathBuf>, DirstateV2ParseError> {
453 ) -> Result<Option<HgPathBuf>, DirstateV2ParseError> {
453 Ok(self.copy_map.insert(key, value))
454 Ok(self.copy_map.insert(key, value))
454 }
455 }
455
456
456 fn len(&self) -> usize {
457 fn len(&self) -> usize {
457 (&**self).len()
458 (&**self).len()
458 }
459 }
459
460
460 fn contains_key(
461 fn contains_key(
461 &self,
462 &self,
462 key: &HgPath,
463 key: &HgPath,
463 ) -> Result<bool, DirstateV2ParseError> {
464 ) -> Result<bool, DirstateV2ParseError> {
464 Ok((&**self).contains_key(key))
465 Ok((&**self).contains_key(key))
465 }
466 }
466
467
467 fn get(
468 fn get(
468 &self,
469 &self,
469 key: &HgPath,
470 key: &HgPath,
470 ) -> Result<Option<DirstateEntry>, DirstateV2ParseError> {
471 ) -> Result<Option<DirstateEntry>, DirstateV2ParseError> {
471 Ok((&**self).get(key).cloned())
472 Ok((&**self).get(key).cloned())
472 }
473 }
473
474
474 fn iter(&self) -> StateMapIter<'_> {
475 fn iter(&self) -> StateMapIter<'_> {
475 Box::new((&**self).iter().map(|(key, value)| Ok((&**key, *value))))
476 Box::new((&**self).iter().map(|(key, value)| Ok((&**key, *value))))
476 }
477 }
477
478
478 fn iter_directories(
479 fn iter_directories(
479 &self,
480 &self,
480 ) -> Box<
481 ) -> Box<
481 dyn Iterator<
482 dyn Iterator<
482 Item = Result<
483 Item = Result<
483 (&HgPath, Option<Timestamp>),
484 (&HgPath, Option<Timestamp>),
484 DirstateV2ParseError,
485 DirstateV2ParseError,
485 >,
486 >,
486 > + Send
487 > + Send
487 + '_,
488 + '_,
488 > {
489 > {
489 Box::new(std::iter::empty())
490 Box::new(std::iter::empty())
490 }
491 }
491 }
492 }
@@ -1,756 +1,753 b''
1 //! The "version 2" disk representation of the dirstate
1 //! The "version 2" disk representation of the dirstate
2 //!
2 //!
3 //! # File format
3 //! # File format
4 //!
4 //!
5 //! In dirstate-v2 format, the `.hg/dirstate` file is a "docket" that starts
5 //! In dirstate-v2 format, the `.hg/dirstate` file is a "docket" that starts
6 //! with a fixed-size header whose layout is defined by the `DocketHeader`
6 //! with a fixed-size header whose layout is defined by the `DocketHeader`
7 //! struct, followed by the data file identifier.
7 //! struct, followed by the data file identifier.
8 //!
8 //!
9 //! A separate `.hg/dirstate.{uuid}.d` file contains most of the data. That
9 //! A separate `.hg/dirstate.{uuid}.d` file contains most of the data. That
10 //! file may be longer than the size given in the docket, but not shorter. Only
10 //! file may be longer than the size given in the docket, but not shorter. Only
11 //! the start of the data file up to the given size is considered. The
11 //! the start of the data file up to the given size is considered. The
12 //! fixed-size "root" of the dirstate tree whose layout is defined by the
12 //! fixed-size "root" of the dirstate tree whose layout is defined by the
13 //! `Root` struct is found at the end of that slice of data.
13 //! `Root` struct is found at the end of that slice of data.
14 //!
14 //!
15 //! Its `root_nodes` field contains the slice (offset and length) to
15 //! Its `root_nodes` field contains the slice (offset and length) to
16 //! the nodes representing the files and directories at the root of the
16 //! the nodes representing the files and directories at the root of the
17 //! repository. Each node is also fixed-size, defined by the `Node` struct.
17 //! repository. Each node is also fixed-size, defined by the `Node` struct.
18 //! Nodes in turn contain slices to variable-size paths, and to their own child
18 //! Nodes in turn contain slices to variable-size paths, and to their own child
19 //! nodes (if any) for nested files and directories.
19 //! nodes (if any) for nested files and directories.
20
20
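
To make the two-file layout concrete, here is a hedged sketch of the full read path; `read_file` is a hypothetical stand-in for the repository's actual vfs access, and the sketch uses the `read_docket` and `read` functions defined later in this module:

// Hypothetical helper:
fn read_file(_path: &str) -> Result<Vec<u8>, DirstateError> { todo!() }

fn open_dirstate_v2() -> Result<(), DirstateError> {
    let docket_bytes = read_file(".hg/dirstate")?;
    let docket = read_docket(&docket_bytes)?;
    let data_bytes = read_file(&docket.data_filename())?;
    // The data file may be longer than `data_size`; only the prefix counts.
    let data = &data_bytes[..docket.data_size()];
    // With this change, the fixed-size tree metadata comes from the docket
    // itself rather than from the end of the data file.
    let _map = read(data, docket.tree_metadata())?;
    Ok(())
}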
21 use crate::dirstate_tree::dirstate_map::{self, DirstateMap, NodeRef};
21 use crate::dirstate_tree::dirstate_map::{self, DirstateMap, NodeRef};
22 use crate::dirstate_tree::path_with_basename::WithBasename;
22 use crate::dirstate_tree::path_with_basename::WithBasename;
23 use crate::errors::HgError;
23 use crate::errors::HgError;
24 use crate::utils::hg_path::HgPath;
24 use crate::utils::hg_path::HgPath;
25 use crate::DirstateEntry;
25 use crate::DirstateEntry;
26 use crate::DirstateError;
26 use crate::DirstateError;
27 use crate::DirstateParents;
27 use crate::DirstateParents;
28 use crate::EntryState;
28 use crate::EntryState;
29 use bytes_cast::unaligned::{I32Be, I64Be, U16Be, U32Be};
29 use bytes_cast::unaligned::{I32Be, I64Be, U16Be, U32Be};
30 use bytes_cast::BytesCast;
30 use bytes_cast::BytesCast;
31 use format_bytes::format_bytes;
31 use format_bytes::format_bytes;
32 use std::borrow::Cow;
32 use std::borrow::Cow;
33 use std::convert::{TryFrom, TryInto};
33 use std::convert::{TryFrom, TryInto};
34 use std::time::{Duration, SystemTime, UNIX_EPOCH};
34 use std::time::{Duration, SystemTime, UNIX_EPOCH};
35
35
36 /// Added at the start of `.hg/dirstate` when the "v2" format is used.
36 /// Added at the start of `.hg/dirstate` when the "v2" format is used.
37 /// This is a redundant sanity check more than an actual "magic number" since
37 /// This is a redundant sanity check more than an actual "magic number" since
38 /// `.hg/requires` already governs which format should be used.
38 /// `.hg/requires` already governs which format should be used.
39 pub const V2_FORMAT_MARKER: &[u8; 12] = b"dirstate-v2\n";
39 pub const V2_FORMAT_MARKER: &[u8; 12] = b"dirstate-v2\n";
40
40
41 /// Keep space for 256-bit hashes
41 /// Keep space for 256-bit hashes
42 const STORED_NODE_ID_BYTES: usize = 32;
42 const STORED_NODE_ID_BYTES: usize = 32;
43
43
44 /// … even though only 160 bits are used for now, with SHA-1
44 /// … even though only 160 bits are used for now, with SHA-1
45 const USED_NODE_ID_BYTES: usize = 20;
45 const USED_NODE_ID_BYTES: usize = 20;
46
46
47 pub(super) const IGNORE_PATTERNS_HASH_LEN: usize = 20;
47 pub(super) const IGNORE_PATTERNS_HASH_LEN: usize = 20;
48 pub(super) type IgnorePatternsHash = [u8; IGNORE_PATTERNS_HASH_LEN];
48 pub(super) type IgnorePatternsHash = [u8; IGNORE_PATTERNS_HASH_LEN];
49
49
50 /// Must match the constant of the same name in
51 /// `mercurial/dirstateutils/docket.py`
52 const TREE_METADATA_SIZE: usize = 40;
53
54 /// Make sure that size-affecting changes are made knowingly
55 #[allow(unused)]
56 fn static_assert_size_of() {
57 let _ = std::mem::transmute::<DocketHeader, [u8; 121]>;
58 let _ = std::mem::transmute::<TreeMetadata, [u8; TREE_METADATA_SIZE]>;
59 let _ = std::mem::transmute::<Node, [u8; 43]>;
60 }
61
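
The `transmute` lines above act as a compile-time size assertion: merely naming `std::mem::transmute::<T, [u8; N]>` as a function item forces the compiler to check that `size_of::<T>() == N`, so any change that alters a struct's size breaks the build. The asserted sizes can be verified by hand from the field types used below:

// Assuming the #[repr(C)] layouts below, which contain only byte arrays and
// byte-aligned big-endian integers (so no padding):
//
// TreeMetadata = root_nodes (Offset 4 + Size 4 = 8)
//              + nodes_with_entry_count (4)
//              + nodes_with_copy_source_count (4)
//              + unreachable_bytes (4)
//              + ignore_patterns_hash (20)
//              = 40 bytes (TREE_METADATA_SIZE)
//
// DocketHeader = marker (12) + parent_1 (32) + parent_2 (32)
//              + data_size (4) + metadata (40) + uuid_size (1)
//              = 121 bytes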
50 // Must match `HEADER` in `mercurial/dirstateutils/docket.py`
62 // Must match `HEADER` in `mercurial/dirstateutils/docket.py`
51 #[derive(BytesCast)]
63 #[derive(BytesCast)]
52 #[repr(C)]
64 #[repr(C)]
53 struct DocketHeader {
65 struct DocketHeader {
54 marker: [u8; V2_FORMAT_MARKER.len()],
66 marker: [u8; V2_FORMAT_MARKER.len()],
55 parent_1: [u8; STORED_NODE_ID_BYTES],
67 parent_1: [u8; STORED_NODE_ID_BYTES],
56 parent_2: [u8; STORED_NODE_ID_BYTES],
68 parent_2: [u8; STORED_NODE_ID_BYTES],
57
69
58 /// Counted in bytes
70 /// Counted in bytes
59 data_size: Size,
71 data_size: Size,
60
72
73 metadata: TreeMetadata,
74
61 uuid_size: u8,
75 uuid_size: u8,
62 }
76 }
63
77
64 pub struct Docket<'on_disk> {
78 pub struct Docket<'on_disk> {
65 header: &'on_disk DocketHeader,
79 header: &'on_disk DocketHeader,
66 uuid: &'on_disk [u8],
80 uuid: &'on_disk [u8],
67 }
81 }
68
82
69 #[derive(BytesCast)]
83 #[derive(BytesCast)]
70 #[repr(C)]
84 #[repr(C)]
71 struct Root {
85 struct TreeMetadata {
72 root_nodes: ChildNodes,
86 root_nodes: ChildNodes,
73 nodes_with_entry_count: Size,
87 nodes_with_entry_count: Size,
74 nodes_with_copy_source_count: Size,
88 nodes_with_copy_source_count: Size,
75
89
76 /// How many bytes of this data file are not used anymore
90 /// How many bytes of this data file are not used anymore
77 unreachable_bytes: Size,
91 unreachable_bytes: Size,
78
92
79 /// If non-zero, a hash of ignore files that were used for some previous
93 /// If non-zero, a hash of ignore files that were used for some previous
80 /// run of the `status` algorithm.
94 /// run of the `status` algorithm.
81 ///
95 ///
82 /// We define:
96 /// We define:
83 ///
97 ///
84 /// * "Root" ignore files are `.hgignore` at the root of the repository if
98 /// * "Root" ignore files are `.hgignore` at the root of the repository if
85 /// it exists, and files from `ui.ignore.*` config. This set of files is
99 /// it exists, and files from `ui.ignore.*` config. This set of files is
86 /// then sorted by the string representation of their path.
100 /// then sorted by the string representation of their path.
87 /// * The "expanded contents" of an ignore files is the byte string made
101 /// * The "expanded contents" of an ignore files is the byte string made
88 /// by concatenating its contents with the "expanded contents" of other
102 /// by concatenating its contents with the "expanded contents" of other
89 /// files included with `include:` or `subinclude:` files, in inclusion
103 /// files included with `include:` or `subinclude:` files, in inclusion
90 /// order. This definition is recursive, as included files can
104 /// order. This definition is recursive, as included files can
91 /// themselves include more files.
105 /// themselves include more files.
92 ///
106 ///
93 /// This hash is defined as the SHA-1 of the concatenation (in sorted
107 /// This hash is defined as the SHA-1 of the concatenation (in sorted
94 /// order) of the "expanded contents" of each "root" ignore file.
108 /// order) of the "expanded contents" of each "root" ignore file.
95 /// (Note that computing this does not require actually concatenating byte
109 /// (Note that computing this does not require actually concatenating byte
96 /// strings into contiguous memory, instead SHA-1 hashing can be done
110 /// strings into contiguous memory, instead SHA-1 hashing can be done
97 /// incrementally.)
111 /// incrementally.)
98 ignore_patterns_hash: IgnorePatternsHash,
112 ignore_patterns_hash: IgnorePatternsHash,
99 }
113 }
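
A hedged sketch of the hash computation described in the comment above, using the `sha1` crate for illustration (the actual implementation lives elsewhere in `hg-core`):

use sha1::{Digest, Sha1};

/// `expanded_roots` must hold the "expanded contents" of each root ignore
/// file, already sorted by the path of the root file it came from.
fn ignore_patterns_hash(expanded_roots: &[Vec<u8>]) -> [u8; 20] {
    let mut hasher = Sha1::new();
    for contents in expanded_roots {
        // Incremental hashing: no need to concatenate into one buffer.
        hasher.update(contents);
    }
    hasher.finalize().into()
}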
100
114
101 #[derive(BytesCast)]
115 #[derive(BytesCast)]
102 #[repr(C)]
116 #[repr(C)]
103 pub(super) struct Node {
117 pub(super) struct Node {
104 full_path: PathSlice,
118 full_path: PathSlice,
105
119
106 /// In bytes from `self.full_path.start`
120 /// In bytes from `self.full_path.start`
107 base_name_start: PathSize,
121 base_name_start: PathSize,
108
122
109 copy_source: OptPathSlice,
123 copy_source: OptPathSlice,
110 children: ChildNodes,
124 children: ChildNodes,
111 pub(super) descendants_with_entry_count: Size,
125 pub(super) descendants_with_entry_count: Size,
112 pub(super) tracked_descendants_count: Size,
126 pub(super) tracked_descendants_count: Size,
113
127
114 /// Depending on the value of `state`:
128 /// Depending on the value of `state`:
115 ///
129 ///
116 /// * A null byte: `data` is not used.
130 /// * A null byte: `data` is not used.
117 ///
131 ///
118 /// * A `n`, `a`, `r`, or `m` ASCII byte: `state` and `data` together
132 /// * A `n`, `a`, `r`, or `m` ASCII byte: `state` and `data` together
119 /// represent a dirstate entry like in the v1 format.
133 /// represent a dirstate entry like in the v1 format.
120 ///
134 ///
121 /// * A `d` ASCII byte: the bytes of `data` should instead be interpreted
135 /// * A `d` ASCII byte: the bytes of `data` should instead be interpreted
122 /// as the `Timestamp` for the mtime of a cached directory.
136 /// as the `Timestamp` for the mtime of a cached directory.
123 ///
137 ///
124 /// The presence of this state means that at some point, this path in
138 /// The presence of this state means that at some point, this path in
125 /// the working directory was observed:
139 /// the working directory was observed:
126 ///
140 ///
127 /// - To be a directory
141 /// - To be a directory
128 /// - With the modification time as given by `Timestamp`
142 /// - With the modification time as given by `Timestamp`
129 /// - That timestamp was already strictly in the past when observed,
143 /// - That timestamp was already strictly in the past when observed,
130 /// meaning that later changes cannot happen in the same clock tick
144 /// meaning that later changes cannot happen in the same clock tick
131 /// and must cause a different modification time (unless the system
145 /// and must cause a different modification time (unless the system
132 /// clock jumps back and we get unlucky, which is not impossible but
146 /// clock jumps back and we get unlucky, which is not impossible but
133 /// deemed unlikely enough).
147 /// deemed unlikely enough).
134 /// - All direct children of this directory (as returned by
148 /// - All direct children of this directory (as returned by
135 /// `std::fs::read_dir`) either have a corresponding dirstate node, or
149 /// `std::fs::read_dir`) either have a corresponding dirstate node, or
136 /// are ignored by ignore patterns whose hash is in
150 /// are ignored by ignore patterns whose hash is in
137 /// `Root::ignore_patterns_hash`.
151 /// `TreeMetadata::ignore_patterns_hash`.
138 ///
152 ///
139 /// This means that if `std::fs::symlink_metadata` later reports the
153 /// This means that if `std::fs::symlink_metadata` later reports the
140 /// same modification time and ignored patterns haven’t changed, a run
154 /// same modification time and ignored patterns haven’t changed, a run
141 /// of status that is not listing ignored files can skip calling
155 /// of status that is not listing ignored files can skip calling
142 /// `std::fs::read_dir` again for this directory, and iterate child
156 /// `std::fs::read_dir` again for this directory, and iterate child
143 /// dirstate nodes instead.
157 /// dirstate nodes instead.
144 state: u8,
158 state: u8,
145 data: Entry,
159 data: Entry,
146 }
160 }
147
161
148 #[derive(BytesCast, Copy, Clone)]
162 #[derive(BytesCast, Copy, Clone)]
149 #[repr(C)]
163 #[repr(C)]
150 struct Entry {
164 struct Entry {
151 mode: I32Be,
165 mode: I32Be,
152 mtime: I32Be,
166 mtime: I32Be,
153 size: I32Be,
167 size: I32Be,
154 }
168 }
155
169
156 /// Duration since the Unix epoch
170 /// Duration since the Unix epoch
157 #[derive(BytesCast, Copy, Clone, PartialEq)]
171 #[derive(BytesCast, Copy, Clone, PartialEq)]
158 #[repr(C)]
172 #[repr(C)]
159 pub(super) struct Timestamp {
173 pub(super) struct Timestamp {
160 seconds: I64Be,
174 seconds: I64Be,
161
175
162 /// In `0 .. 1_000_000_000`.
176 /// In `0 .. 1_000_000_000`.
163 ///
177 ///
164 /// This timestamp is later or earlier than `(seconds, 0)` by this many
178 /// This timestamp is later or earlier than `(seconds, 0)` by this many
165 /// nanoseconds, if `seconds` is non-negative or negative, respectively.
179 /// nanoseconds, if `seconds` is non-negative or negative, respectively.
166 nanoseconds: U32Be,
180 nanoseconds: U32Be,
167 }
181 }
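
The sign convention for `nanoseconds` is easy to misread: it always moves the timestamp further from zero, in the direction of the sign of `seconds`. A small decoding example (illustrative, not part of the patch):

/// Decode an on-disk (seconds, nanoseconds) pair into fractional seconds.
fn timestamp_as_f64(seconds: i64, nanoseconds: u32) -> f64 {
    let frac = nanoseconds as f64 / 1_000_000_000.0;
    if seconds >= 0 {
        seconds as f64 + frac // (1, 250_000_000) => 1.25s after the epoch
    } else {
        seconds as f64 - frac // (-1, 250_000_000) => 1.25s before the epoch
    }
}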
168
182
169 /// Counted in bytes from the start of the file
183 /// Counted in bytes from the start of the file
170 ///
184 ///
171 /// NOTE: not supporting `.hg/dirstate` files larger than 4 GiB.
185 /// NOTE: not supporting `.hg/dirstate` files larger than 4 GiB.
172 type Offset = U32Be;
186 type Offset = U32Be;
173
187
174 /// Counted in number of items
188 /// Counted in number of items
175 ///
189 ///
176 /// NOTE: we choose not to support counting more than 4 billion nodes anywhere.
190 /// NOTE: we choose not to support counting more than 4 billion nodes anywhere.
177 type Size = U32Be;
191 type Size = U32Be;
178
192
179 /// Counted in bytes
193 /// Counted in bytes
180 ///
194 ///
181 /// NOTE: we choose not to support file names/paths longer than 64 KiB.
195 /// NOTE: we choose not to support file names/paths longer than 64 KiB.
182 type PathSize = U16Be;
196 type PathSize = U16Be;
183
197
184 /// A contiguous sequence of `len` times `Node`, representing the child nodes
198 /// A contiguous sequence of `len` times `Node`, representing the child nodes
185 /// of either some other node or of the repository root.
199 /// of either some other node or of the repository root.
186 ///
200 ///
187 /// Always sorted by ascending `full_path`, to allow binary search.
201 /// Always sorted by ascending `full_path`, to allow binary search.
188 /// Since nodes with the same parent nodes also have the same parent path,
202 /// Since nodes with the same parent nodes also have the same parent path,
189 /// only the `base_name`s need to be compared during binary search.
203 /// only the `base_name`s need to be compared during binary search.
190 #[derive(BytesCast, Copy, Clone)]
204 #[derive(BytesCast, Copy, Clone)]
191 #[repr(C)]
205 #[repr(C)]
192 struct ChildNodes {
206 struct ChildNodes {
193 start: Offset,
207 start: Offset,
194 len: Size,
208 len: Size,
195 }
209 }
196
210
197 /// A `HgPath` of `len` bytes
211 /// A `HgPath` of `len` bytes
198 #[derive(BytesCast, Copy, Clone)]
212 #[derive(BytesCast, Copy, Clone)]
199 #[repr(C)]
213 #[repr(C)]
200 struct PathSlice {
214 struct PathSlice {
201 start: Offset,
215 start: Offset,
202 len: PathSize,
216 len: PathSize,
203 }
217 }
204
218
205 /// Either nothing if `start == 0`, or a `HgPath` of `len` bytes
219 /// Either nothing if `start == 0`, or a `HgPath` of `len` bytes
206 type OptPathSlice = PathSlice;
220 type OptPathSlice = PathSlice;
207
221
208 /// Make sure that size-affecting changes are made knowingly
209 fn _static_assert_size_of() {
210 let _ = std::mem::transmute::<DocketHeader, [u8; 81]>;
211 let _ = std::mem::transmute::<Root, [u8; 40]>;
212 let _ = std::mem::transmute::<Node, [u8; 43]>;
213 }
214
215 /// Unexpected file format found in `.hg/dirstate` with the "v2" format.
222 /// Unexpected file format found in `.hg/dirstate` with the "v2" format.
216 ///
223 ///
217 /// This should only happen if Mercurial is buggy or a repository is corrupted.
224 /// This should only happen if Mercurial is buggy or a repository is corrupted.
218 #[derive(Debug)]
225 #[derive(Debug)]
219 pub struct DirstateV2ParseError;
226 pub struct DirstateV2ParseError;
220
227
221 impl From<DirstateV2ParseError> for HgError {
228 impl From<DirstateV2ParseError> for HgError {
222 fn from(_: DirstateV2ParseError) -> Self {
229 fn from(_: DirstateV2ParseError) -> Self {
223 HgError::corrupted("dirstate-v2 parse error")
230 HgError::corrupted("dirstate-v2 parse error")
224 }
231 }
225 }
232 }
226
233
227 impl From<DirstateV2ParseError> for crate::DirstateError {
234 impl From<DirstateV2ParseError> for crate::DirstateError {
228 fn from(error: DirstateV2ParseError) -> Self {
235 fn from(error: DirstateV2ParseError) -> Self {
229 HgError::from(error).into()
236 HgError::from(error).into()
230 }
237 }
231 }
238 }
232
239
233 impl<'on_disk> Docket<'on_disk> {
240 impl<'on_disk> Docket<'on_disk> {
234 pub fn parents(&self) -> DirstateParents {
241 pub fn parents(&self) -> DirstateParents {
235 use crate::Node;
242 use crate::Node;
236 let p1 = Node::try_from(&self.header.parent_1[..USED_NODE_ID_BYTES])
243 let p1 = Node::try_from(&self.header.parent_1[..USED_NODE_ID_BYTES])
237 .unwrap()
244 .unwrap()
238 .clone();
245 .clone();
239 let p2 = Node::try_from(&self.header.parent_2[..USED_NODE_ID_BYTES])
246 let p2 = Node::try_from(&self.header.parent_2[..USED_NODE_ID_BYTES])
240 .unwrap()
247 .unwrap()
241 .clone();
248 .clone();
242 DirstateParents { p1, p2 }
249 DirstateParents { p1, p2 }
243 }
250 }
244
251
252 pub fn tree_metadata(&self) -> &[u8] {
253 self.header.metadata.as_bytes()
254 }
255
245 pub fn data_size(&self) -> usize {
256 pub fn data_size(&self) -> usize {
246 // This `unwrap` could only panic on a 16-bit CPU
257 // This `unwrap` could only panic on a 16-bit CPU
247 self.header.data_size.get().try_into().unwrap()
258 self.header.data_size.get().try_into().unwrap()
248 }
259 }
249
260
250 pub fn data_filename(&self) -> String {
261 pub fn data_filename(&self) -> String {
251 String::from_utf8(format_bytes!(b"dirstate.{}.d", self.uuid)).unwrap()
262 String::from_utf8(format_bytes!(b"dirstate.{}.d", self.uuid)).unwrap()
252 }
263 }
253 }
264 }
254
265
255 pub fn read_docket(
266 pub fn read_docket(
256 on_disk: &[u8],
267 on_disk: &[u8],
257 ) -> Result<Docket<'_>, DirstateV2ParseError> {
268 ) -> Result<Docket<'_>, DirstateV2ParseError> {
258 let (header, uuid) =
269 let (header, uuid) =
259 DocketHeader::from_bytes(on_disk).map_err(|_| DirstateV2ParseError)?;
270 DocketHeader::from_bytes(on_disk).map_err(|_| DirstateV2ParseError)?;
260 let uuid_size = header.uuid_size as usize;
271 let uuid_size = header.uuid_size as usize;
261 if header.marker == *V2_FORMAT_MARKER && uuid.len() == uuid_size {
272 if header.marker == *V2_FORMAT_MARKER && uuid.len() == uuid_size {
262 Ok(Docket { header, uuid })
273 Ok(Docket { header, uuid })
263 } else {
274 } else {
264 Err(DirstateV2ParseError)
275 Err(DirstateV2ParseError)
265 }
276 }
266 }
277 }
267
278
268 fn read_root<'on_disk>(
269 on_disk: &'on_disk [u8],
270 ) -> Result<&'on_disk Root, DirstateV2ParseError> {
271 // Find the `Root` at the end of the given slice
272 let root_offset = on_disk
273 .len()
274 .checked_sub(std::mem::size_of::<Root>())
275 // A non-empty slice too short is an error
276 .ok_or(DirstateV2ParseError)?;
277 let (root, _) = Root::from_bytes(&on_disk[root_offset..])
278 .map_err(|_| DirstateV2ParseError)?;
279 Ok(root)
280 }
281
282 pub(super) fn read<'on_disk>(
279 pub(super) fn read<'on_disk>(
283 on_disk: &'on_disk [u8],
280 on_disk: &'on_disk [u8],
281 metadata: &[u8],
284 ) -> Result<DirstateMap<'on_disk>, DirstateV2ParseError> {
282 ) -> Result<DirstateMap<'on_disk>, DirstateV2ParseError> {
285 if on_disk.is_empty() {
283 if on_disk.is_empty() {
286 return Ok(DirstateMap::empty(on_disk));
284 return Ok(DirstateMap::empty(on_disk));
287 }
285 }
288 let root = read_root(on_disk)?;
286 let (meta, _) = TreeMetadata::from_bytes(metadata)
289 let mut unreachable_bytes = root.unreachable_bytes.get();
287 .map_err(|_| DirstateV2ParseError)?;
290 // Each append writes a new `Root`, so it’s never reused
291 unreachable_bytes += std::mem::size_of::<Root>() as u32;
292 let dirstate_map = DirstateMap {
288 let dirstate_map = DirstateMap {
293 on_disk,
289 on_disk,
294 root: dirstate_map::ChildNodes::OnDisk(read_nodes(
290 root: dirstate_map::ChildNodes::OnDisk(read_nodes(
295 on_disk,
291 on_disk,
296 root.root_nodes,
292 meta.root_nodes,
297 )?),
293 )?),
298 nodes_with_entry_count: root.nodes_with_entry_count.get(),
294 nodes_with_entry_count: meta.nodes_with_entry_count.get(),
299 nodes_with_copy_source_count: root.nodes_with_copy_source_count.get(),
295 nodes_with_copy_source_count: meta.nodes_with_copy_source_count.get(),
300 ignore_patterns_hash: root.ignore_patterns_hash,
296 ignore_patterns_hash: meta.ignore_patterns_hash,
301 unreachable_bytes,
297 unreachable_bytes: meta.unreachable_bytes.get(),
302 };
298 };
303 Ok(dirstate_map)
299 Ok(dirstate_map)
304 }
300 }
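
One bookkeeping consequence of the move is visible in this hunk: previously, every append rewrote a fresh `Root` struct at the end of the data file, leaving the prior one behind as garbage, so the reader had to count one `Root` as immediately unreachable on every load. With the metadata stored in the docket, appended data contains only nodes and paths, and the stored counter can be used as-is:

// Old reader (removed above):
//     let mut unreachable_bytes = root.unreachable_bytes.get();
//     unreachable_bytes += std::mem::size_of::<Root>() as u32;
// New reader:
//     unreachable_bytes: meta.unreachable_bytes.get(),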
305
301
306 impl Node {
302 impl Node {
307 pub(super) fn full_path<'on_disk>(
303 pub(super) fn full_path<'on_disk>(
308 &self,
304 &self,
309 on_disk: &'on_disk [u8],
305 on_disk: &'on_disk [u8],
310 ) -> Result<&'on_disk HgPath, DirstateV2ParseError> {
306 ) -> Result<&'on_disk HgPath, DirstateV2ParseError> {
311 read_hg_path(on_disk, self.full_path)
307 read_hg_path(on_disk, self.full_path)
312 }
308 }
313
309
314 pub(super) fn base_name_start<'on_disk>(
310 pub(super) fn base_name_start<'on_disk>(
315 &self,
311 &self,
316 ) -> Result<usize, DirstateV2ParseError> {
312 ) -> Result<usize, DirstateV2ParseError> {
317 let start = self.base_name_start.get();
313 let start = self.base_name_start.get();
318 if start < self.full_path.len.get() {
314 if start < self.full_path.len.get() {
319 let start = usize::try_from(start)
315 let start = usize::try_from(start)
320 // u32 -> usize, could only panic on a 16-bit CPU
316 // u32 -> usize, could only panic on a 16-bit CPU
321 .expect("dirstate-v2 base_name_start out of bounds");
317 .expect("dirstate-v2 base_name_start out of bounds");
322 Ok(start)
318 Ok(start)
323 } else {
319 } else {
324 Err(DirstateV2ParseError)
320 Err(DirstateV2ParseError)
325 }
321 }
326 }
322 }
327
323
328 pub(super) fn base_name<'on_disk>(
324 pub(super) fn base_name<'on_disk>(
329 &self,
325 &self,
330 on_disk: &'on_disk [u8],
326 on_disk: &'on_disk [u8],
331 ) -> Result<&'on_disk HgPath, DirstateV2ParseError> {
327 ) -> Result<&'on_disk HgPath, DirstateV2ParseError> {
332 let full_path = self.full_path(on_disk)?;
328 let full_path = self.full_path(on_disk)?;
333 let base_name_start = self.base_name_start()?;
329 let base_name_start = self.base_name_start()?;
334 Ok(HgPath::new(&full_path.as_bytes()[base_name_start..]))
330 Ok(HgPath::new(&full_path.as_bytes()[base_name_start..]))
335 }
331 }
336
332
337 pub(super) fn path<'on_disk>(
333 pub(super) fn path<'on_disk>(
338 &self,
334 &self,
339 on_disk: &'on_disk [u8],
335 on_disk: &'on_disk [u8],
340 ) -> Result<dirstate_map::NodeKey<'on_disk>, DirstateV2ParseError> {
336 ) -> Result<dirstate_map::NodeKey<'on_disk>, DirstateV2ParseError> {
341 Ok(WithBasename::from_raw_parts(
337 Ok(WithBasename::from_raw_parts(
342 Cow::Borrowed(self.full_path(on_disk)?),
338 Cow::Borrowed(self.full_path(on_disk)?),
343 self.base_name_start()?,
339 self.base_name_start()?,
344 ))
340 ))
345 }
341 }
346
342
347 pub(super) fn has_copy_source<'on_disk>(&self) -> bool {
343 pub(super) fn has_copy_source<'on_disk>(&self) -> bool {
348 self.copy_source.start.get() != 0
344 self.copy_source.start.get() != 0
349 }
345 }
350
346
351 pub(super) fn copy_source<'on_disk>(
347 pub(super) fn copy_source<'on_disk>(
352 &self,
348 &self,
353 on_disk: &'on_disk [u8],
349 on_disk: &'on_disk [u8],
354 ) -> Result<Option<&'on_disk HgPath>, DirstateV2ParseError> {
350 ) -> Result<Option<&'on_disk HgPath>, DirstateV2ParseError> {
355 Ok(if self.has_copy_source() {
351 Ok(if self.has_copy_source() {
356 Some(read_hg_path(on_disk, self.copy_source)?)
352 Some(read_hg_path(on_disk, self.copy_source)?)
357 } else {
353 } else {
358 None
354 None
359 })
355 })
360 }
356 }
361
357
362 pub(super) fn node_data(
358 pub(super) fn node_data(
363 &self,
359 &self,
364 ) -> Result<dirstate_map::NodeData, DirstateV2ParseError> {
360 ) -> Result<dirstate_map::NodeData, DirstateV2ParseError> {
365 let entry = |state| {
361 let entry = |state| {
366 dirstate_map::NodeData::Entry(self.entry_with_given_state(state))
362 dirstate_map::NodeData::Entry(self.entry_with_given_state(state))
367 };
363 };
368
364
369 match self.state {
365 match self.state {
370 b'\0' => Ok(dirstate_map::NodeData::None),
366 b'\0' => Ok(dirstate_map::NodeData::None),
371 b'd' => Ok(dirstate_map::NodeData::CachedDirectory {
367 b'd' => Ok(dirstate_map::NodeData::CachedDirectory {
372 mtime: *self.data.as_timestamp(),
368 mtime: *self.data.as_timestamp(),
373 }),
369 }),
374 b'n' => Ok(entry(EntryState::Normal)),
370 b'n' => Ok(entry(EntryState::Normal)),
375 b'a' => Ok(entry(EntryState::Added)),
371 b'a' => Ok(entry(EntryState::Added)),
376 b'r' => Ok(entry(EntryState::Removed)),
372 b'r' => Ok(entry(EntryState::Removed)),
377 b'm' => Ok(entry(EntryState::Merged)),
373 b'm' => Ok(entry(EntryState::Merged)),
378 _ => Err(DirstateV2ParseError),
374 _ => Err(DirstateV2ParseError),
379 }
375 }
380 }
376 }
381
377
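Note: the one-byte `state` field doubles as the discriminant for the `Entry` payload, as the match in `node_data` above shows:

    // b'\0' -> NodeData::None (the node exists only as a directory)
    // b'd'  -> NodeData::CachedDirectory (the Entry bytes hold a Timestamp)
    // b'n', b'a', b'r', b'm'
    //       -> a DirstateEntry in state Normal / Added / Removed / Merged
    // any other byte -> DirstateV2ParseError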
382 pub(super) fn cached_directory_mtime(&self) -> Option<&Timestamp> {
378 pub(super) fn cached_directory_mtime(&self) -> Option<&Timestamp> {
383 if self.state == b'd' {
379 if self.state == b'd' {
384 Some(self.data.as_timestamp())
380 Some(self.data.as_timestamp())
385 } else {
381 } else {
386 None
382 None
387 }
383 }
388 }
384 }
389
385
390 pub(super) fn state(
386 pub(super) fn state(
391 &self,
387 &self,
392 ) -> Result<Option<EntryState>, DirstateV2ParseError> {
388 ) -> Result<Option<EntryState>, DirstateV2ParseError> {
393 match self.state {
389 match self.state {
394 b'\0' | b'd' => Ok(None),
390 b'\0' | b'd' => Ok(None),
395 b'n' => Ok(Some(EntryState::Normal)),
391 b'n' => Ok(Some(EntryState::Normal)),
396 b'a' => Ok(Some(EntryState::Added)),
392 b'a' => Ok(Some(EntryState::Added)),
397 b'r' => Ok(Some(EntryState::Removed)),
393 b'r' => Ok(Some(EntryState::Removed)),
398 b'm' => Ok(Some(EntryState::Merged)),
394 b'm' => Ok(Some(EntryState::Merged)),
399 _ => Err(DirstateV2ParseError),
395 _ => Err(DirstateV2ParseError),
400 }
396 }
401 }
397 }
402
398
403 fn entry_with_given_state(&self, state: EntryState) -> DirstateEntry {
399 fn entry_with_given_state(&self, state: EntryState) -> DirstateEntry {
404 DirstateEntry {
400 DirstateEntry {
405 state,
401 state,
406 mode: self.data.mode.get(),
402 mode: self.data.mode.get(),
407 mtime: self.data.mtime.get(),
403 mtime: self.data.mtime.get(),
408 size: self.data.size.get(),
404 size: self.data.size.get(),
409 }
405 }
410 }
406 }
411
407
412 pub(super) fn entry(
408 pub(super) fn entry(
413 &self,
409 &self,
414 ) -> Result<Option<DirstateEntry>, DirstateV2ParseError> {
410 ) -> Result<Option<DirstateEntry>, DirstateV2ParseError> {
415 Ok(self
411 Ok(self
416 .state()?
412 .state()?
417 .map(|state| self.entry_with_given_state(state)))
413 .map(|state| self.entry_with_given_state(state)))
418 }
414 }
419
415
420 pub(super) fn children<'on_disk>(
416 pub(super) fn children<'on_disk>(
421 &self,
417 &self,
422 on_disk: &'on_disk [u8],
418 on_disk: &'on_disk [u8],
423 ) -> Result<&'on_disk [Node], DirstateV2ParseError> {
419 ) -> Result<&'on_disk [Node], DirstateV2ParseError> {
424 read_nodes(on_disk, self.children)
420 read_nodes(on_disk, self.children)
425 }
421 }
426
422
427 pub(super) fn to_in_memory_node<'on_disk>(
423 pub(super) fn to_in_memory_node<'on_disk>(
428 &self,
424 &self,
429 on_disk: &'on_disk [u8],
425 on_disk: &'on_disk [u8],
430 ) -> Result<dirstate_map::Node<'on_disk>, DirstateV2ParseError> {
426 ) -> Result<dirstate_map::Node<'on_disk>, DirstateV2ParseError> {
431 Ok(dirstate_map::Node {
427 Ok(dirstate_map::Node {
432 children: dirstate_map::ChildNodes::OnDisk(
428 children: dirstate_map::ChildNodes::OnDisk(
433 self.children(on_disk)?,
429 self.children(on_disk)?,
434 ),
430 ),
435 copy_source: self.copy_source(on_disk)?.map(Cow::Borrowed),
431 copy_source: self.copy_source(on_disk)?.map(Cow::Borrowed),
436 data: self.node_data()?,
432 data: self.node_data()?,
437 descendants_with_entry_count: self
433 descendants_with_entry_count: self
438 .descendants_with_entry_count
434 .descendants_with_entry_count
439 .get(),
435 .get(),
440 tracked_descendants_count: self.tracked_descendants_count.get(),
436 tracked_descendants_count: self.tracked_descendants_count.get(),
441 })
437 })
442 }
438 }
443 }
439 }
444
440
445 impl Entry {
441 impl Entry {
446 fn from_timestamp(timestamp: Timestamp) -> Self {
442 fn from_timestamp(timestamp: Timestamp) -> Self {
447 // Safety: both types implement the `BytesCast` trait, so we could
443 // Safety: both types implement the `BytesCast` trait, so we could
448 // safely use `as_bytes` and `from_bytes` to do this conversion. Using
444 // safely use `as_bytes` and `from_bytes` to do this conversion. Using
449 // `transmute` instead makes the compiler check that the two types
445 // `transmute` instead makes the compiler check that the two types
450 // have the same size, which eliminates the error case of
446 // have the same size, which eliminates the error case of
451 // `from_bytes`.
447 // `from_bytes`.
452 unsafe { std::mem::transmute::<Timestamp, Entry>(timestamp) }
448 unsafe { std::mem::transmute::<Timestamp, Entry>(timestamp) }
453 }
449 }
454
450
455 fn as_timestamp(&self) -> &Timestamp {
451 fn as_timestamp(&self) -> &Timestamp {
456 // Safety: same as above in `from_timestamp`
452 // Safety: same as above in `from_timestamp`
457 unsafe { &*(self as *const Entry as *const Timestamp) }
453 unsafe { &*(self as *const Entry as *const Timestamp) }
458 }
454 }
459 }
455 }
460
456
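Note: `transmute` is chosen here purely for its compile-time size check. A standalone sketch of the same idiom, using toy types rather than the real `Entry`/`Timestamp`:

    use std::mem::transmute;

    #[repr(C)]
    struct A(u32, u32);
    #[repr(C)]
    struct B(u64);

    fn convert(a: A) -> B {
        // Compiles only while `A` and `B` have the same size; if either
        // type grows, this becomes a compile error instead of the runtime
        // failure that `from_bytes` would report.
        unsafe { transmute::<A, B>(a) }
    }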
461 impl Timestamp {
457 impl Timestamp {
462 pub fn seconds(&self) -> i64 {
458 pub fn seconds(&self) -> i64 {
463 self.seconds.get()
459 self.seconds.get()
464 }
460 }
465 }
461 }
466
462
467 impl From<SystemTime> for Timestamp {
463 impl From<SystemTime> for Timestamp {
468 fn from(system_time: SystemTime) -> Self {
464 fn from(system_time: SystemTime) -> Self {
469 let (secs, nanos) = match system_time.duration_since(UNIX_EPOCH) {
465 let (secs, nanos) = match system_time.duration_since(UNIX_EPOCH) {
470 Ok(duration) => {
466 Ok(duration) => {
471 (duration.as_secs() as i64, duration.subsec_nanos())
467 (duration.as_secs() as i64, duration.subsec_nanos())
472 }
468 }
473 Err(error) => {
469 Err(error) => {
474 let negative = error.duration();
470 let negative = error.duration();
475 (-(negative.as_secs() as i64), negative.subsec_nanos())
471 (-(negative.as_secs() as i64), negative.subsec_nanos())
476 }
472 }
477 };
473 };
478 Timestamp {
474 Timestamp {
479 seconds: secs.into(),
475 seconds: secs.into(),
480 nanoseconds: nanos.into(),
476 nanoseconds: nanos.into(),
481 }
477 }
482 }
478 }
483 }
479 }
484
480
485 impl From<&'_ Timestamp> for SystemTime {
481 impl From<&'_ Timestamp> for SystemTime {
486 fn from(timestamp: &'_ Timestamp) -> Self {
482 fn from(timestamp: &'_ Timestamp) -> Self {
487 let secs = timestamp.seconds.get();
483 let secs = timestamp.seconds.get();
488 let nanos = timestamp.nanoseconds.get();
484 let nanos = timestamp.nanoseconds.get();
489 if secs >= 0 {
485 if secs >= 0 {
490 UNIX_EPOCH + Duration::new(secs as u64, nanos)
486 UNIX_EPOCH + Duration::new(secs as u64, nanos)
491 } else {
487 } else {
492 UNIX_EPOCH - Duration::new((-secs) as u64, nanos)
488 UNIX_EPOCH - Duration::new((-secs) as u64, nanos)
493 }
489 }
494 }
490 }
495 }
491 }
496
492
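Note: the two conversions above are inverses of each other. A standalone sketch of the same logic using only `std`; one apparent caveat is that a time strictly within one second before the epoch stores `secs == 0` with nonzero `nanos`, so reconstruction takes the non-negative branch and lands after the epoch:

    use std::time::{Duration, SystemTime, UNIX_EPOCH};

    fn to_parts(t: SystemTime) -> (i64, u32) {
        match t.duration_since(UNIX_EPOCH) {
            Ok(d) => (d.as_secs() as i64, d.subsec_nanos()),
            // Before the epoch: negate the seconds, keep nanoseconds positive.
            Err(e) => {
                let d = e.duration();
                (-(d.as_secs() as i64), d.subsec_nanos())
            }
        }
    }

    fn from_parts(secs: i64, nanos: u32) -> SystemTime {
        if secs >= 0 {
            UNIX_EPOCH + Duration::new(secs as u64, nanos)
        } else {
            UNIX_EPOCH - Duration::new((-secs) as u64, nanos)
        }
    }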
497 fn read_hg_path(
493 fn read_hg_path(
498 on_disk: &[u8],
494 on_disk: &[u8],
499 slice: PathSlice,
495 slice: PathSlice,
500 ) -> Result<&HgPath, DirstateV2ParseError> {
496 ) -> Result<&HgPath, DirstateV2ParseError> {
501 read_slice(on_disk, slice.start, slice.len.get()).map(HgPath::new)
497 read_slice(on_disk, slice.start, slice.len.get()).map(HgPath::new)
502 }
498 }
503
499
504 fn read_nodes(
500 fn read_nodes(
505 on_disk: &[u8],
501 on_disk: &[u8],
506 slice: ChildNodes,
502 slice: ChildNodes,
507 ) -> Result<&[Node], DirstateV2ParseError> {
503 ) -> Result<&[Node], DirstateV2ParseError> {
508 read_slice(on_disk, slice.start, slice.len.get())
504 read_slice(on_disk, slice.start, slice.len.get())
509 }
505 }
510
506
511 fn read_slice<T, Len>(
507 fn read_slice<T, Len>(
512 on_disk: &[u8],
508 on_disk: &[u8],
513 start: Offset,
509 start: Offset,
514 len: Len,
510 len: Len,
515 ) -> Result<&[T], DirstateV2ParseError>
511 ) -> Result<&[T], DirstateV2ParseError>
516 where
512 where
517 T: BytesCast,
513 T: BytesCast,
518 Len: TryInto<usize>,
514 Len: TryInto<usize>,
519 {
515 {
520 // Either value saturating to `usize::MAX` would result in an "out of bounds"
516 // Either value saturating to `usize::MAX` would result in an "out of bounds"
521 // error, since a single `&[u8]` cannot occupy the entire address space.
517 // error, since a single `&[u8]` cannot occupy the entire address space.
522 let start = start.get().try_into().unwrap_or(std::usize::MAX);
518 let start = start.get().try_into().unwrap_or(std::usize::MAX);
523 let len = len.try_into().unwrap_or(std::usize::MAX);
519 let len = len.try_into().unwrap_or(std::usize::MAX);
524 on_disk
520 on_disk
525 .get(start..)
521 .get(start..)
526 .and_then(|bytes| T::slice_from_bytes(bytes, len).ok())
522 .and_then(|bytes| T::slice_from_bytes(bytes, len).ok())
527 .map(|(slice, _rest)| slice)
523 .map(|(slice, _rest)| slice)
528 .ok_or_else(|| DirstateV2ParseError)
524 .ok_or_else(|| DirstateV2ParseError)
529 }
525 }
530
526
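Note: the `unwrap_or(std::usize::MAX)` clamp is what makes `read_slice` total instead of panicking. A standalone sketch of the trick:

    use std::convert::TryFrom;

    fn get_from(bytes: &[u8], start: u64) -> Option<&[u8]> {
        // Clamping an oversized offset to usize::MAX makes `get` return
        // `None` instead of panicking, since no real slice can occupy the
        // entire address space.
        let start = usize::try_from(start).unwrap_or(std::usize::MAX);
        bytes.get(start..)
    }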
531 pub(crate) fn for_each_tracked_path<'on_disk>(
527 pub(crate) fn for_each_tracked_path<'on_disk>(
532 on_disk: &'on_disk [u8],
528 on_disk: &'on_disk [u8],
529 metadata: &[u8],
533 mut f: impl FnMut(&'on_disk HgPath),
530 mut f: impl FnMut(&'on_disk HgPath),
534 ) -> Result<(), DirstateV2ParseError> {
531 ) -> Result<(), DirstateV2ParseError> {
535 let root = read_root(on_disk)?;
532 let (meta, _) = TreeMetadata::from_bytes(metadata)
533 .map_err(|_| DirstateV2ParseError)?;
536 fn recur<'on_disk>(
534 fn recur<'on_disk>(
537 on_disk: &'on_disk [u8],
535 on_disk: &'on_disk [u8],
538 nodes: ChildNodes,
536 nodes: ChildNodes,
539 f: &mut impl FnMut(&'on_disk HgPath),
537 f: &mut impl FnMut(&'on_disk HgPath),
540 ) -> Result<(), DirstateV2ParseError> {
538 ) -> Result<(), DirstateV2ParseError> {
541 for node in read_nodes(on_disk, nodes)? {
539 for node in read_nodes(on_disk, nodes)? {
542 if let Some(state) = node.state()? {
540 if let Some(state) = node.state()? {
543 if state.is_tracked() {
541 if state.is_tracked() {
544 f(node.full_path(on_disk)?)
542 f(node.full_path(on_disk)?)
545 }
543 }
546 }
544 }
547 recur(on_disk, node.children, f)?
545 recur(on_disk, node.children, f)?
548 }
546 }
549 Ok(())
547 Ok(())
550 }
548 }
551 recur(on_disk, root.root_nodes, &mut f)
549 recur(on_disk, meta.root_nodes, &mut f)
552 }
550 }
553
551
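Note: callers of `for_each_tracked_path` now pass the tree metadata bytes taken from the docket alongside the data file. A sketch of the new calling convention (variable names illustrative):

    let mut tracked = Vec::new();
    for_each_tracked_path(&data_bytes, docket.tree_metadata(), |path| {
        tracked.push(path.to_owned())
    })?;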
554 /// Returns new data together with whether that data should be appended to the
552 /// Returns new data and metadata, together with whether that data should be
555 /// existing data file whose content is at `dirstate_map.on_disk` (true),
553 /// appended to the existing data file whose content is at
556 /// instead of written to a new data file (false).
554 /// `dirstate_map.on_disk` (true), instead of written to a new data file
555 /// (false).
557 pub(super) fn write(
556 pub(super) fn write(
558 dirstate_map: &mut DirstateMap,
557 dirstate_map: &mut DirstateMap,
559 can_append: bool,
558 can_append: bool,
560 ) -> Result<(Vec<u8>, bool), DirstateError> {
559 ) -> Result<(Vec<u8>, Vec<u8>, bool), DirstateError> {
561 let append = can_append && dirstate_map.write_should_append();
560 let append = can_append && dirstate_map.write_should_append();
562
561
563 // This ignores the space for paths, and for nodes without an entry.
562 // This ignores the space for paths, and for nodes without an entry.
564 // TODO: better estimate? Skip the `Vec` and write to a file directly?
563 // TODO: better estimate? Skip the `Vec` and write to a file directly?
565 let size_guess = std::mem::size_of::<Root>()
564 let size_guess = std::mem::size_of::<Node>()
566 + std::mem::size_of::<Node>()
565 * dirstate_map.nodes_with_entry_count as usize;
567 * dirstate_map.nodes_with_entry_count as usize;
568
566
569 let mut writer = Writer {
567 let mut writer = Writer {
570 dirstate_map,
568 dirstate_map,
571 append,
569 append,
572 out: Vec::with_capacity(size_guess),
570 out: Vec::with_capacity(size_guess),
573 };
571 };
574
572
575 let root_nodes = writer.write_nodes(dirstate_map.root.as_ref())?;
573 let root_nodes = writer.write_nodes(dirstate_map.root.as_ref())?;
576
574
577 let root = Root {
575 let meta = TreeMetadata {
578 root_nodes,
576 root_nodes,
579 nodes_with_entry_count: dirstate_map.nodes_with_entry_count.into(),
577 nodes_with_entry_count: dirstate_map.nodes_with_entry_count.into(),
580 nodes_with_copy_source_count: dirstate_map
578 nodes_with_copy_source_count: dirstate_map
581 .nodes_with_copy_source_count
579 .nodes_with_copy_source_count
582 .into(),
580 .into(),
583 unreachable_bytes: dirstate_map.unreachable_bytes.into(),
581 unreachable_bytes: dirstate_map.unreachable_bytes.into(),
584 ignore_patterns_hash: dirstate_map.ignore_patterns_hash,
582 ignore_patterns_hash: dirstate_map.ignore_patterns_hash,
585 };
583 };
586 writer.out.extend(root.as_bytes());
584 Ok((writer.out, meta.as_bytes().to_vec(), append))
587 Ok((writer.out, append))
588 }
585 }
589
586
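Note: a sketch of how the three-element return value of `write` is meant to be consumed, per the docstring above (names illustrative):

    let (data, tree_metadata, append) = write(&mut dirstate_map, can_append)?;
    if append {
        // append `data` to the existing dirstate.{uuid}.d file
    } else {
        // write `data` to a fresh data file under a new uuid
    }
    // Either way, `tree_metadata` now belongs in the docket, not the data file.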
590 struct Writer<'dmap, 'on_disk> {
587 struct Writer<'dmap, 'on_disk> {
591 dirstate_map: &'dmap DirstateMap<'on_disk>,
588 dirstate_map: &'dmap DirstateMap<'on_disk>,
592 append: bool,
589 append: bool,
593 out: Vec<u8>,
590 out: Vec<u8>,
594 }
591 }
595
592
596 impl Writer<'_, '_> {
593 impl Writer<'_, '_> {
597 fn write_nodes(
594 fn write_nodes(
598 &mut self,
595 &mut self,
599 nodes: dirstate_map::ChildNodesRef,
596 nodes: dirstate_map::ChildNodesRef,
600 ) -> Result<ChildNodes, DirstateError> {
597 ) -> Result<ChildNodes, DirstateError> {
601 // Reuse already-written nodes if possible
598 // Reuse already-written nodes if possible
602 if self.append {
599 if self.append {
603 if let dirstate_map::ChildNodesRef::OnDisk(nodes_slice) = nodes {
600 if let dirstate_map::ChildNodesRef::OnDisk(nodes_slice) = nodes {
604 let start = self.on_disk_offset_of(nodes_slice).expect(
601 let start = self.on_disk_offset_of(nodes_slice).expect(
605 "dirstate-v2 OnDisk nodes not found within on_disk",
602 "dirstate-v2 OnDisk nodes not found within on_disk",
606 );
603 );
607 let len = child_nodes_len_from_usize(nodes_slice.len());
604 let len = child_nodes_len_from_usize(nodes_slice.len());
608 return Ok(ChildNodes { start, len });
605 return Ok(ChildNodes { start, len });
609 }
606 }
610 }
607 }
611
608
612 // `dirstate_map::ChildNodes::InMemory` contains a `HashMap` which has
609 // `dirstate_map::ChildNodes::InMemory` contains a `HashMap` which has
613 // undefined iteration order. Sort to enable binary search in the
610 // undefined iteration order. Sort to enable binary search in the
614 // written file.
611 // written file.
615 let nodes = nodes.sorted();
612 let nodes = nodes.sorted();
616 let nodes_len = nodes.len();
613 let nodes_len = nodes.len();
617
614
618 // First accumulate serialized nodes in a `Vec`
615 // First accumulate serialized nodes in a `Vec`
619 let mut on_disk_nodes = Vec::with_capacity(nodes_len);
616 let mut on_disk_nodes = Vec::with_capacity(nodes_len);
620 for node in nodes {
617 for node in nodes {
621 let children =
618 let children =
622 self.write_nodes(node.children(self.dirstate_map.on_disk)?)?;
619 self.write_nodes(node.children(self.dirstate_map.on_disk)?)?;
623 let full_path = node.full_path(self.dirstate_map.on_disk)?;
620 let full_path = node.full_path(self.dirstate_map.on_disk)?;
624 let full_path = self.write_path(full_path.as_bytes());
621 let full_path = self.write_path(full_path.as_bytes());
625 let copy_source = if let Some(source) =
622 let copy_source = if let Some(source) =
626 node.copy_source(self.dirstate_map.on_disk)?
623 node.copy_source(self.dirstate_map.on_disk)?
627 {
624 {
628 self.write_path(source.as_bytes())
625 self.write_path(source.as_bytes())
629 } else {
626 } else {
630 PathSlice {
627 PathSlice {
631 start: 0.into(),
628 start: 0.into(),
632 len: 0.into(),
629 len: 0.into(),
633 }
630 }
634 };
631 };
635 on_disk_nodes.push(match node {
632 on_disk_nodes.push(match node {
636 NodeRef::InMemory(path, node) => {
633 NodeRef::InMemory(path, node) => {
637 let (state, data) = match &node.data {
634 let (state, data) = match &node.data {
638 dirstate_map::NodeData::Entry(entry) => (
635 dirstate_map::NodeData::Entry(entry) => (
639 entry.state.into(),
636 entry.state.into(),
640 Entry {
637 Entry {
641 mode: entry.mode.into(),
638 mode: entry.mode.into(),
642 mtime: entry.mtime.into(),
639 mtime: entry.mtime.into(),
643 size: entry.size.into(),
640 size: entry.size.into(),
644 },
641 },
645 ),
642 ),
646 dirstate_map::NodeData::CachedDirectory { mtime } => {
643 dirstate_map::NodeData::CachedDirectory { mtime } => {
647 (b'd', Entry::from_timestamp(*mtime))
644 (b'd', Entry::from_timestamp(*mtime))
648 }
645 }
649 dirstate_map::NodeData::None => (
646 dirstate_map::NodeData::None => (
650 b'\0',
647 b'\0',
651 Entry {
648 Entry {
652 mode: 0.into(),
649 mode: 0.into(),
653 mtime: 0.into(),
650 mtime: 0.into(),
654 size: 0.into(),
651 size: 0.into(),
655 },
652 },
656 ),
653 ),
657 };
654 };
658 Node {
655 Node {
659 children,
656 children,
660 copy_source,
657 copy_source,
661 full_path,
658 full_path,
662 base_name_start: u16::try_from(path.base_name_start())
659 base_name_start: u16::try_from(path.base_name_start())
663 // Could only panic for paths over 64 KiB
660 // Could only panic for paths over 64 KiB
664 .expect("dirstate-v2 path length overflow")
661 .expect("dirstate-v2 path length overflow")
665 .into(),
662 .into(),
666 descendants_with_entry_count: node
663 descendants_with_entry_count: node
667 .descendants_with_entry_count
664 .descendants_with_entry_count
668 .into(),
665 .into(),
669 tracked_descendants_count: node
666 tracked_descendants_count: node
670 .tracked_descendants_count
667 .tracked_descendants_count
671 .into(),
668 .into(),
672 state,
669 state,
673 data,
670 data,
674 }
671 }
675 }
672 }
676 NodeRef::OnDisk(node) => Node {
673 NodeRef::OnDisk(node) => Node {
677 children,
674 children,
678 copy_source,
675 copy_source,
679 full_path,
676 full_path,
680 ..*node
677 ..*node
681 },
678 },
682 })
679 })
683 }
680 }
684 // … so we can write them contiguously, after writing everything else
681 // … so we can write them contiguously, after writing everything else
685 // they refer to.
682 // they refer to.
686 let start = self.current_offset();
683 let start = self.current_offset();
687 let len = child_nodes_len_from_usize(nodes_len);
684 let len = child_nodes_len_from_usize(nodes_len);
688 self.out.extend(on_disk_nodes.as_bytes());
685 self.out.extend(on_disk_nodes.as_bytes());
689 Ok(ChildNodes { start, len })
686 Ok(ChildNodes { start, len })
690 }
687 }
691
688
692 /// If the given slice of items is within `on_disk`, returns its offset
689 /// If the given slice of items is within `on_disk`, returns its offset
693 /// from the start of `on_disk`.
690 /// from the start of `on_disk`.
694 fn on_disk_offset_of<T>(&self, slice: &[T]) -> Option<Offset>
691 fn on_disk_offset_of<T>(&self, slice: &[T]) -> Option<Offset>
695 where
692 where
696 T: BytesCast,
693 T: BytesCast,
697 {
694 {
698 fn address_range(slice: &[u8]) -> std::ops::RangeInclusive<usize> {
695 fn address_range(slice: &[u8]) -> std::ops::RangeInclusive<usize> {
699 let start = slice.as_ptr() as usize;
696 let start = slice.as_ptr() as usize;
700 let end = start + slice.len();
697 let end = start + slice.len();
701 start..=end
698 start..=end
702 }
699 }
703 let slice_addresses = address_range(slice.as_bytes());
700 let slice_addresses = address_range(slice.as_bytes());
704 let on_disk_addresses = address_range(self.dirstate_map.on_disk);
701 let on_disk_addresses = address_range(self.dirstate_map.on_disk);
705 if on_disk_addresses.contains(slice_addresses.start())
702 if on_disk_addresses.contains(slice_addresses.start())
706 && on_disk_addresses.contains(slice_addresses.end())
703 && on_disk_addresses.contains(slice_addresses.end())
707 {
704 {
708 let offset = slice_addresses.start() - on_disk_addresses.start();
705 let offset = slice_addresses.start() - on_disk_addresses.start();
709 Some(offset_from_usize(offset))
706 Some(offset_from_usize(offset))
710 } else {
707 } else {
711 None
708 None
712 }
709 }
713 }
710 }
714
711
715 fn current_offset(&mut self) -> Offset {
712 fn current_offset(&mut self) -> Offset {
716 let mut offset = self.out.len();
713 let mut offset = self.out.len();
717 if self.append {
714 if self.append {
718 offset += self.dirstate_map.on_disk.len()
715 offset += self.dirstate_map.on_disk.len()
719 }
716 }
720 offset_from_usize(offset)
717 offset_from_usize(offset)
721 }
718 }
722
719
723 fn write_path(&mut self, slice: &[u8]) -> PathSlice {
720 fn write_path(&mut self, slice: &[u8]) -> PathSlice {
724 let len = path_len_from_usize(slice.len());
721 let len = path_len_from_usize(slice.len());
725 // Reuse an already-written path if possible
722 // Reuse an already-written path if possible
726 if self.append {
723 if self.append {
727 if let Some(start) = self.on_disk_offset_of(slice) {
724 if let Some(start) = self.on_disk_offset_of(slice) {
728 return PathSlice { start, len };
725 return PathSlice { start, len };
729 }
726 }
730 }
727 }
731 let start = self.current_offset();
728 let start = self.current_offset();
732 self.out.extend(slice.as_bytes());
729 self.out.extend(slice.as_bytes());
733 PathSlice { start, len }
730 PathSlice { start, len }
734 }
731 }
735 }
732 }
736
733
737 fn offset_from_usize(x: usize) -> Offset {
734 fn offset_from_usize(x: usize) -> Offset {
738 u32::try_from(x)
735 u32::try_from(x)
739 // Could only panic for a dirstate file larger than 4 GiB
736 // Could only panic for a dirstate file larger than 4 GiB
740 .expect("dirstate-v2 offset overflow")
737 .expect("dirstate-v2 offset overflow")
741 .into()
738 .into()
742 }
739 }
743
740
744 fn child_nodes_len_from_usize(x: usize) -> Size {
741 fn child_nodes_len_from_usize(x: usize) -> Size {
745 u32::try_from(x)
742 u32::try_from(x)
746 // Could only panic with over 4 billion nodes
743 // Could only panic with over 4 billion nodes
747 .expect("dirstate-v2 slice length overflow")
744 .expect("dirstate-v2 slice length overflow")
748 .into()
745 .into()
749 }
746 }
750
747
751 fn path_len_from_usize(x: usize) -> PathSize {
748 fn path_len_from_usize(x: usize) -> PathSize {
752 u16::try_from(x)
749 u16::try_from(x)
753 // Could only panic for paths over 64 KiB
750 // Could only panic for paths over 64 KiB
754 .expect("dirstate-v2 path length overflow")
751 .expect("dirstate-v2 path length overflow")
755 .into()
752 .into()
756 }
753 }
@@ -1,84 +1,90 b''
1 // list_tracked_files.rs
1 // list_tracked_files.rs
2 //
2 //
3 // Copyright 2020 Antoine Cezar <antoine.cezar@octobus.net>
3 // Copyright 2020 Antoine Cezar <antoine.cezar@octobus.net>
4 //
4 //
5 // This software may be used and distributed according to the terms of the
5 // This software may be used and distributed according to the terms of the
6 // GNU General Public License version 2 or any later version.
6 // GNU General Public License version 2 or any later version.
7
7
8 use crate::dirstate::parsers::parse_dirstate_entries;
8 use crate::dirstate::parsers::parse_dirstate_entries;
9 use crate::dirstate_tree::on_disk::{for_each_tracked_path, read_docket};
9 use crate::dirstate_tree::on_disk::{for_each_tracked_path, read_docket};
10 use crate::errors::HgError;
10 use crate::errors::HgError;
11 use crate::repo::Repo;
11 use crate::repo::Repo;
12 use crate::revlog::changelog::Changelog;
12 use crate::revlog::changelog::Changelog;
13 use crate::revlog::manifest::{Manifest, ManifestEntry};
13 use crate::revlog::manifest::{Manifest, ManifestEntry};
14 use crate::revlog::node::Node;
14 use crate::revlog::node::Node;
15 use crate::revlog::revlog::RevlogError;
15 use crate::revlog::revlog::RevlogError;
16 use crate::utils::hg_path::HgPath;
16 use crate::utils::hg_path::HgPath;
17 use crate::DirstateError;
17 use crate::DirstateError;
18 use rayon::prelude::*;
18 use rayon::prelude::*;
19
19
20 /// List files under Mercurial control in the working directory
20 /// List files under Mercurial control in the working directory
21 /// by reading the dirstate
21 /// by reading the dirstate
22 pub struct Dirstate {
22 pub struct Dirstate {
23 /// The `dirstate` content.
23 /// The `dirstate` content.
24 content: Vec<u8>,
24 content: Vec<u8>,
25 dirstate_v2: bool,
25 v2_metadata: Option<Vec<u8>>,
26 }
26 }
27
27
28 impl Dirstate {
28 impl Dirstate {
29 pub fn new(repo: &Repo) -> Result<Self, HgError> {
29 pub fn new(repo: &Repo) -> Result<Self, HgError> {
30 let mut content = repo.hg_vfs().read("dirstate")?;
30 let mut content = repo.hg_vfs().read("dirstate")?;
31 if repo.has_dirstate_v2() {
31 let v2_metadata = if repo.has_dirstate_v2() {
32 let docket = read_docket(&content)?;
32 let docket = read_docket(&content)?;
33 let meta = docket.tree_metadata().to_vec();
33 content = repo.hg_vfs().read(docket.data_filename())?;
34 content = repo.hg_vfs().read(docket.data_filename())?;
34 }
35 Some(meta)
36 } else {
37 None
38 };
35 Ok(Self {
39 Ok(Self {
36 content,
40 content,
37 dirstate_v2: repo.has_dirstate_v2(),
41 v2_metadata,
38 })
42 })
39 }
43 }
40
44
41 pub fn tracked_files(&self) -> Result<Vec<&HgPath>, DirstateError> {
45 pub fn tracked_files(&self) -> Result<Vec<&HgPath>, DirstateError> {
42 let mut files = Vec::new();
46 let mut files = Vec::new();
43 if !self.content.is_empty() {
47 if !self.content.is_empty() {
44 if self.dirstate_v2 {
48 if let Some(meta) = &self.v2_metadata {
45 for_each_tracked_path(&self.content, |path| files.push(path))?
49 for_each_tracked_path(&self.content, meta, |path| {
50 files.push(path)
51 })?
46 } else {
52 } else {
47 let _parents = parse_dirstate_entries(
53 let _parents = parse_dirstate_entries(
48 &self.content,
54 &self.content,
49 |path, entry, _copy_source| {
55 |path, entry, _copy_source| {
50 if entry.state.is_tracked() {
56 if entry.state.is_tracked() {
51 files.push(path)
57 files.push(path)
52 }
58 }
53 Ok(())
59 Ok(())
54 },
60 },
55 )?;
61 )?;
56 }
62 }
57 }
63 }
58 files.par_sort_unstable();
64 files.par_sort_unstable();
59 Ok(files)
65 Ok(files)
60 }
66 }
61 }
67 }
62
68
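Note: the public API of `Dirstate` is unchanged by this refactor; the v2 tree metadata is captured at construction time and threaded through internally. A usage sketch:

    let dirstate = Dirstate::new(&repo)?;
    // For dirstate-v2, `new` already read the docket's tree metadata,
    // so `tracked_files` needs no extra arguments.
    let files = dirstate.tracked_files()?;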
63 /// List files under Mercurial control at a given revision.
69 /// List files under Mercurial control at a given revision.
64 pub fn list_rev_tracked_files(
70 pub fn list_rev_tracked_files(
65 repo: &Repo,
71 repo: &Repo,
66 revset: &str,
72 revset: &str,
67 ) -> Result<FilesForRev, RevlogError> {
73 ) -> Result<FilesForRev, RevlogError> {
68 let rev = crate::revset::resolve_single(revset, repo)?;
74 let rev = crate::revset::resolve_single(revset, repo)?;
69 let changelog = Changelog::open(repo)?;
75 let changelog = Changelog::open(repo)?;
70 let manifest = Manifest::open(repo)?;
76 let manifest = Manifest::open(repo)?;
71 let changelog_entry = changelog.get_rev(rev)?;
77 let changelog_entry = changelog.get_rev(rev)?;
72 let manifest_node =
78 let manifest_node =
73 Node::from_hex_for_repo(&changelog_entry.manifest_node()?)?;
79 Node::from_hex_for_repo(&changelog_entry.manifest_node()?)?;
74 let manifest_entry = manifest.get_node(manifest_node.into())?;
80 let manifest_entry = manifest.get_node(manifest_node.into())?;
75 Ok(FilesForRev(manifest_entry))
81 Ok(FilesForRev(manifest_entry))
76 }
82 }
77
83
78 pub struct FilesForRev(ManifestEntry);
84 pub struct FilesForRev(ManifestEntry);
79
85
80 impl FilesForRev {
86 impl FilesForRev {
81 pub fn iter(&self) -> impl Iterator<Item = &HgPath> {
87 pub fn iter(&self) -> impl Iterator<Item = &HgPath> {
82 self.0.files()
88 self.0.files()
83 }
89 }
84 }
90 }
@@ -1,614 +1,618 b''
1 // dirstate_map.rs
1 // dirstate_map.rs
2 //
2 //
3 // Copyright 2019 Raphaël Gomès <rgomes@octobus.net>
3 // Copyright 2019 Raphaël Gomès <rgomes@octobus.net>
4 //
4 //
5 // This software may be used and distributed according to the terms of the
5 // This software may be used and distributed according to the terms of the
6 // GNU General Public License version 2 or any later version.
6 // GNU General Public License version 2 or any later version.
7
7
8 //! Bindings for the `hg::dirstate::dirstate_map` file provided by the
8 //! Bindings for the `hg::dirstate::dirstate_map` file provided by the
9 //! `hg-core` package.
9 //! `hg-core` package.
10
10
11 use std::cell::{RefCell, RefMut};
11 use std::cell::{RefCell, RefMut};
12 use std::convert::TryInto;
12 use std::convert::TryInto;
13
13
14 use cpython::{
14 use cpython::{
15 exc, ObjectProtocol, PyBool, PyBytes, PyClone, PyDict, PyErr, PyList,
15 exc, ObjectProtocol, PyBool, PyBytes, PyClone, PyDict, PyErr, PyList,
16 PyObject, PyResult, PySet, PyString, Python, PythonObject, ToPyObject,
16 PyObject, PyResult, PySet, PyString, Python, PythonObject, ToPyObject,
17 UnsafePyLeaked,
17 UnsafePyLeaked,
18 };
18 };
19
19
20 use crate::{
20 use crate::{
21 dirstate::copymap::{CopyMap, CopyMapItemsIterator, CopyMapKeysIterator},
21 dirstate::copymap::{CopyMap, CopyMapItemsIterator, CopyMapKeysIterator},
22 dirstate::make_directory_item,
22 dirstate::make_directory_item,
23 dirstate::make_dirstate_item,
23 dirstate::make_dirstate_item,
24 dirstate::non_normal_entries::{
24 dirstate::non_normal_entries::{
25 NonNormalEntries, NonNormalEntriesIterator,
25 NonNormalEntries, NonNormalEntriesIterator,
26 },
26 },
27 dirstate::owning::OwningDirstateMap,
27 dirstate::owning::OwningDirstateMap,
28 parsers::dirstate_parents_to_pytuple,
28 parsers::dirstate_parents_to_pytuple,
29 };
29 };
30 use hg::{
30 use hg::{
31 dirstate::parsers::Timestamp,
31 dirstate::parsers::Timestamp,
32 dirstate::MTIME_UNSET,
32 dirstate::MTIME_UNSET,
33 dirstate::SIZE_NON_NORMAL,
33 dirstate::SIZE_NON_NORMAL,
34 dirstate_tree::dispatch::DirstateMapMethods,
34 dirstate_tree::dispatch::DirstateMapMethods,
35 dirstate_tree::on_disk::DirstateV2ParseError,
35 dirstate_tree::on_disk::DirstateV2ParseError,
36 revlog::Node,
36 revlog::Node,
37 utils::files::normalize_case,
37 utils::files::normalize_case,
38 utils::hg_path::{HgPath, HgPathBuf},
38 utils::hg_path::{HgPath, HgPathBuf},
39 DirstateEntry, DirstateError, DirstateMap as RustDirstateMap,
39 DirstateEntry, DirstateError, DirstateMap as RustDirstateMap,
40 DirstateParents, EntryState, StateMapIter,
40 DirstateParents, EntryState, StateMapIter,
41 };
41 };
42
42
43 // TODO
43 // TODO
44 // This object needs to share references to multiple members of its Rust
44 // This object needs to share references to multiple members of its Rust
45 // inner struct, namely `copy_map`, `dirs` and `all_dirs`.
45 // inner struct, namely `copy_map`, `dirs` and `all_dirs`.
46 // Right now `CopyMap` is done, but it needs to have an explicit reference
46 // Right now `CopyMap` is done, but it needs to have an explicit reference
47 // to `RustDirstateMap` which itself needs to have an encapsulation for
47 // to `RustDirstateMap` which itself needs to have an encapsulation for
48 // every method in `CopyMap` (copymapcopy, etc.).
48 // every method in `CopyMap` (copymapcopy, etc.).
49 // This is ugly and hard to maintain.
49 // This is ugly and hard to maintain.
50 // The same logic applies to `dirs` and `all_dirs`, however the `Dirs`
50 // The same logic applies to `dirs` and `all_dirs`, however the `Dirs`
51 // `py_class!` is already implemented and does not mention
51 // `py_class!` is already implemented and does not mention
52 // `RustDirstateMap`, rightfully so.
52 // `RustDirstateMap`, rightfully so.
53 // All attributes also have to have a separate refcount data attribute for
53 // All attributes also have to have a separate refcount data attribute for
54 // leaks, with all methods that go along for reference sharing.
54 // leaks, with all methods that go along for reference sharing.
55 py_class!(pub class DirstateMap |py| {
55 py_class!(pub class DirstateMap |py| {
56 @shared data inner: Box<dyn DirstateMapMethods + Send>;
56 @shared data inner: Box<dyn DirstateMapMethods + Send>;
57
57
58 /// Returns a `(dirstate_map, parents)` tuple
58 /// Returns a `(dirstate_map, parents)` tuple
59 @staticmethod
59 @staticmethod
60 def new_v1(
60 def new_v1(
61 use_dirstate_tree: bool,
61 use_dirstate_tree: bool,
62 on_disk: PyBytes,
62 on_disk: PyBytes,
63 ) -> PyResult<PyObject> {
63 ) -> PyResult<PyObject> {
64 let dirstate_error = |e: DirstateError| {
64 let dirstate_error = |e: DirstateError| {
65 PyErr::new::<exc::OSError, _>(py, format!("Dirstate error: {:?}", e))
65 PyErr::new::<exc::OSError, _>(py, format!("Dirstate error: {:?}", e))
66 };
66 };
67 let (inner, parents) = if use_dirstate_tree {
67 let (inner, parents) = if use_dirstate_tree {
68 let (map, parents) = OwningDirstateMap::new_v1(py, on_disk)
68 let (map, parents) = OwningDirstateMap::new_v1(py, on_disk)
69 .map_err(dirstate_error)?;
69 .map_err(dirstate_error)?;
70 (Box::new(map) as _, parents)
70 (Box::new(map) as _, parents)
71 } else {
71 } else {
72 let bytes = on_disk.data(py);
72 let bytes = on_disk.data(py);
73 let mut map = RustDirstateMap::default();
73 let mut map = RustDirstateMap::default();
74 let parents = map.read(bytes).map_err(dirstate_error)?;
74 let parents = map.read(bytes).map_err(dirstate_error)?;
75 (Box::new(map) as _, parents)
75 (Box::new(map) as _, parents)
76 };
76 };
77 let map = Self::create_instance(py, inner)?;
77 let map = Self::create_instance(py, inner)?;
78 let parents = parents.map(|p| dirstate_parents_to_pytuple(py, &p));
78 let parents = parents.map(|p| dirstate_parents_to_pytuple(py, &p));
79 Ok((map, parents).to_py_object(py).into_object())
79 Ok((map, parents).to_py_object(py).into_object())
80 }
80 }
81
81
82 /// Returns a DirstateMap
82 /// Returns a DirstateMap
83 @staticmethod
83 @staticmethod
84 def new_v2(
84 def new_v2(
85 on_disk: PyBytes,
85 on_disk: PyBytes,
86 data_size: usize,
86 data_size: usize,
87 tree_metadata: PyBytes,
87 ) -> PyResult<PyObject> {
88 ) -> PyResult<PyObject> {
88 let dirstate_error = |e: DirstateError| {
89 let dirstate_error = |e: DirstateError| {
89 PyErr::new::<exc::OSError, _>(py, format!("Dirstate error: {:?}", e))
90 PyErr::new::<exc::OSError, _>(py, format!("Dirstate error: {:?}", e))
90 };
91 };
91 let inner = OwningDirstateMap::new_v2(py, on_disk, data_size)
92 let inner = OwningDirstateMap::new_v2(
92 .map_err(dirstate_error)?;
93 py, on_disk, data_size, tree_metadata,
94 ).map_err(dirstate_error)?;
93 let map = Self::create_instance(py, Box::new(inner))?;
95 let map = Self::create_instance(py, Box::new(inner))?;
94 Ok(map.into_object())
96 Ok(map.into_object())
95 }
97 }
96
98
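Note: a hedged signature sketch of the constructor this binding now calls, inferred from the call site above (not the real declaration, which may differ):

    fn new_v2(
        py: Python,
        on_disk: PyBytes,
        data_size: usize,
        tree_metadata: PyBytes,
    ) -> Result<OwningDirstateMap, DirstateError>;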
97 def clear(&self) -> PyResult<PyObject> {
99 def clear(&self) -> PyResult<PyObject> {
98 self.inner(py).borrow_mut().clear();
100 self.inner(py).borrow_mut().clear();
99 Ok(py.None())
101 Ok(py.None())
100 }
102 }
101
103
102 def get(
104 def get(
103 &self,
105 &self,
104 key: PyObject,
106 key: PyObject,
105 default: Option<PyObject> = None
107 default: Option<PyObject> = None
106 ) -> PyResult<Option<PyObject>> {
108 ) -> PyResult<Option<PyObject>> {
107 let key = key.extract::<PyBytes>(py)?;
109 let key = key.extract::<PyBytes>(py)?;
108 match self
110 match self
109 .inner(py)
111 .inner(py)
110 .borrow()
112 .borrow()
111 .get(HgPath::new(key.data(py)))
113 .get(HgPath::new(key.data(py)))
112 .map_err(|e| v2_error(py, e))?
114 .map_err(|e| v2_error(py, e))?
113 {
115 {
114 Some(entry) => {
116 Some(entry) => {
115 Ok(Some(make_dirstate_item(py, &entry)?))
117 Ok(Some(make_dirstate_item(py, &entry)?))
116 },
118 },
117 None => Ok(default)
119 None => Ok(default)
118 }
120 }
119 }
121 }
120
122
121 def addfile(
123 def addfile(
122 &self,
124 &self,
123 f: PyObject,
125 f: PyObject,
124 mode: PyObject,
126 mode: PyObject,
125 size: PyObject,
127 size: PyObject,
126 mtime: PyObject,
128 mtime: PyObject,
127 added: PyObject,
129 added: PyObject,
128 merged: PyObject,
130 merged: PyObject,
129 from_p2: PyObject,
131 from_p2: PyObject,
130 possibly_dirty: PyObject,
132 possibly_dirty: PyObject,
131 ) -> PyResult<PyObject> {
133 ) -> PyResult<PyObject> {
132 let f = f.extract::<PyBytes>(py)?;
134 let f = f.extract::<PyBytes>(py)?;
133 let filename = HgPath::new(f.data(py));
135 let filename = HgPath::new(f.data(py));
134 let mode = if mode.is_none(py) {
136 let mode = if mode.is_none(py) {
135 // fallback default value
137 // fallback default value
136 0
138 0
137 } else {
139 } else {
138 mode.extract(py)?
140 mode.extract(py)?
139 };
141 };
140 let size = if size.is_none(py) {
142 let size = if size.is_none(py) {
141 // fallback default value
143 // fallback default value
142 SIZE_NON_NORMAL
144 SIZE_NON_NORMAL
143 } else {
145 } else {
144 size.extract(py)?
146 size.extract(py)?
145 };
147 };
146 let mtime = if mtime.is_none(py) {
148 let mtime = if mtime.is_none(py) {
147 // fallback default value
149 // fallback default value
148 MTIME_UNSET
150 MTIME_UNSET
149 } else {
151 } else {
150 mtime.extract(py)?
152 mtime.extract(py)?
151 };
153 };
152 let entry = DirstateEntry {
154 let entry = DirstateEntry {
153 // XXX Arbitrary default value since the value is determined later
155 // XXX Arbitrary default value since the value is determined later
154 state: EntryState::Normal,
156 state: EntryState::Normal,
155 mode: mode,
157 mode: mode,
156 size: size,
158 size: size,
157 mtime: mtime,
159 mtime: mtime,
158 };
160 };
159 let added = added.extract::<PyBool>(py)?.is_true();
161 let added = added.extract::<PyBool>(py)?.is_true();
160 let merged = merged.extract::<PyBool>(py)?.is_true();
162 let merged = merged.extract::<PyBool>(py)?.is_true();
161 let from_p2 = from_p2.extract::<PyBool>(py)?.is_true();
163 let from_p2 = from_p2.extract::<PyBool>(py)?.is_true();
162 let possibly_dirty = possibly_dirty.extract::<PyBool>(py)?.is_true();
164 let possibly_dirty = possibly_dirty.extract::<PyBool>(py)?.is_true();
163 self.inner(py).borrow_mut().add_file(
165 self.inner(py).borrow_mut().add_file(
164 filename,
166 filename,
165 entry,
167 entry,
166 added,
168 added,
167 merged,
169 merged,
168 from_p2,
170 from_p2,
169 possibly_dirty
171 possibly_dirty
170 ).and(Ok(py.None())).or_else(|e: DirstateError| {
172 ).and(Ok(py.None())).or_else(|e: DirstateError| {
171 Err(PyErr::new::<exc::ValueError, _>(py, e.to_string()))
173 Err(PyErr::new::<exc::ValueError, _>(py, e.to_string()))
172 })
174 })
173 }
175 }
174
176
175 def removefile(
177 def removefile(
176 &self,
178 &self,
177 f: PyObject,
179 f: PyObject,
178 in_merge: PyObject
180 in_merge: PyObject
179 ) -> PyResult<PyObject> {
181 ) -> PyResult<PyObject> {
180 self.inner(py).borrow_mut()
182 self.inner(py).borrow_mut()
181 .remove_file(
183 .remove_file(
182 HgPath::new(f.extract::<PyBytes>(py)?.data(py)),
184 HgPath::new(f.extract::<PyBytes>(py)?.data(py)),
183 in_merge.extract::<PyBool>(py)?.is_true(),
185 in_merge.extract::<PyBool>(py)?.is_true(),
184 )
186 )
185 .or_else(|_| {
187 .or_else(|_| {
186 Err(PyErr::new::<exc::OSError, _>(
188 Err(PyErr::new::<exc::OSError, _>(
187 py,
189 py,
188 "Dirstate error".to_string(),
190 "Dirstate error".to_string(),
189 ))
191 ))
190 })?;
192 })?;
191 Ok(py.None())
193 Ok(py.None())
192 }
194 }
193
195
194 def dropfile(
196 def dropfile(
195 &self,
197 &self,
196 f: PyObject,
198 f: PyObject,
197 ) -> PyResult<PyBool> {
199 ) -> PyResult<PyBool> {
198 self.inner(py).borrow_mut()
200 self.inner(py).borrow_mut()
199 .drop_file(
201 .drop_file(
200 HgPath::new(f.extract::<PyBytes>(py)?.data(py)),
202 HgPath::new(f.extract::<PyBytes>(py)?.data(py)),
201 )
203 )
202 .and_then(|b| Ok(b.to_py_object(py)))
204 .and_then(|b| Ok(b.to_py_object(py)))
203 .or_else(|e| {
205 .or_else(|e| {
204 Err(PyErr::new::<exc::OSError, _>(
206 Err(PyErr::new::<exc::OSError, _>(
205 py,
207 py,
206 format!("Dirstate error: {}", e.to_string()),
208 format!("Dirstate error: {}", e.to_string()),
207 ))
209 ))
208 })
210 })
209 }
211 }
210
212
211 def clearambiguoustimes(
213 def clearambiguoustimes(
212 &self,
214 &self,
213 files: PyObject,
215 files: PyObject,
214 now: PyObject
216 now: PyObject
215 ) -> PyResult<PyObject> {
217 ) -> PyResult<PyObject> {
216 let files: PyResult<Vec<HgPathBuf>> = files
218 let files: PyResult<Vec<HgPathBuf>> = files
217 .iter(py)?
219 .iter(py)?
218 .map(|filename| {
220 .map(|filename| {
219 Ok(HgPathBuf::from_bytes(
221 Ok(HgPathBuf::from_bytes(
220 filename?.extract::<PyBytes>(py)?.data(py),
222 filename?.extract::<PyBytes>(py)?.data(py),
221 ))
223 ))
222 })
224 })
223 .collect();
225 .collect();
224 self.inner(py)
226 self.inner(py)
225 .borrow_mut()
227 .borrow_mut()
226 .clear_ambiguous_times(files?, now.extract(py)?)
228 .clear_ambiguous_times(files?, now.extract(py)?)
227 .map_err(|e| v2_error(py, e))?;
229 .map_err(|e| v2_error(py, e))?;
228 Ok(py.None())
230 Ok(py.None())
229 }
231 }
230
232
231 def other_parent_entries(&self) -> PyResult<PyObject> {
233 def other_parent_entries(&self) -> PyResult<PyObject> {
232 let mut inner_shared = self.inner(py).borrow_mut();
234 let mut inner_shared = self.inner(py).borrow_mut();
233 let set = PySet::empty(py)?;
235 let set = PySet::empty(py)?;
234 for path in inner_shared.iter_other_parent_paths() {
236 for path in inner_shared.iter_other_parent_paths() {
235 let path = path.map_err(|e| v2_error(py, e))?;
237 let path = path.map_err(|e| v2_error(py, e))?;
236 set.add(py, PyBytes::new(py, path.as_bytes()))?;
238 set.add(py, PyBytes::new(py, path.as_bytes()))?;
237 }
239 }
238 Ok(set.into_object())
240 Ok(set.into_object())
239 }
241 }
240
242
241 def non_normal_entries(&self) -> PyResult<NonNormalEntries> {
243 def non_normal_entries(&self) -> PyResult<NonNormalEntries> {
242 NonNormalEntries::from_inner(py, self.clone_ref(py))
244 NonNormalEntries::from_inner(py, self.clone_ref(py))
243 }
245 }
244
246
245 def non_normal_entries_contains(&self, key: PyObject) -> PyResult<bool> {
247 def non_normal_entries_contains(&self, key: PyObject) -> PyResult<bool> {
246 let key = key.extract::<PyBytes>(py)?;
248 let key = key.extract::<PyBytes>(py)?;
247 self.inner(py)
249 self.inner(py)
248 .borrow_mut()
250 .borrow_mut()
249 .non_normal_entries_contains(HgPath::new(key.data(py)))
251 .non_normal_entries_contains(HgPath::new(key.data(py)))
250 .map_err(|e| v2_error(py, e))
252 .map_err(|e| v2_error(py, e))
251 }
253 }
252
254
253 def non_normal_entries_display(&self) -> PyResult<PyString> {
255 def non_normal_entries_display(&self) -> PyResult<PyString> {
254 let mut inner = self.inner(py).borrow_mut();
256 let mut inner = self.inner(py).borrow_mut();
255 let paths = inner
257 let paths = inner
256 .iter_non_normal_paths()
258 .iter_non_normal_paths()
257 .collect::<Result<Vec<_>, _>>()
259 .collect::<Result<Vec<_>, _>>()
258 .map_err(|e| v2_error(py, e))?;
260 .map_err(|e| v2_error(py, e))?;
259 let formatted = format!("NonNormalEntries: {}", hg::utils::join_display(paths, ", "));
261 let formatted = format!("NonNormalEntries: {}", hg::utils::join_display(paths, ", "));
260 Ok(PyString::new(py, &formatted))
262 Ok(PyString::new(py, &formatted))
261 }
263 }
262
264
263 def non_normal_entries_remove(&self, key: PyObject) -> PyResult<PyObject> {
265 def non_normal_entries_remove(&self, key: PyObject) -> PyResult<PyObject> {
264 let key = key.extract::<PyBytes>(py)?;
266 let key = key.extract::<PyBytes>(py)?;
265 self
267 self
266 .inner(py)
268 .inner(py)
267 .borrow_mut()
269 .borrow_mut()
268 .non_normal_entries_remove(HgPath::new(key.data(py)));
270 .non_normal_entries_remove(HgPath::new(key.data(py)));
269 Ok(py.None())
271 Ok(py.None())
270 }
272 }
271
273
272 def non_normal_or_other_parent_paths(&self) -> PyResult<PyList> {
274 def non_normal_or_other_parent_paths(&self) -> PyResult<PyList> {
273 let mut inner = self.inner(py).borrow_mut();
275 let mut inner = self.inner(py).borrow_mut();
274
276
275 let ret = PyList::new(py, &[]);
277 let ret = PyList::new(py, &[]);
276 for filename in inner.non_normal_or_other_parent_paths() {
278 for filename in inner.non_normal_or_other_parent_paths() {
277 let filename = filename.map_err(|e| v2_error(py, e))?;
279 let filename = filename.map_err(|e| v2_error(py, e))?;
278 let as_pystring = PyBytes::new(py, filename.as_bytes());
280 let as_pystring = PyBytes::new(py, filename.as_bytes());
279 ret.append(py, as_pystring.into_object());
281 ret.append(py, as_pystring.into_object());
280 }
282 }
281 Ok(ret)
283 Ok(ret)
282 }
284 }
283
285
284 def non_normal_entries_iter(&self) -> PyResult<NonNormalEntriesIterator> {
286 def non_normal_entries_iter(&self) -> PyResult<NonNormalEntriesIterator> {
285 // Make sure the sets are defined before we no longer have a mutable
287 // Make sure the sets are defined before we no longer have a mutable
286 // reference to the dmap.
288 // reference to the dmap.
287 self.inner(py)
289 self.inner(py)
288 .borrow_mut()
290 .borrow_mut()
289 .set_non_normal_other_parent_entries(false);
291 .set_non_normal_other_parent_entries(false);
290
292
291 let leaked_ref = self.inner(py).leak_immutable();
293 let leaked_ref = self.inner(py).leak_immutable();
292
294
293 NonNormalEntriesIterator::from_inner(py, unsafe {
295 NonNormalEntriesIterator::from_inner(py, unsafe {
294 leaked_ref.map(py, |o| {
296 leaked_ref.map(py, |o| {
295 o.iter_non_normal_paths_panic()
297 o.iter_non_normal_paths_panic()
296 })
298 })
297 })
299 })
298 }
300 }
299
301
300 def hastrackeddir(&self, d: PyObject) -> PyResult<PyBool> {
302 def hastrackeddir(&self, d: PyObject) -> PyResult<PyBool> {
301 let d = d.extract::<PyBytes>(py)?;
303 let d = d.extract::<PyBytes>(py)?;
302 Ok(self.inner(py).borrow_mut()
304 Ok(self.inner(py).borrow_mut()
303 .has_tracked_dir(HgPath::new(d.data(py)))
305 .has_tracked_dir(HgPath::new(d.data(py)))
304 .map_err(|e| {
306 .map_err(|e| {
305 PyErr::new::<exc::ValueError, _>(py, e.to_string())
307 PyErr::new::<exc::ValueError, _>(py, e.to_string())
306 })?
308 })?
307 .to_py_object(py))
309 .to_py_object(py))
308 }
310 }
309
311
310 def hasdir(&self, d: PyObject) -> PyResult<PyBool> {
312 def hasdir(&self, d: PyObject) -> PyResult<PyBool> {
311 let d = d.extract::<PyBytes>(py)?;
313 let d = d.extract::<PyBytes>(py)?;
312 Ok(self.inner(py).borrow_mut()
314 Ok(self.inner(py).borrow_mut()
313 .has_dir(HgPath::new(d.data(py)))
315 .has_dir(HgPath::new(d.data(py)))
314 .map_err(|e| {
316 .map_err(|e| {
315 PyErr::new::<exc::ValueError, _>(py, e.to_string())
317 PyErr::new::<exc::ValueError, _>(py, e.to_string())
316 })?
318 })?
317 .to_py_object(py))
319 .to_py_object(py))
318 }
320 }
319
321
320 def write_v1(
322 def write_v1(
321 &self,
323 &self,
322 p1: PyObject,
324 p1: PyObject,
323 p2: PyObject,
325 p2: PyObject,
324 now: PyObject
326 now: PyObject
325 ) -> PyResult<PyBytes> {
327 ) -> PyResult<PyBytes> {
326 let now = Timestamp(now.extract(py)?);
328 let now = Timestamp(now.extract(py)?);
327
329
328 let mut inner = self.inner(py).borrow_mut();
330 let mut inner = self.inner(py).borrow_mut();
329 let parents = DirstateParents {
331 let parents = DirstateParents {
330 p1: extract_node_id(py, &p1)?,
332 p1: extract_node_id(py, &p1)?,
331 p2: extract_node_id(py, &p2)?,
333 p2: extract_node_id(py, &p2)?,
332 };
334 };
333 let result = inner.pack_v1(parents, now);
335 let result = inner.pack_v1(parents, now);
334 match result {
336 match result {
335 Ok(packed) => Ok(PyBytes::new(py, &packed)),
337 Ok(packed) => Ok(PyBytes::new(py, &packed)),
336 Err(_) => Err(PyErr::new::<exc::OSError, _>(
338 Err(_) => Err(PyErr::new::<exc::OSError, _>(
337 py,
339 py,
338 "Dirstate error".to_string(),
340 "Dirstate error".to_string(),
339 )),
341 )),
340 }
342 }
341 }
343 }
342
344
343 /// Returns new data together with whether that data should be appended to
345 /// Returns new data together with whether that data should be appended to
344 /// the existing data file whose content is at `self.on_disk` (True),
346 /// the existing data file whose content is at `self.on_disk` (True),
345 /// instead of written to a new data file (False).
347 /// instead of written to a new data file (False).
346 def write_v2(
348 def write_v2(
347 &self,
349 &self,
348 now: PyObject,
350 now: PyObject,
349 can_append: bool,
351 can_append: bool,
350 ) -> PyResult<PyObject> {
352 ) -> PyResult<PyObject> {
351 let now = Timestamp(now.extract(py)?);
353 let now = Timestamp(now.extract(py)?);
352
354
353 let mut inner = self.inner(py).borrow_mut();
355 let mut inner = self.inner(py).borrow_mut();
354 let result = inner.pack_v2(now, can_append);
356 let result = inner.pack_v2(now, can_append);
355 match result {
357 match result {
356 Ok((packed, append)) => {
358 Ok((packed, tree_metadata, append)) => {
357 let packed = PyBytes::new(py, &packed);
359 let packed = PyBytes::new(py, &packed);
358 Ok((packed, append).to_py_object(py).into_object())
360 let tree_metadata = PyBytes::new(py, &tree_metadata);
361 let tuple = (packed, tree_metadata, append);
362 Ok(tuple.to_py_object(py).into_object())
359 },
363 },
360 Err(_) => Err(PyErr::new::<exc::OSError, _>(
364 Err(_) => Err(PyErr::new::<exc::OSError, _>(
361 py,
365 py,
362 "Dirstate error".to_string(),
366 "Dirstate error".to_string(),
363 )),
367 )),
364 }
368 }
365 }
369 }
366
370
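Note: a signature sketch of the `pack_v2` method this binding relies on, inferred from the three-element tuple matched above (not necessarily the real declaration):

    fn pack_v2(
        &mut self,
        now: Timestamp,
        can_append: bool,
    ) -> Result<(Vec<u8>, Vec<u8>, bool), DirstateError>;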
367 def filefoldmapasdict(&self) -> PyResult<PyDict> {
371 def filefoldmapasdict(&self) -> PyResult<PyDict> {
368 let dict = PyDict::new(py);
372 let dict = PyDict::new(py);
369 for item in self.inner(py).borrow_mut().iter() {
373 for item in self.inner(py).borrow_mut().iter() {
370 let (path, entry) = item.map_err(|e| v2_error(py, e))?;
374 let (path, entry) = item.map_err(|e| v2_error(py, e))?;
371 if entry.state != EntryState::Removed {
375 if entry.state != EntryState::Removed {
372 let key = normalize_case(path);
376 let key = normalize_case(path);
373 let value = path;
377 let value = path;
374 dict.set_item(
378 dict.set_item(
375 py,
379 py,
376 PyBytes::new(py, key.as_bytes()).into_object(),
                    PyBytes::new(py, key.as_bytes()).into_object(),
                    PyBytes::new(py, value.as_bytes()).into_object(),
                )?;
            }
        }
        Ok(dict)
    }

    def __len__(&self) -> PyResult<usize> {
        Ok(self.inner(py).borrow().len())
    }

    def __contains__(&self, key: PyObject) -> PyResult<bool> {
        let key = key.extract::<PyBytes>(py)?;
        self.inner(py)
            .borrow()
            .contains_key(HgPath::new(key.data(py)))
            .map_err(|e| v2_error(py, e))
    }

    def __getitem__(&self, key: PyObject) -> PyResult<PyObject> {
        let key = key.extract::<PyBytes>(py)?;
        let key = HgPath::new(key.data(py));
        match self
            .inner(py)
            .borrow()
            .get(key)
            .map_err(|e| v2_error(py, e))?
        {
            Some(entry) => {
                Ok(make_dirstate_item(py, &entry)?)
            },
            None => Err(PyErr::new::<exc::KeyError, _>(
                py,
                String::from_utf8_lossy(key.as_bytes()),
            )),
        }
    }

    def keys(&self) -> PyResult<DirstateMapKeysIterator> {
        let leaked_ref = self.inner(py).leak_immutable();
        DirstateMapKeysIterator::from_inner(
            py,
            unsafe { leaked_ref.map(py, |o| o.iter()) },
        )
    }

    def items(&self) -> PyResult<DirstateMapItemsIterator> {
        let leaked_ref = self.inner(py).leak_immutable();
        DirstateMapItemsIterator::from_inner(
            py,
            unsafe { leaked_ref.map(py, |o| o.iter()) },
        )
    }

    def __iter__(&self) -> PyResult<DirstateMapKeysIterator> {
        let leaked_ref = self.inner(py).leak_immutable();
        DirstateMapKeysIterator::from_inner(
            py,
            unsafe { leaked_ref.map(py, |o| o.iter()) },
        )
    }

    // TODO all copymap* methods, see docstring above
    def copymapcopy(&self) -> PyResult<PyDict> {
        let dict = PyDict::new(py);
        for item in self.inner(py).borrow().copy_map_iter() {
            let (key, value) = item.map_err(|e| v2_error(py, e))?;
            dict.set_item(
                py,
                PyBytes::new(py, key.as_bytes()),
                PyBytes::new(py, value.as_bytes()),
            )?;
        }
        Ok(dict)
    }

    def copymapgetitem(&self, key: PyObject) -> PyResult<PyBytes> {
        let key = key.extract::<PyBytes>(py)?;
        match self
            .inner(py)
            .borrow()
            .copy_map_get(HgPath::new(key.data(py)))
            .map_err(|e| v2_error(py, e))?
        {
            Some(copy) => Ok(PyBytes::new(py, copy.as_bytes())),
            None => Err(PyErr::new::<exc::KeyError, _>(
                py,
                String::from_utf8_lossy(key.data(py)),
            )),
        }
    }

    def copymap(&self) -> PyResult<CopyMap> {
        CopyMap::from_inner(py, self.clone_ref(py))
    }

    def copymaplen(&self) -> PyResult<usize> {
        Ok(self.inner(py).borrow().copy_map_len())
    }

    def copymapcontains(&self, key: PyObject) -> PyResult<bool> {
        let key = key.extract::<PyBytes>(py)?;
        self.inner(py)
            .borrow()
            .copy_map_contains_key(HgPath::new(key.data(py)))
            .map_err(|e| v2_error(py, e))
    }

    def copymapget(
        &self,
        key: PyObject,
        default: Option<PyObject>
    ) -> PyResult<Option<PyObject>> {
        let key = key.extract::<PyBytes>(py)?;
        match self
            .inner(py)
            .borrow()
            .copy_map_get(HgPath::new(key.data(py)))
            .map_err(|e| v2_error(py, e))?
        {
            Some(copy) => Ok(Some(
                PyBytes::new(py, copy.as_bytes()).into_object(),
            )),
            None => Ok(default),
        }
    }

    def copymapsetitem(
        &self,
        key: PyObject,
        value: PyObject
    ) -> PyResult<PyObject> {
        let key = key.extract::<PyBytes>(py)?;
        let value = value.extract::<PyBytes>(py)?;
        self.inner(py)
            .borrow_mut()
            .copy_map_insert(
                HgPathBuf::from_bytes(key.data(py)),
                HgPathBuf::from_bytes(value.data(py)),
            )
            .map_err(|e| v2_error(py, e))?;
        Ok(py.None())
    }

    def copymappop(
        &self,
        key: PyObject,
        default: Option<PyObject>
    ) -> PyResult<Option<PyObject>> {
        let key = key.extract::<PyBytes>(py)?;
        match self
            .inner(py)
            .borrow_mut()
            .copy_map_remove(HgPath::new(key.data(py)))
            .map_err(|e| v2_error(py, e))?
        {
            Some(_) => Ok(None),
            None => Ok(default),
        }
    }

    def copymapiter(&self) -> PyResult<CopyMapKeysIterator> {
        let leaked_ref = self.inner(py).leak_immutable();
        CopyMapKeysIterator::from_inner(
            py,
            unsafe { leaked_ref.map(py, |o| o.copy_map_iter()) },
        )
    }

    def copymapitemsiter(&self) -> PyResult<CopyMapItemsIterator> {
        let leaked_ref = self.inner(py).leak_immutable();
        CopyMapItemsIterator::from_inner(
            py,
            unsafe { leaked_ref.map(py, |o| o.copy_map_iter()) },
        )
    }

    def directories(&self) -> PyResult<PyList> {
        let dirs = PyList::new(py, &[]);
        for item in self.inner(py).borrow().iter_directories() {
            let (path, mtime) = item.map_err(|e| v2_error(py, e))?;
            let path = PyBytes::new(py, path.as_bytes());
            let mtime = mtime.map(|t| t.0).unwrap_or(-1);
            let item = make_directory_item(py, mtime as i32)?;
            let tuple = (path, item);
            dirs.append(py, tuple.to_py_object(py).into_object())
        }
        Ok(dirs)
    }

});

impl DirstateMap {
    pub fn get_inner_mut<'a>(
        &'a self,
        py: Python<'a>,
    ) -> RefMut<'a, Box<dyn DirstateMapMethods + Send>> {
        self.inner(py).borrow_mut()
    }
    fn translate_key(
        py: Python,
        res: Result<(&HgPath, DirstateEntry), DirstateV2ParseError>,
    ) -> PyResult<Option<PyBytes>> {
        let (f, _entry) = res.map_err(|e| v2_error(py, e))?;
        Ok(Some(PyBytes::new(py, f.as_bytes())))
    }
    fn translate_key_value(
        py: Python,
        res: Result<(&HgPath, DirstateEntry), DirstateV2ParseError>,
    ) -> PyResult<Option<(PyBytes, PyObject)>> {
        let (f, entry) = res.map_err(|e| v2_error(py, e))?;
        Ok(Some((
            PyBytes::new(py, f.as_bytes()),
            make_dirstate_item(py, &entry)?,
        )))
    }
}

py_shared_iterator!(
    DirstateMapKeysIterator,
    UnsafePyLeaked<StateMapIter<'static>>,
    DirstateMap::translate_key,
    Option<PyBytes>
);

py_shared_iterator!(
    DirstateMapItemsIterator,
    UnsafePyLeaked<StateMapIter<'static>>,
    DirstateMap::translate_key_value,
    Option<(PyBytes, PyObject)>
);

fn extract_node_id(py: Python, obj: &PyObject) -> PyResult<Node> {
    let bytes = obj.extract::<PyBytes>(py)?;
    match bytes.data(py).try_into() {
        Ok(s) => Ok(s),
        Err(e) => Err(PyErr::new::<exc::ValueError, _>(py, e.to_string())),
    }
}

pub(super) fn v2_error(py: Python<'_>, _: DirstateV2ParseError) -> PyErr {
    PyErr::new::<exc::ValueError, _>(py, "corrupted dirstate-v2")
}
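These bindings map Rust results onto Python mapping semantics: every dirstate-v2 parse failure funnels through `v2_error` into a Python `ValueError`, while missing keys either raise `KeyError` (`__getitem__`, `copymapgetitem`) or fall back to a caller-supplied default (`copymapget`, `copymappop`). A minimal sketch of that error-mapping shape, compilable on its own against the rust-cpython crate these bindings use; `ParseError` and `get_or_default` are illustrative stand-ins, not part of the real API:

use cpython::{exc, PyErr, PyObject, PyResult, Python};

// Stand-in for `DirstateV2ParseError`: a unit error with no payload.
struct ParseError;

// Same shape as `v2_error` above: any parse failure becomes a Python
// ValueError with a fixed message, since a corrupted file offers no
// finer-grained diagnosis to the Python caller.
fn to_value_error(py: Python<'_>, _e: ParseError) -> PyErr {
    PyErr::new::<exc::ValueError, _>(py, "corrupted dirstate-v2")
}

// dict.get-like lookup (hypothetical): a parse error becomes ValueError,
// a missing key falls back to the caller-supplied default.
fn get_or_default(
    py: Python<'_>,
    found: Result<Option<PyObject>, ParseError>,
    default: Option<PyObject>,
) -> PyResult<Option<PyObject>> {
    match found.map_err(|e| to_value_error(py, e))? {
        Some(value) => Ok(Some(value)),
        None => Ok(default),
    }
}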
@@ -1,219 +1,219 @@
use crate::dirstate::owning::OwningDirstateMap;
use hg::dirstate::parsers::Timestamp;
use hg::dirstate_tree::dispatch::DirstateMapMethods;
use hg::dirstate_tree::on_disk::DirstateV2ParseError;
use hg::matchers::Matcher;
use hg::utils::hg_path::{HgPath, HgPathBuf};
use hg::CopyMapIter;
use hg::DirstateEntry;
use hg::DirstateError;
use hg::DirstateParents;
use hg::DirstateStatus;
use hg::PatternFileWarning;
use hg::StateMapIter;
use hg::StatusError;
use hg::StatusOptions;
use std::path::PathBuf;

impl DirstateMapMethods for OwningDirstateMap {
    fn clear(&mut self) {
        self.get_mut().clear()
    }

    fn add_file(
        &mut self,
        filename: &HgPath,
        entry: DirstateEntry,
        added: bool,
        merged: bool,
        from_p2: bool,
        possibly_dirty: bool,
    ) -> Result<(), DirstateError> {
        self.get_mut().add_file(
            filename,
            entry,
            added,
            merged,
            from_p2,
            possibly_dirty,
        )
    }

    fn remove_file(
        &mut self,
        filename: &HgPath,
        in_merge: bool,
    ) -> Result<(), DirstateError> {
        self.get_mut().remove_file(filename, in_merge)
    }

    fn drop_file(&mut self, filename: &HgPath) -> Result<bool, DirstateError> {
        self.get_mut().drop_file(filename)
    }

    fn clear_ambiguous_times(
        &mut self,
        filenames: Vec<HgPathBuf>,
        now: i32,
    ) -> Result<(), DirstateV2ParseError> {
        self.get_mut().clear_ambiguous_times(filenames, now)
    }

    fn non_normal_entries_contains(
        &mut self,
        key: &HgPath,
    ) -> Result<bool, DirstateV2ParseError> {
        self.get_mut().non_normal_entries_contains(key)
    }

    fn non_normal_entries_remove(&mut self, key: &HgPath) {
        self.get_mut().non_normal_entries_remove(key)
    }

    fn non_normal_or_other_parent_paths(
        &mut self,
    ) -> Box<dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + '_>
    {
        self.get_mut().non_normal_or_other_parent_paths()
    }

    fn set_non_normal_other_parent_entries(&mut self, force: bool) {
        self.get_mut().set_non_normal_other_parent_entries(force)
    }

    fn iter_non_normal_paths(
        &mut self,
    ) -> Box<
        dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + Send + '_,
    > {
        self.get_mut().iter_non_normal_paths()
    }

    fn iter_non_normal_paths_panic(
        &self,
    ) -> Box<
        dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + Send + '_,
    > {
        self.get().iter_non_normal_paths_panic()
    }

    fn iter_other_parent_paths(
        &mut self,
    ) -> Box<
        dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + Send + '_,
    > {
        self.get_mut().iter_other_parent_paths()
    }

    fn has_tracked_dir(
        &mut self,
        directory: &HgPath,
    ) -> Result<bool, DirstateError> {
        self.get_mut().has_tracked_dir(directory)
    }

    fn has_dir(&mut self, directory: &HgPath) -> Result<bool, DirstateError> {
        self.get_mut().has_dir(directory)
    }

    fn pack_v1(
        &mut self,
        parents: DirstateParents,
        now: Timestamp,
    ) -> Result<Vec<u8>, DirstateError> {
        self.get_mut().pack_v1(parents, now)
    }

    fn pack_v2(
        &mut self,
        now: Timestamp,
        can_append: bool,
-    ) -> Result<(Vec<u8>, bool), DirstateError> {
+    ) -> Result<(Vec<u8>, Vec<u8>, bool), DirstateError> {
        self.get_mut().pack_v2(now, can_append)
    }

    fn status<'a>(
        &'a mut self,
        matcher: &'a (dyn Matcher + Sync),
        root_dir: PathBuf,
        ignore_files: Vec<PathBuf>,
        options: StatusOptions,
    ) -> Result<(DirstateStatus<'a>, Vec<PatternFileWarning>), StatusError>
    {
        self.get_mut()
            .status(matcher, root_dir, ignore_files, options)
    }

    fn copy_map_len(&self) -> usize {
        self.get().copy_map_len()
    }

    fn copy_map_iter(&self) -> CopyMapIter<'_> {
        self.get().copy_map_iter()
    }

    fn copy_map_contains_key(
        &self,
        key: &HgPath,
    ) -> Result<bool, DirstateV2ParseError> {
        self.get().copy_map_contains_key(key)
    }

    fn copy_map_get(
        &self,
        key: &HgPath,
    ) -> Result<Option<&HgPath>, DirstateV2ParseError> {
        self.get().copy_map_get(key)
    }

    fn copy_map_remove(
        &mut self,
        key: &HgPath,
    ) -> Result<Option<HgPathBuf>, DirstateV2ParseError> {
        self.get_mut().copy_map_remove(key)
    }

    fn copy_map_insert(
        &mut self,
        key: HgPathBuf,
        value: HgPathBuf,
    ) -> Result<Option<HgPathBuf>, DirstateV2ParseError> {
        self.get_mut().copy_map_insert(key, value)
    }

    fn len(&self) -> usize {
        self.get().len()
    }

    fn contains_key(
        &self,
        key: &HgPath,
    ) -> Result<bool, DirstateV2ParseError> {
        self.get().contains_key(key)
    }

    fn get(
        &self,
        key: &HgPath,
    ) -> Result<Option<DirstateEntry>, DirstateV2ParseError> {
        self.get().get(key)
    }

    fn iter(&self) -> StateMapIter<'_> {
        self.get().iter()
    }

    fn iter_directories(
        &self,
    ) -> Box<
        dyn Iterator<
                Item = Result<
                    (&HgPath, Option<Timestamp>),
                    DirstateV2ParseError,
                >,
            > + Send
            + '_,
    > {
        self.get().iter_directories()
    }
}
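The one substantive change in this forwarding impl is the `pack_v2` return type: it now yields the serialized data and the fixed-size tree metadata as two separate buffers, since this commit moves that metadata out of the data file and into the docket. A self-contained sketch of the new calling convention; `Docket` and `pack_v2_stub` are illustrative stand-ins, not the real Mercurial API:

// Stand-in for the docket's fixed-size fields relevant here.
struct Docket {
    tree_metadata: Vec<u8>,
    data_size: u64,
}

// Post-change shape: (data bytes, fixed-size tree metadata, appended?).
fn pack_v2_stub() -> (Vec<u8>, Vec<u8>, bool) {
    (b"...nodes...".to_vec(), b"...root+counters...".to_vec(), true)
}

fn main() {
    // Before this commit the tuple had two elements; callers now
    // destructure three and route the metadata into the docket instead
    // of appending it to the data file.
    let (data, tree_metadata, appended) = pack_v2_stub();
    let docket = Docket {
        tree_metadata,
        data_size: data.len() as u64,
    };
    assert!(appended);
    assert_eq!(docket.data_size, data.len() as u64);
}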
@@ -1,115 +1,117 @@
use cpython::PyBytes;
use cpython::Python;
use hg::dirstate_tree::dirstate_map::DirstateMap;
use hg::DirstateError;
use hg::DirstateParents;

/// Keep a `DirstateMap<'on_disk>` next to the `on_disk` buffer that it
/// borrows.
///
/// This is similar to [`OwningRef`] which is more limited because it
/// represents exactly one `&T` reference next to the value it borrows, as
/// opposed to a struct that may contain an arbitrary number of references in
/// arbitrarily-nested data structures.
///
/// [`OwningRef`]: https://docs.rs/owning_ref/0.4.1/owning_ref/struct.OwningRef.html
pub(super) struct OwningDirstateMap {
    /// Owned handle to a bytes buffer with a stable address.
    ///
    /// See <https://docs.rs/owning_ref/0.4.1/owning_ref/trait.StableAddress.html>.
    on_disk: PyBytes,

    /// Pointer for `Box<DirstateMap<'on_disk>>`, type-erased because the
    /// language cannot represent a lifetime referencing a sibling field.
    /// This is not quite a self-referential struct (moving this struct is not
    /// a problem as it doesn’t change the address of the bytes buffer owned
    /// by `PyBytes`) but touches similar borrow-checker limitations.
    ptr: *mut (),
}

impl OwningDirstateMap {
    pub fn new_v1(
        py: Python,
        on_disk: PyBytes,
    ) -> Result<(Self, Option<DirstateParents>), DirstateError> {
        let bytes: &'_ [u8] = on_disk.data(py);
        let (map, parents) = DirstateMap::new_v1(bytes)?;

        // Like in `bytes` above, this `'_` lifetime parameter borrows from
        // the bytes buffer owned by `on_disk`.
        let ptr: *mut DirstateMap<'_> = Box::into_raw(Box::new(map));

        // Erase the pointed type entirely in order to erase the lifetime.
        let ptr: *mut () = ptr.cast();

        Ok((Self { on_disk, ptr }, parents))
    }

    pub fn new_v2(
        py: Python,
        on_disk: PyBytes,
        data_size: usize,
+        tree_metadata: PyBytes,
    ) -> Result<Self, DirstateError> {
        let bytes: &'_ [u8] = on_disk.data(py);
-        let map = DirstateMap::new_v2(bytes, data_size)?;
+        let map =
+            DirstateMap::new_v2(bytes, data_size, tree_metadata.data(py))?;

        // Like in `bytes` above, this `'_` lifetime parameter borrows from
        // the bytes buffer owned by `on_disk`.
        let ptr: *mut DirstateMap<'_> = Box::into_raw(Box::new(map));

        // Erase the pointed type entirely in order to erase the lifetime.
        let ptr: *mut () = ptr.cast();

        Ok(Self { on_disk, ptr })
    }

    pub fn get_mut<'a>(&'a mut self) -> &'a mut DirstateMap<'a> {
        // SAFETY: We cast the type-erased pointer back to the same type it had
        // in `new`, except with a different lifetime parameter. This time we
        // connect the lifetime to that of `self`. This cast is valid because
        // `self` owns the same `PyBytes` whose buffer `DirstateMap`
        // references. That buffer has a stable memory address because the byte
        // string value of a `PyBytes` is immutable.
        let ptr: *mut DirstateMap<'a> = self.ptr.cast();
        // SAFETY: we dereference that pointer, connecting the lifetime of the
        // new `&mut` to that of `self`. This is valid because the
        // raw pointer is to a boxed value, and `self` owns that box.
        unsafe { &mut *ptr }
    }

    pub fn get<'a>(&'a self) -> &'a DirstateMap<'a> {
        // SAFETY: same reasoning as in `get_mut` above.
        let ptr: *mut DirstateMap<'a> = self.ptr.cast();
        unsafe { &*ptr }
    }
}

impl Drop for OwningDirstateMap {
    fn drop(&mut self) {
        // Silence a "field is never read" warning, and demonstrate that this
        // value is still alive.
        let _ = &self.on_disk;
        // SAFETY: this cast is the same as in `get_mut`, and is valid for the
        // same reason. `self.on_disk` still exists at this point, drop glue
        // will drop it implicitly after this `drop` method returns.
        let ptr: *mut DirstateMap<'_> = self.ptr.cast();
        // SAFETY: `Box::from_raw` takes ownership of the box away from `self`.
        // This is fine because drop glue does nothing for `*mut ()` and we’re
        // in `drop`, so `get` and `get_mut` cannot be called again.
        unsafe { drop(Box::from_raw(ptr)) }
    }
}

fn _static_assert_is_send<T: Send>() {}

fn _static_assert_fields_are_send() {
    _static_assert_is_send::<PyBytes>();
    _static_assert_is_send::<Box<DirstateMap<'_>>>();
}

// SAFETY: we don’t get this impl implicitly because `*mut (): !Send`, since
// thread-safety of raw pointers is unknown in the general case. However this
// particular raw pointer represents a `Box<DirstateMap<'on_disk>>` that we
// own. Since that `Box` and `PyBytes` are both `Send` as shown above, it
// is sound to mark this struct as `Send` too.
unsafe impl Send for OwningDirstateMap {}
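The pattern above sidesteps Rust's lack of self-referential structs: own the buffer, box the parsed view that borrows from it, erase the view's lifetime behind `*mut ()`, and re-attach a lifetime tied to `&self` at every access. A self-contained sketch of the same trick under simplified assumptions, with a heap-allocated byte buffer standing in for `PyBytes` (both keep a stable buffer address while alive); all names here are illustrative:

// Stand-in for `DirstateMap<'on_disk>`: borrows from the owned buffer.
struct Parsed<'on_disk> {
    first_byte: Option<&'on_disk u8>,
}

struct OwningParsed {
    on_disk: Box<[u8]>, // stable address: the allocation never moves
    ptr: *mut (),       // type-erased Box<Parsed<'on_disk>>
}

impl OwningParsed {
    fn new(on_disk: Box<[u8]>) -> Self {
        let bytes: &[u8] = &on_disk;
        // SAFETY (sketch): pretend the borrow is 'static; it is really
        // tied to `on_disk`, which outlives the box we free first in Drop.
        let bytes: &'static [u8] = unsafe { std::mem::transmute(bytes) };
        let parsed = Parsed { first_byte: bytes.first() };
        // Erase the pointed type entirely in order to erase the lifetime.
        let ptr: *mut Parsed<'static> = Box::into_raw(Box::new(parsed));
        OwningParsed { on_disk, ptr: ptr.cast() }
    }

    fn get<'a>(&'a self) -> &'a Parsed<'a> {
        // SAFETY (sketch): re-attach a lifetime no longer than `&self`,
        // which also keeps the borrowed buffer alive.
        let ptr: *mut Parsed<'a> = self.ptr.cast();
        unsafe { &*ptr }
    }
}

impl Drop for OwningParsed {
    fn drop(&mut self) {
        // Free the parsed box while `self.on_disk` is still alive.
        let ptr: *mut Parsed<'_> = self.ptr.cast();
        unsafe { drop(Box::from_raw(ptr)) }
    }
}

fn main() {
    let owning = OwningParsed::new(vec![0xFF, 0x00].into_boxed_slice());
    assert_eq!(owning.get().first_byte, Some(&0xFF));
}

The two invariants are the same as in the real type: the buffer address must be stable (hence a boxed slice or `PyBytes`, never a growable buffer), and `drop` must free the parsed box before the buffer it borrows from.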
@@ -1,342 +1,347 @@
// status.rs
//
// Copyright 2020, Georges Racinet <georges.racinets@octobus.net>
//
// This software may be used and distributed according to the terms of the
// GNU General Public License version 2 or any later version.

use crate::error::CommandError;
use crate::ui::Ui;
use clap::{Arg, SubCommand};
use hg;
use hg::dirstate_tree::dirstate_map::DirstateMap;
use hg::dirstate_tree::on_disk;
use hg::errors::HgResultExt;
use hg::errors::IoResultExt;
use hg::matchers::AlwaysMatcher;
use hg::operations::cat;
use hg::repo::Repo;
use hg::revlog::node::Node;
use hg::utils::hg_path::{hg_path_to_os_string, HgPath};
use hg::StatusError;
use hg::{HgPathCow, StatusOptions};
use log::{info, warn};
use std::convert::TryInto;
use std::fs;
use std::io::BufReader;
use std::io::Read;

pub const HELP_TEXT: &str = "
Show changed files in the working directory

This is a pure Rust version of `hg status`.

Some options might be missing, check the list below.
";

pub fn args() -> clap::App<'static, 'static> {
    SubCommand::with_name("status")
        .alias("st")
        .about(HELP_TEXT)
        .arg(
            Arg::with_name("all")
                .help("show status of all files")
                .short("-A")
                .long("--all"),
        )
        .arg(
            Arg::with_name("modified")
                .help("show only modified files")
                .short("-m")
                .long("--modified"),
        )
        .arg(
            Arg::with_name("added")
                .help("show only added files")
                .short("-a")
                .long("--added"),
        )
        .arg(
            Arg::with_name("removed")
                .help("show only removed files")
                .short("-r")
                .long("--removed"),
        )
        .arg(
            Arg::with_name("clean")
                .help("show only clean files")
                .short("-c")
                .long("--clean"),
        )
        .arg(
            Arg::with_name("deleted")
                .help("show only deleted files")
                .short("-d")
                .long("--deleted"),
        )
        .arg(
            Arg::with_name("unknown")
                .help("show only unknown (not tracked) files")
                .short("-u")
                .long("--unknown"),
        )
        .arg(
            Arg::with_name("ignored")
                .help("show only ignored files")
                .short("-i")
                .long("--ignored"),
        )
}

/// Pure data type allowing the caller to specify file states to display
#[derive(Copy, Clone, Debug)]
pub struct DisplayStates {
    pub modified: bool,
    pub added: bool,
    pub removed: bool,
    pub clean: bool,
    pub deleted: bool,
    pub unknown: bool,
    pub ignored: bool,
}

pub const DEFAULT_DISPLAY_STATES: DisplayStates = DisplayStates {
    modified: true,
    added: true,
    removed: true,
    clean: false,
    deleted: true,
    unknown: true,
    ignored: false,
};

pub const ALL_DISPLAY_STATES: DisplayStates = DisplayStates {
    modified: true,
    added: true,
    removed: true,
    clean: true,
    deleted: true,
    unknown: true,
    ignored: true,
};

impl DisplayStates {
    pub fn is_empty(&self) -> bool {
        !(self.modified
            || self.added
            || self.removed
            || self.clean
            || self.deleted
            || self.unknown
            || self.ignored)
    }
}

pub fn run(invocation: &crate::CliInvocation) -> Result<(), CommandError> {
    let status_enabled_default = false;
    let status_enabled = invocation.config.get_option(b"rhg", b"status")?;
    if !status_enabled.unwrap_or(status_enabled_default) {
        return Err(CommandError::unsupported(
            "status is experimental in rhg (enable it with 'rhg.status = true' \
            or enable fallback with 'rhg.on-unsupported = fallback')"
        ));
    }

    let ui = invocation.ui;
    let args = invocation.subcommand_args;
    let display_states = if args.is_present("all") {
        // TODO when implementing `--quiet`: it excludes clean files
        // from `--all`
        ALL_DISPLAY_STATES
    } else {
        let requested = DisplayStates {
            modified: args.is_present("modified"),
            added: args.is_present("added"),
            removed: args.is_present("removed"),
            clean: args.is_present("clean"),
            deleted: args.is_present("deleted"),
            unknown: args.is_present("unknown"),
            ignored: args.is_present("ignored"),
        };
        if requested.is_empty() {
            DEFAULT_DISPLAY_STATES
        } else {
            requested
        }
    };

    let repo = invocation.repo?;
    let dirstate_data_mmap;
    let (mut dmap, parents) = if repo.has_dirstate_v2() {
+        let docket_data =
+            repo.hg_vfs().read("dirstate").io_not_found_as_none()?;
        let parents;
        let dirstate_data;
        let data_size;
-        if let Some(docket_data) =
-            repo.hg_vfs().read("dirstate").io_not_found_as_none()?
-        {
-            let docket = on_disk::read_docket(&docket_data)?;
+        let docket;
+        let tree_metadata;
+        if let Some(docket_data) = &docket_data {
+            docket = on_disk::read_docket(docket_data)?;
+            tree_metadata = docket.tree_metadata();
            parents = Some(docket.parents());
            data_size = docket.data_size();
            dirstate_data_mmap = repo
                .hg_vfs()
                .mmap_open(docket.data_filename())
                .io_not_found_as_none()?;
            dirstate_data = dirstate_data_mmap.as_deref().unwrap_or(b"");
        } else {
            parents = None;
+            tree_metadata = b"";
            data_size = 0;
            dirstate_data = b"";
        }
-        let dmap = DirstateMap::new_v2(dirstate_data, data_size)?;
+        let dmap =
+            DirstateMap::new_v2(dirstate_data, data_size, tree_metadata)?;
        (dmap, parents)
    } else {
        dirstate_data_mmap =
            repo.hg_vfs().mmap_open("dirstate").io_not_found_as_none()?;
        let dirstate_data = dirstate_data_mmap.as_deref().unwrap_or(b"");
        DirstateMap::new_v1(dirstate_data)?
    };

    let options = StatusOptions {
        // TODO should be provided by the dirstate parsing and
        // hence be stored on dmap. Using a value that assumes we aren't
        // below the time resolution granularity of the FS and the
        // dirstate.
        last_normal_time: 0,
        // we're currently supporting file systems with exec flags only
        // anyway
        check_exec: true,
        list_clean: display_states.clean,
        list_unknown: display_states.unknown,
        list_ignored: display_states.ignored,
        collect_traversed_dirs: false,
    };
    let ignore_file = repo.working_directory_vfs().join(".hgignore"); // TODO hardcoded
    let (mut ds_status, pattern_warnings) = hg::dirstate_tree::status::status(
        &mut dmap,
        &AlwaysMatcher,
        repo.working_directory_path().to_owned(),
        vec![ignore_file],
        options,
    )?;
    if !pattern_warnings.is_empty() {
        warn!("Pattern warnings: {:?}", &pattern_warnings);
    }

    if !ds_status.bad.is_empty() {
        warn!("Bad matches {:?}", &(ds_status.bad))
    }
    if !ds_status.unsure.is_empty() {
        info!(
            "Files to be rechecked by retrieval from filelog: {:?}",
            &ds_status.unsure
        );
    }
    if !ds_status.unsure.is_empty()
        && (display_states.modified || display_states.clean)
    {
        let p1: Node = parents
            .expect(
                "Dirstate with no parents should not list any file to
                be rechecked for modifications",
            )
            .p1
            .into();
        let p1_hex = format!("{:x}", p1);
        for to_check in ds_status.unsure {
            if cat_file_is_modified(repo, &to_check, &p1_hex)? {
                if display_states.modified {
                    ds_status.modified.push(to_check);
                }
            } else {
                if display_states.clean {
                    ds_status.clean.push(to_check);
                }
            }
        }
    }
    if display_states.modified {
        display_status_paths(ui, &mut ds_status.modified, b"M")?;
    }
    if display_states.added {
        display_status_paths(ui, &mut ds_status.added, b"A")?;
    }
    if display_states.removed {
        display_status_paths(ui, &mut ds_status.removed, b"R")?;
    }
    if display_states.deleted {
        display_status_paths(ui, &mut ds_status.deleted, b"!")?;
    }
    if display_states.unknown {
        display_status_paths(ui, &mut ds_status.unknown, b"?")?;
    }
    if display_states.ignored {
        display_status_paths(ui, &mut ds_status.ignored, b"I")?;
    }
    Ok(())
}

// Probably more elegant to use a Deref or Borrow trait rather than
// hardcode HgPathBuf, but probably not really useful at this point
fn display_status_paths(
    ui: &Ui,
    paths: &mut [HgPathCow],
    status_prefix: &[u8],
) -> Result<(), CommandError> {
    paths.sort_unstable();
    for path in paths {
        // Same TODO as in commands::root
        let bytes: &[u8] = path.as_bytes();
        // TODO optim, probably lots of unneeded copies here, especially
        // if our output stream is buffered
        ui.write_stdout(&[status_prefix, b" ", bytes, b"\n"].concat())?;
    }
    Ok(())
}

/// Check if a file is modified by comparing actual repo store and file system.
///
/// This is meant to be used for those files that the dirstate cannot resolve,
/// due to time resolution limits.
///
/// TODO: detect permission bits and similar metadata modifications
fn cat_file_is_modified(
    repo: &Repo,
    hg_path: &HgPath,
    rev: &str,
) -> Result<bool, CommandError> {
    // TODO CatRev expects &[HgPathBuf], something like
    // &[impl Deref<HgPath>] would be nicer and should avoid the copy
    let path_bufs = [hg_path.into()];
    // TODO IIUC CatRev returns a simple Vec<u8> for all files
    // being able to tell them apart as (path, bytes) would be nicer
    // and OPTIM would allow manifest resolution just once.
    let output = cat(repo, rev, &path_bufs).map_err(|e| (e, rev))?;

    let fs_path = repo
        .working_directory_vfs()
        .join(hg_path_to_os_string(hg_path).expect("HgPath conversion"));
    let hg_data_len: u64 = match output.concatenated.len().try_into() {
        Ok(v) => v,
        Err(_) => {
            // conversion of data length to u64 failed,
            // good luck for any file to have this content
            return Ok(true);
        }
    };
    let fobj = fs::File::open(&fs_path).when_reading_file(&fs_path)?;
    if fobj.metadata().map_err(|e| StatusError::from(e))?.len() != hg_data_len
    {
        return Ok(true);
    }
    for (fs_byte, hg_byte) in
        BufReader::new(fobj).bytes().zip(output.concatenated)
    {
        if fs_byte.map_err(|e| StatusError::from(e))? != hg_byte {
            return Ok(true);
        }
    }
    Ok(false)
}
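`cat_file_is_modified` compares sizes before bytes, so the common case of a length mismatch never reads the file body, and a byte mismatch stops the scan early. The same two-step comparison, reduced to a standalone function over std only (the name and signature are illustrative, not part of rhg):

use std::fs::File;
use std::io::{BufReader, Read};

fn file_differs(path: &str, expected: &[u8]) -> std::io::Result<bool> {
    let file = File::open(path)?;
    // Cheap rejection first: different lengths cannot be equal.
    if file.metadata()?.len() != expected.len() as u64 {
        return Ok(true);
    }
    // Same length: stream the file and stop at the first mismatch,
    // like the BufReader::bytes() zip loop above.
    for (fs_byte, hg_byte) in BufReader::new(file).bytes().zip(expected) {
        if fs_byte? != *hg_byte {
            return Ok(true);
        }
    }
    Ok(false)
}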