dirstate-v2: Add --dirs to debugdirstate command...
Simon Sapin
r48140:3b9914b2 default
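
In short: this change teaches `hg debugdirstate` a new `--dirs` flag. A minimal sketch of the added code path, using only names that appear in the diff below (note: `repo.dirstate.directories()` is assumed here to yield directory-only entries, which per this series only the dirstate-v2 format stores):

    # Sketch of the --dirs handling added by this change (mirrors the
    # changed hunk below); not standalone, it runs inside debugstate.
    entries = list(pycompat.iteritems(repo.dirstate))  # (path, entry) pairs
    if opts['dirs']:
        # fold directory-only dirstate entries into the same listing
        entries.extend(repo.dirstate.directories())
    entries.sort(key=keyfunc)  # keyfunc: None (path order) or mtime key with --datesort
    for file_, ent in entries:
        ...  # each entry prints as: state, mode, size, mtime, path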
@@ -1,4829 +1,4834 @@
 # debugcommands.py - command processing for debug* commands
 #
 # Copyright 2005-2016 Olivia Mackall <olivia@selenic.com>
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.

 from __future__ import absolute_import

 import codecs
 import collections
 import contextlib
 import difflib
 import errno
 import glob
 import operator
 import os
 import platform
 import random
 import re
 import socket
 import ssl
 import stat
 import string
 import subprocess
 import sys
 import time

 from .i18n import _
 from .node import (
     bin,
     hex,
     nullrev,
     short,
 )
 from .pycompat import (
     getattr,
     open,
 )
 from . import (
     bundle2,
     bundlerepo,
     changegroup,
     cmdutil,
     color,
     context,
     copies,
     dagparser,
     encoding,
     error,
     exchange,
     extensions,
     filemerge,
     filesetlang,
     formatter,
     hg,
     httppeer,
     localrepo,
     lock as lockmod,
     logcmdutil,
     mergestate as mergestatemod,
     metadata,
     obsolete,
     obsutil,
     pathutil,
     phases,
     policy,
     pvec,
     pycompat,
     registrar,
     repair,
     repoview,
     revlog,
     revset,
     revsetlang,
     scmutil,
     setdiscovery,
     simplemerge,
     sshpeer,
     sslutil,
     streamclone,
     strip,
     tags as tagsmod,
     templater,
     treediscovery,
     upgrade,
     url as urlmod,
     util,
     vfs as vfsmod,
     wireprotoframing,
     wireprotoserver,
     wireprotov2peer,
 )
 from .interfaces import repository
 from .utils import (
     cborutil,
     compression,
     dateutil,
     procutil,
     stringutil,
     urlutil,
 )

 from .revlogutils import (
     deltas as deltautil,
     nodemap,
     sidedata,
 )

 release = lockmod.release

 table = {}
 table.update(strip.command._table)
 command = registrar.command(table)


 @command(b'debugancestor', [], _(b'[INDEX] REV1 REV2'), optionalrepo=True)
 def debugancestor(ui, repo, *args):
     """find the ancestor revision of two revisions in a given index"""
     if len(args) == 3:
         index, rev1, rev2 = args
         r = revlog.revlog(vfsmod.vfs(encoding.getcwd(), audit=False), index)
         lookup = r.lookup
     elif len(args) == 2:
         if not repo:
             raise error.Abort(
                 _(b'there is no Mercurial repository here (.hg not found)')
             )
         rev1, rev2 = args
         r = repo.changelog
         lookup = repo.lookup
     else:
         raise error.Abort(_(b'either two or three arguments required'))
     a = r.ancestor(lookup(rev1), lookup(rev2))
     ui.write(b'%d:%s\n' % (r.rev(a), hex(a)))


 @command(b'debugantivirusrunning', [])
 def debugantivirusrunning(ui, repo):
     """attempt to trigger an antivirus scanner to see if one is active"""
     with repo.cachevfs.open('eicar-test-file.com', b'wb') as f:
         f.write(
             util.b85decode(
                 # This is a base85-armored version of the EICAR test file. See
                 # https://en.wikipedia.org/wiki/EICAR_test_file for details.
                 b'ST#=}P$fV?P+K%yP+C|uG$>GBDK|qyDK~v2MM*<JQY}+dK~6+LQba95P'
                 b'E<)&Nm5l)EmTEQR4qnHOhq9iNGnJx'
             )
         )
     # Give an AV engine time to scan the file.
     time.sleep(2)
     util.unlink(repo.cachevfs.join('eicar-test-file.com'))


 @command(b'debugapplystreamclonebundle', [], b'FILE')
 def debugapplystreamclonebundle(ui, repo, fname):
     """apply a stream clone bundle file"""
     f = hg.openpath(ui, fname)
     gen = exchange.readbundle(ui, f, fname)
     gen.apply(repo)


 @command(
     b'debugbuilddag',
     [
         (
             b'm',
             b'mergeable-file',
             None,
             _(b'add single file mergeable changes'),
         ),
         (
             b'o',
             b'overwritten-file',
             None,
             _(b'add single file all revs overwrite'),
         ),
         (b'n', b'new-file', None, _(b'add new file at each rev')),
     ],
     _(b'[OPTION]... [TEXT]'),
 )
 def debugbuilddag(
     ui,
     repo,
     text=None,
     mergeable_file=False,
     overwritten_file=False,
     new_file=False,
 ):
     """builds a repo with a given DAG from scratch in the current empty repo

     The description of the DAG is read from stdin if not given on the
     command line.

     Elements:

     - "+n" is a linear run of n nodes based on the current default parent
     - "." is a single node based on the current default parent
     - "$" resets the default parent to null (implied at the start);
       otherwise the default parent is always the last node created
     - "<p" sets the default parent to the backref p
     - "*p" is a fork at parent p, which is a backref
     - "*p1/p2" is a merge of parents p1 and p2, which are backrefs
     - "/p2" is a merge of the preceding node and p2
     - ":tag" defines a local tag for the preceding node
     - "@branch" sets the named branch for subsequent nodes
     - "#...\\n" is a comment up to the end of the line

     Whitespace between the above elements is ignored.

     A backref is either

     - a number n, which references the node curr-n, where curr is the current
       node, or
     - the name of a local tag you placed earlier using ":tag", or
     - empty to denote the default parent.

     All string-valued elements are either strictly alphanumeric, or must
     be enclosed in double quotes ("..."), with "\\" as escape character.
220 """
220 """
221
221
222 if text is None:
222 if text is None:
223 ui.status(_(b"reading DAG from stdin\n"))
223 ui.status(_(b"reading DAG from stdin\n"))
224 text = ui.fin.read()
224 text = ui.fin.read()
225
225
226 cl = repo.changelog
226 cl = repo.changelog
227 if len(cl) > 0:
227 if len(cl) > 0:
228 raise error.Abort(_(b'repository is not empty'))
228 raise error.Abort(_(b'repository is not empty'))
229
229
230 # determine number of revs in DAG
230 # determine number of revs in DAG
231 total = 0
231 total = 0
232 for type, data in dagparser.parsedag(text):
232 for type, data in dagparser.parsedag(text):
233 if type == b'n':
233 if type == b'n':
234 total += 1
234 total += 1
235
235
236 if mergeable_file:
236 if mergeable_file:
237 linesperrev = 2
237 linesperrev = 2
238 # make a file with k lines per rev
238 # make a file with k lines per rev
239 initialmergedlines = [
239 initialmergedlines = [
240 b'%d' % i for i in pycompat.xrange(0, total * linesperrev)
240 b'%d' % i for i in pycompat.xrange(0, total * linesperrev)
241 ]
241 ]
242 initialmergedlines.append(b"")
242 initialmergedlines.append(b"")
243
243
244 tags = []
244 tags = []
245 progress = ui.makeprogress(
245 progress = ui.makeprogress(
246 _(b'building'), unit=_(b'revisions'), total=total
246 _(b'building'), unit=_(b'revisions'), total=total
247 )
247 )
248 with progress, repo.wlock(), repo.lock(), repo.transaction(b"builddag"):
248 with progress, repo.wlock(), repo.lock(), repo.transaction(b"builddag"):
249 at = -1
249 at = -1
250 atbranch = b'default'
250 atbranch = b'default'
251 nodeids = []
251 nodeids = []
252 id = 0
252 id = 0
253 progress.update(id)
253 progress.update(id)
254 for type, data in dagparser.parsedag(text):
254 for type, data in dagparser.parsedag(text):
255 if type == b'n':
255 if type == b'n':
256 ui.note((b'node %s\n' % pycompat.bytestr(data)))
256 ui.note((b'node %s\n' % pycompat.bytestr(data)))
257 id, ps = data
257 id, ps = data
258
258
259 files = []
259 files = []
260 filecontent = {}
260 filecontent = {}
261
261
262 p2 = None
262 p2 = None
263 if mergeable_file:
263 if mergeable_file:
264 fn = b"mf"
264 fn = b"mf"
265 p1 = repo[ps[0]]
265 p1 = repo[ps[0]]
266 if len(ps) > 1:
266 if len(ps) > 1:
267 p2 = repo[ps[1]]
267 p2 = repo[ps[1]]
268 pa = p1.ancestor(p2)
268 pa = p1.ancestor(p2)
269 base, local, other = [
269 base, local, other = [
270 x[fn].data() for x in (pa, p1, p2)
270 x[fn].data() for x in (pa, p1, p2)
271 ]
271 ]
272 m3 = simplemerge.Merge3Text(base, local, other)
272 m3 = simplemerge.Merge3Text(base, local, other)
273 ml = [l.strip() for l in m3.merge_lines()]
273 ml = [l.strip() for l in m3.merge_lines()]
274 ml.append(b"")
274 ml.append(b"")
275 elif at > 0:
275 elif at > 0:
276 ml = p1[fn].data().split(b"\n")
276 ml = p1[fn].data().split(b"\n")
277 else:
277 else:
278 ml = initialmergedlines
278 ml = initialmergedlines
279 ml[id * linesperrev] += b" r%i" % id
279 ml[id * linesperrev] += b" r%i" % id
280 mergedtext = b"\n".join(ml)
280 mergedtext = b"\n".join(ml)
281 files.append(fn)
281 files.append(fn)
282 filecontent[fn] = mergedtext
282 filecontent[fn] = mergedtext
283
283
284 if overwritten_file:
284 if overwritten_file:
285 fn = b"of"
285 fn = b"of"
286 files.append(fn)
286 files.append(fn)
287 filecontent[fn] = b"r%i\n" % id
287 filecontent[fn] = b"r%i\n" % id
288
288
289 if new_file:
289 if new_file:
290 fn = b"nf%i" % id
290 fn = b"nf%i" % id
291 files.append(fn)
291 files.append(fn)
292 filecontent[fn] = b"r%i\n" % id
292 filecontent[fn] = b"r%i\n" % id
293 if len(ps) > 1:
293 if len(ps) > 1:
294 if not p2:
294 if not p2:
295 p2 = repo[ps[1]]
295 p2 = repo[ps[1]]
296 for fn in p2:
296 for fn in p2:
297 if fn.startswith(b"nf"):
297 if fn.startswith(b"nf"):
298 files.append(fn)
298 files.append(fn)
299 filecontent[fn] = p2[fn].data()
299 filecontent[fn] = p2[fn].data()
300
300
301 def fctxfn(repo, cx, path):
301 def fctxfn(repo, cx, path):
302 if path in filecontent:
302 if path in filecontent:
303 return context.memfilectx(
303 return context.memfilectx(
304 repo, cx, path, filecontent[path]
304 repo, cx, path, filecontent[path]
305 )
305 )
306 return None
306 return None
307
307
308 if len(ps) == 0 or ps[0] < 0:
308 if len(ps) == 0 or ps[0] < 0:
309 pars = [None, None]
309 pars = [None, None]
310 elif len(ps) == 1:
310 elif len(ps) == 1:
311 pars = [nodeids[ps[0]], None]
311 pars = [nodeids[ps[0]], None]
312 else:
312 else:
313 pars = [nodeids[p] for p in ps]
313 pars = [nodeids[p] for p in ps]
314 cx = context.memctx(
314 cx = context.memctx(
315 repo,
315 repo,
316 pars,
316 pars,
317 b"r%i" % id,
317 b"r%i" % id,
318 files,
318 files,
319 fctxfn,
319 fctxfn,
320 date=(id, 0),
320 date=(id, 0),
321 user=b"debugbuilddag",
321 user=b"debugbuilddag",
322 extra={b'branch': atbranch},
322 extra={b'branch': atbranch},
323 )
323 )
324 nodeid = repo.commitctx(cx)
324 nodeid = repo.commitctx(cx)
325 nodeids.append(nodeid)
325 nodeids.append(nodeid)
326 at = id
326 at = id
327 elif type == b'l':
327 elif type == b'l':
328 id, name = data
328 id, name = data
329 ui.note((b'tag %s\n' % name))
329 ui.note((b'tag %s\n' % name))
330 tags.append(b"%s %s\n" % (hex(repo.changelog.node(id)), name))
330 tags.append(b"%s %s\n" % (hex(repo.changelog.node(id)), name))
331 elif type == b'a':
331 elif type == b'a':
332 ui.note((b'branch %s\n' % data))
332 ui.note((b'branch %s\n' % data))
333 atbranch = data
333 atbranch = data
334 progress.update(id)
334 progress.update(id)
335
335
336 if tags:
336 if tags:
337 repo.vfs.write(b"localtags", b"".join(tags))
337 repo.vfs.write(b"localtags", b"".join(tags))
338
338
339
339
340 def _debugchangegroup(ui, gen, all=None, indent=0, **opts):
340 def _debugchangegroup(ui, gen, all=None, indent=0, **opts):
341 indent_string = b' ' * indent
341 indent_string = b' ' * indent
342 if all:
342 if all:
343 ui.writenoi18n(
343 ui.writenoi18n(
344 b"%sformat: id, p1, p2, cset, delta base, len(delta)\n"
344 b"%sformat: id, p1, p2, cset, delta base, len(delta)\n"
345 % indent_string
345 % indent_string
346 )
346 )
347
347
348 def showchunks(named):
348 def showchunks(named):
349 ui.write(b"\n%s%s\n" % (indent_string, named))
349 ui.write(b"\n%s%s\n" % (indent_string, named))
350 for deltadata in gen.deltaiter():
350 for deltadata in gen.deltaiter():
351 node, p1, p2, cs, deltabase, delta, flags, sidedata = deltadata
351 node, p1, p2, cs, deltabase, delta, flags, sidedata = deltadata
352 ui.write(
352 ui.write(
353 b"%s%s %s %s %s %s %d\n"
353 b"%s%s %s %s %s %s %d\n"
354 % (
354 % (
355 indent_string,
355 indent_string,
356 hex(node),
356 hex(node),
357 hex(p1),
357 hex(p1),
358 hex(p2),
358 hex(p2),
359 hex(cs),
359 hex(cs),
360 hex(deltabase),
360 hex(deltabase),
361 len(delta),
361 len(delta),
362 )
362 )
363 )
363 )
364
364
365 gen.changelogheader()
365 gen.changelogheader()
366 showchunks(b"changelog")
366 showchunks(b"changelog")
367 gen.manifestheader()
367 gen.manifestheader()
368 showchunks(b"manifest")
368 showchunks(b"manifest")
369 for chunkdata in iter(gen.filelogheader, {}):
369 for chunkdata in iter(gen.filelogheader, {}):
370 fname = chunkdata[b'filename']
370 fname = chunkdata[b'filename']
371 showchunks(fname)
371 showchunks(fname)
372 else:
372 else:
373 if isinstance(gen, bundle2.unbundle20):
373 if isinstance(gen, bundle2.unbundle20):
374 raise error.Abort(_(b'use debugbundle2 for this file'))
374 raise error.Abort(_(b'use debugbundle2 for this file'))
375 gen.changelogheader()
375 gen.changelogheader()
376 for deltadata in gen.deltaiter():
376 for deltadata in gen.deltaiter():
377 node, p1, p2, cs, deltabase, delta, flags, sidedata = deltadata
377 node, p1, p2, cs, deltabase, delta, flags, sidedata = deltadata
378 ui.write(b"%s%s\n" % (indent_string, hex(node)))
378 ui.write(b"%s%s\n" % (indent_string, hex(node)))
379
379
380
380
381 def _debugobsmarkers(ui, part, indent=0, **opts):
381 def _debugobsmarkers(ui, part, indent=0, **opts):
382 """display version and markers contained in 'data'"""
382 """display version and markers contained in 'data'"""
383 opts = pycompat.byteskwargs(opts)
383 opts = pycompat.byteskwargs(opts)
384 data = part.read()
384 data = part.read()
385 indent_string = b' ' * indent
385 indent_string = b' ' * indent
386 try:
386 try:
387 version, markers = obsolete._readmarkers(data)
387 version, markers = obsolete._readmarkers(data)
388 except error.UnknownVersion as exc:
388 except error.UnknownVersion as exc:
389 msg = b"%sunsupported version: %s (%d bytes)\n"
389 msg = b"%sunsupported version: %s (%d bytes)\n"
390 msg %= indent_string, exc.version, len(data)
390 msg %= indent_string, exc.version, len(data)
391 ui.write(msg)
391 ui.write(msg)
392 else:
392 else:
393 msg = b"%sversion: %d (%d bytes)\n"
393 msg = b"%sversion: %d (%d bytes)\n"
394 msg %= indent_string, version, len(data)
394 msg %= indent_string, version, len(data)
395 ui.write(msg)
395 ui.write(msg)
396 fm = ui.formatter(b'debugobsolete', opts)
396 fm = ui.formatter(b'debugobsolete', opts)
397 for rawmarker in sorted(markers):
397 for rawmarker in sorted(markers):
398 m = obsutil.marker(None, rawmarker)
398 m = obsutil.marker(None, rawmarker)
399 fm.startitem()
399 fm.startitem()
400 fm.plain(indent_string)
400 fm.plain(indent_string)
401 cmdutil.showmarker(fm, m)
401 cmdutil.showmarker(fm, m)
402 fm.end()
402 fm.end()
403
403
404
404
405 def _debugphaseheads(ui, data, indent=0):
405 def _debugphaseheads(ui, data, indent=0):
406 """display version and markers contained in 'data'"""
406 """display version and markers contained in 'data'"""
407 indent_string = b' ' * indent
407 indent_string = b' ' * indent
408 headsbyphase = phases.binarydecode(data)
408 headsbyphase = phases.binarydecode(data)
409 for phase in phases.allphases:
409 for phase in phases.allphases:
410 for head in headsbyphase[phase]:
410 for head in headsbyphase[phase]:
411 ui.write(indent_string)
411 ui.write(indent_string)
412 ui.write(b'%s %s\n' % (hex(head), phases.phasenames[phase]))
412 ui.write(b'%s %s\n' % (hex(head), phases.phasenames[phase]))
413
413
414
414
415 def _quasirepr(thing):
415 def _quasirepr(thing):
416 if isinstance(thing, (dict, util.sortdict, collections.OrderedDict)):
416 if isinstance(thing, (dict, util.sortdict, collections.OrderedDict)):
417 return b'{%s}' % (
417 return b'{%s}' % (
418 b', '.join(b'%s: %s' % (k, thing[k]) for k in sorted(thing))
418 b', '.join(b'%s: %s' % (k, thing[k]) for k in sorted(thing))
419 )
419 )
420 return pycompat.bytestr(repr(thing))
420 return pycompat.bytestr(repr(thing))
421
421
422
422
423 def _debugbundle2(ui, gen, all=None, **opts):
423 def _debugbundle2(ui, gen, all=None, **opts):
424 """lists the contents of a bundle2"""
424 """lists the contents of a bundle2"""
425 if not isinstance(gen, bundle2.unbundle20):
425 if not isinstance(gen, bundle2.unbundle20):
426 raise error.Abort(_(b'not a bundle2 file'))
426 raise error.Abort(_(b'not a bundle2 file'))
427 ui.write((b'Stream params: %s\n' % _quasirepr(gen.params)))
427 ui.write((b'Stream params: %s\n' % _quasirepr(gen.params)))
428 parttypes = opts.get('part_type', [])
428 parttypes = opts.get('part_type', [])
429 for part in gen.iterparts():
429 for part in gen.iterparts():
430 if parttypes and part.type not in parttypes:
430 if parttypes and part.type not in parttypes:
431 continue
431 continue
432 msg = b'%s -- %s (mandatory: %r)\n'
432 msg = b'%s -- %s (mandatory: %r)\n'
433 ui.write((msg % (part.type, _quasirepr(part.params), part.mandatory)))
433 ui.write((msg % (part.type, _quasirepr(part.params), part.mandatory)))
434 if part.type == b'changegroup':
434 if part.type == b'changegroup':
435 version = part.params.get(b'version', b'01')
435 version = part.params.get(b'version', b'01')
436 cg = changegroup.getunbundler(version, part, b'UN')
436 cg = changegroup.getunbundler(version, part, b'UN')
437 if not ui.quiet:
437 if not ui.quiet:
438 _debugchangegroup(ui, cg, all=all, indent=4, **opts)
438 _debugchangegroup(ui, cg, all=all, indent=4, **opts)
439 if part.type == b'obsmarkers':
439 if part.type == b'obsmarkers':
440 if not ui.quiet:
440 if not ui.quiet:
441 _debugobsmarkers(ui, part, indent=4, **opts)
441 _debugobsmarkers(ui, part, indent=4, **opts)
442 if part.type == b'phase-heads':
442 if part.type == b'phase-heads':
443 if not ui.quiet:
443 if not ui.quiet:
444 _debugphaseheads(ui, part, indent=4)
444 _debugphaseheads(ui, part, indent=4)
445
445
446
446
447 @command(
447 @command(
448 b'debugbundle',
448 b'debugbundle',
449 [
449 [
450 (b'a', b'all', None, _(b'show all details')),
450 (b'a', b'all', None, _(b'show all details')),
451 (b'', b'part-type', [], _(b'show only the named part type')),
451 (b'', b'part-type', [], _(b'show only the named part type')),
452 (b'', b'spec', None, _(b'print the bundlespec of the bundle')),
452 (b'', b'spec', None, _(b'print the bundlespec of the bundle')),
453 ],
453 ],
454 _(b'FILE'),
454 _(b'FILE'),
455 norepo=True,
455 norepo=True,
456 )
456 )
457 def debugbundle(ui, bundlepath, all=None, spec=None, **opts):
457 def debugbundle(ui, bundlepath, all=None, spec=None, **opts):
458 """lists the contents of a bundle"""
458 """lists the contents of a bundle"""
459 with hg.openpath(ui, bundlepath) as f:
459 with hg.openpath(ui, bundlepath) as f:
460 if spec:
460 if spec:
461 spec = exchange.getbundlespec(ui, f)
461 spec = exchange.getbundlespec(ui, f)
462 ui.write(b'%s\n' % spec)
462 ui.write(b'%s\n' % spec)
463 return
463 return
464
464
465 gen = exchange.readbundle(ui, f, bundlepath)
465 gen = exchange.readbundle(ui, f, bundlepath)
466 if isinstance(gen, bundle2.unbundle20):
466 if isinstance(gen, bundle2.unbundle20):
467 return _debugbundle2(ui, gen, all=all, **opts)
467 return _debugbundle2(ui, gen, all=all, **opts)
468 _debugchangegroup(ui, gen, all=all, **opts)
468 _debugchangegroup(ui, gen, all=all, **opts)
469
469
470
470
471 @command(b'debugcapabilities', [], _(b'PATH'), norepo=True)
471 @command(b'debugcapabilities', [], _(b'PATH'), norepo=True)
472 def debugcapabilities(ui, path, **opts):
472 def debugcapabilities(ui, path, **opts):
473 """lists the capabilities of a remote peer"""
473 """lists the capabilities of a remote peer"""
474 opts = pycompat.byteskwargs(opts)
474 opts = pycompat.byteskwargs(opts)
475 peer = hg.peer(ui, opts, path)
475 peer = hg.peer(ui, opts, path)
476 try:
476 try:
477 caps = peer.capabilities()
477 caps = peer.capabilities()
478 ui.writenoi18n(b'Main capabilities:\n')
478 ui.writenoi18n(b'Main capabilities:\n')
479 for c in sorted(caps):
479 for c in sorted(caps):
480 ui.write(b' %s\n' % c)
480 ui.write(b' %s\n' % c)
481 b2caps = bundle2.bundle2caps(peer)
481 b2caps = bundle2.bundle2caps(peer)
482 if b2caps:
482 if b2caps:
483 ui.writenoi18n(b'Bundle2 capabilities:\n')
483 ui.writenoi18n(b'Bundle2 capabilities:\n')
484 for key, values in sorted(pycompat.iteritems(b2caps)):
484 for key, values in sorted(pycompat.iteritems(b2caps)):
485 ui.write(b' %s\n' % key)
485 ui.write(b' %s\n' % key)
486 for v in values:
486 for v in values:
487 ui.write(b' %s\n' % v)
487 ui.write(b' %s\n' % v)
488 finally:
488 finally:
489 peer.close()
489 peer.close()
490
490
491
491
492 @command(
492 @command(
493 b'debugchangedfiles',
493 b'debugchangedfiles',
494 [
494 [
495 (
495 (
496 b'',
496 b'',
497 b'compute',
497 b'compute',
498 False,
498 False,
499 b"compute information instead of reading it from storage",
499 b"compute information instead of reading it from storage",
500 ),
500 ),
501 ],
501 ],
502 b'REV',
502 b'REV',
503 )
503 )
504 def debugchangedfiles(ui, repo, rev, **opts):
504 def debugchangedfiles(ui, repo, rev, **opts):
505 """list the stored files changes for a revision"""
505 """list the stored files changes for a revision"""
506 ctx = scmutil.revsingle(repo, rev, None)
506 ctx = scmutil.revsingle(repo, rev, None)
507 files = None
507 files = None
508
508
509 if opts['compute']:
509 if opts['compute']:
510 files = metadata.compute_all_files_changes(ctx)
510 files = metadata.compute_all_files_changes(ctx)
511 else:
511 else:
512 sd = repo.changelog.sidedata(ctx.rev())
512 sd = repo.changelog.sidedata(ctx.rev())
513 files_block = sd.get(sidedata.SD_FILES)
513 files_block = sd.get(sidedata.SD_FILES)
514 if files_block is not None:
514 if files_block is not None:
515 files = metadata.decode_files_sidedata(sd)
515 files = metadata.decode_files_sidedata(sd)
516 if files is not None:
516 if files is not None:
517 for f in sorted(files.touched):
517 for f in sorted(files.touched):
518 if f in files.added:
518 if f in files.added:
519 action = b"added"
519 action = b"added"
520 elif f in files.removed:
520 elif f in files.removed:
521 action = b"removed"
521 action = b"removed"
522 elif f in files.merged:
522 elif f in files.merged:
523 action = b"merged"
523 action = b"merged"
524 elif f in files.salvaged:
524 elif f in files.salvaged:
525 action = b"salvaged"
525 action = b"salvaged"
526 else:
526 else:
527 action = b"touched"
527 action = b"touched"
528
528
529 copy_parent = b""
529 copy_parent = b""
530 copy_source = b""
530 copy_source = b""
531 if f in files.copied_from_p1:
531 if f in files.copied_from_p1:
532 copy_parent = b"p1"
532 copy_parent = b"p1"
533 copy_source = files.copied_from_p1[f]
533 copy_source = files.copied_from_p1[f]
534 elif f in files.copied_from_p2:
534 elif f in files.copied_from_p2:
535 copy_parent = b"p2"
535 copy_parent = b"p2"
536 copy_source = files.copied_from_p2[f]
536 copy_source = files.copied_from_p2[f]
537
537
538 data = (action, copy_parent, f, copy_source)
538 data = (action, copy_parent, f, copy_source)
539 template = b"%-8s %2s: %s, %s;\n"
539 template = b"%-8s %2s: %s, %s;\n"
540 ui.write(template % data)
540 ui.write(template % data)
541
541
542
542
543 @command(b'debugcheckstate', [], b'')
543 @command(b'debugcheckstate', [], b'')
544 def debugcheckstate(ui, repo):
544 def debugcheckstate(ui, repo):
545 """validate the correctness of the current dirstate"""
545 """validate the correctness of the current dirstate"""
546 parent1, parent2 = repo.dirstate.parents()
546 parent1, parent2 = repo.dirstate.parents()
547 m1 = repo[parent1].manifest()
547 m1 = repo[parent1].manifest()
548 m2 = repo[parent2].manifest()
548 m2 = repo[parent2].manifest()
549 errors = 0
549 errors = 0
550 for f in repo.dirstate:
550 for f in repo.dirstate:
551 state = repo.dirstate[f]
551 state = repo.dirstate[f]
552 if state in b"nr" and f not in m1:
552 if state in b"nr" and f not in m1:
553 ui.warn(_(b"%s in state %s, but not in manifest1\n") % (f, state))
553 ui.warn(_(b"%s in state %s, but not in manifest1\n") % (f, state))
554 errors += 1
554 errors += 1
555 if state in b"a" and f in m1:
555 if state in b"a" and f in m1:
556 ui.warn(_(b"%s in state %s, but also in manifest1\n") % (f, state))
556 ui.warn(_(b"%s in state %s, but also in manifest1\n") % (f, state))
557 errors += 1
557 errors += 1
558 if state in b"m" and f not in m1 and f not in m2:
558 if state in b"m" and f not in m1 and f not in m2:
559 ui.warn(
559 ui.warn(
560 _(b"%s in state %s, but not in either manifest\n") % (f, state)
560 _(b"%s in state %s, but not in either manifest\n") % (f, state)
561 )
561 )
562 errors += 1
562 errors += 1
563 for f in m1:
563 for f in m1:
564 state = repo.dirstate[f]
564 state = repo.dirstate[f]
565 if state not in b"nrm":
565 if state not in b"nrm":
566 ui.warn(_(b"%s in manifest1, but listed as state %s") % (f, state))
566 ui.warn(_(b"%s in manifest1, but listed as state %s") % (f, state))
567 errors += 1
567 errors += 1
568 if errors:
568 if errors:
569 errstr = _(b".hg/dirstate inconsistent with current parent's manifest")
569 errstr = _(b".hg/dirstate inconsistent with current parent's manifest")
570 raise error.Abort(errstr)
570 raise error.Abort(errstr)
571
571
572
572
573 @command(
573 @command(
574 b'debugcolor',
574 b'debugcolor',
575 [(b'', b'style', None, _(b'show all configured styles'))],
575 [(b'', b'style', None, _(b'show all configured styles'))],
576 b'hg debugcolor',
576 b'hg debugcolor',
577 )
577 )
578 def debugcolor(ui, repo, **opts):
578 def debugcolor(ui, repo, **opts):
579 """show available color, effects or style"""
579 """show available color, effects or style"""
580 ui.writenoi18n(b'color mode: %s\n' % stringutil.pprint(ui._colormode))
580 ui.writenoi18n(b'color mode: %s\n' % stringutil.pprint(ui._colormode))
581 if opts.get('style'):
581 if opts.get('style'):
582 return _debugdisplaystyle(ui)
582 return _debugdisplaystyle(ui)
583 else:
583 else:
584 return _debugdisplaycolor(ui)
584 return _debugdisplaycolor(ui)
585
585
586
586
587 def _debugdisplaycolor(ui):
587 def _debugdisplaycolor(ui):
588 ui = ui.copy()
588 ui = ui.copy()
589 ui._styles.clear()
589 ui._styles.clear()
590 for effect in color._activeeffects(ui).keys():
590 for effect in color._activeeffects(ui).keys():
591 ui._styles[effect] = effect
591 ui._styles[effect] = effect
592 if ui._terminfoparams:
592 if ui._terminfoparams:
593 for k, v in ui.configitems(b'color'):
593 for k, v in ui.configitems(b'color'):
594 if k.startswith(b'color.'):
594 if k.startswith(b'color.'):
595 ui._styles[k] = k[6:]
595 ui._styles[k] = k[6:]
596 elif k.startswith(b'terminfo.'):
596 elif k.startswith(b'terminfo.'):
597 ui._styles[k] = k[9:]
597 ui._styles[k] = k[9:]
598 ui.write(_(b'available colors:\n'))
598 ui.write(_(b'available colors:\n'))
599 # sort label with a '_' after the other to group '_background' entry.
599 # sort label with a '_' after the other to group '_background' entry.
600 items = sorted(ui._styles.items(), key=lambda i: (b'_' in i[0], i[0], i[1]))
600 items = sorted(ui._styles.items(), key=lambda i: (b'_' in i[0], i[0], i[1]))
601 for colorname, label in items:
601 for colorname, label in items:
602 ui.write(b'%s\n' % colorname, label=label)
602 ui.write(b'%s\n' % colorname, label=label)
603
603
604
604
605 def _debugdisplaystyle(ui):
605 def _debugdisplaystyle(ui):
606 ui.write(_(b'available style:\n'))
606 ui.write(_(b'available style:\n'))
607 if not ui._styles:
607 if not ui._styles:
608 return
608 return
609 width = max(len(s) for s in ui._styles)
609 width = max(len(s) for s in ui._styles)
610 for label, effects in sorted(ui._styles.items()):
610 for label, effects in sorted(ui._styles.items()):
611 ui.write(b'%s' % label, label=label)
611 ui.write(b'%s' % label, label=label)
612 if effects:
612 if effects:
613 # 50
613 # 50
614 ui.write(b': ')
614 ui.write(b': ')
615 ui.write(b' ' * (max(0, width - len(label))))
615 ui.write(b' ' * (max(0, width - len(label))))
616 ui.write(b', '.join(ui.label(e, e) for e in effects.split()))
616 ui.write(b', '.join(ui.label(e, e) for e in effects.split()))
617 ui.write(b'\n')
617 ui.write(b'\n')
618
618
619
619
620 @command(b'debugcreatestreamclonebundle', [], b'FILE')
620 @command(b'debugcreatestreamclonebundle', [], b'FILE')
621 def debugcreatestreamclonebundle(ui, repo, fname):
621 def debugcreatestreamclonebundle(ui, repo, fname):
622 """create a stream clone bundle file
622 """create a stream clone bundle file
623
623
624 Stream bundles are special bundles that are essentially archives of
624 Stream bundles are special bundles that are essentially archives of
625 revlog files. They are commonly used for cloning very quickly.
625 revlog files. They are commonly used for cloning very quickly.
626 """
626 """
627 # TODO we may want to turn this into an abort when this functionality
627 # TODO we may want to turn this into an abort when this functionality
628 # is moved into `hg bundle`.
628 # is moved into `hg bundle`.
629 if phases.hassecret(repo):
629 if phases.hassecret(repo):
630 ui.warn(
630 ui.warn(
631 _(
631 _(
632 b'(warning: stream clone bundle will contain secret '
632 b'(warning: stream clone bundle will contain secret '
633 b'revisions)\n'
633 b'revisions)\n'
634 )
634 )
635 )
635 )
636
636
637 requirements, gen = streamclone.generatebundlev1(repo)
637 requirements, gen = streamclone.generatebundlev1(repo)
638 changegroup.writechunks(ui, gen, fname)
638 changegroup.writechunks(ui, gen, fname)
639
639
640 ui.write(_(b'bundle requirements: %s\n') % b', '.join(sorted(requirements)))
640 ui.write(_(b'bundle requirements: %s\n') % b', '.join(sorted(requirements)))
641
641
642
642
643 @command(
643 @command(
644 b'debugdag',
644 b'debugdag',
645 [
645 [
646 (b't', b'tags', None, _(b'use tags as labels')),
646 (b't', b'tags', None, _(b'use tags as labels')),
647 (b'b', b'branches', None, _(b'annotate with branch names')),
647 (b'b', b'branches', None, _(b'annotate with branch names')),
648 (b'', b'dots', None, _(b'use dots for runs')),
648 (b'', b'dots', None, _(b'use dots for runs')),
649 (b's', b'spaces', None, _(b'separate elements by spaces')),
649 (b's', b'spaces', None, _(b'separate elements by spaces')),
650 ],
650 ],
651 _(b'[OPTION]... [FILE [REV]...]'),
651 _(b'[OPTION]... [FILE [REV]...]'),
652 optionalrepo=True,
652 optionalrepo=True,
653 )
653 )
654 def debugdag(ui, repo, file_=None, *revs, **opts):
654 def debugdag(ui, repo, file_=None, *revs, **opts):
655 """format the changelog or an index DAG as a concise textual description
655 """format the changelog or an index DAG as a concise textual description
656
656
657 If you pass a revlog index, the revlog's DAG is emitted. If you list
657 If you pass a revlog index, the revlog's DAG is emitted. If you list
658 revision numbers, they get labeled in the output as rN.
658 revision numbers, they get labeled in the output as rN.
659
659
660 Otherwise, the changelog DAG of the current repo is emitted.
660 Otherwise, the changelog DAG of the current repo is emitted.
661 """
661 """
662 spaces = opts.get('spaces')
662 spaces = opts.get('spaces')
663 dots = opts.get('dots')
663 dots = opts.get('dots')
664 if file_:
664 if file_:
665 rlog = revlog.revlog(vfsmod.vfs(encoding.getcwd(), audit=False), file_)
665 rlog = revlog.revlog(vfsmod.vfs(encoding.getcwd(), audit=False), file_)
666 revs = {int(r) for r in revs}
666 revs = {int(r) for r in revs}
667
667
668 def events():
668 def events():
669 for r in rlog:
669 for r in rlog:
670 yield b'n', (r, list(p for p in rlog.parentrevs(r) if p != -1))
670 yield b'n', (r, list(p for p in rlog.parentrevs(r) if p != -1))
671 if r in revs:
671 if r in revs:
672 yield b'l', (r, b"r%i" % r)
672 yield b'l', (r, b"r%i" % r)
673
673
674 elif repo:
674 elif repo:
675 cl = repo.changelog
675 cl = repo.changelog
676 tags = opts.get('tags')
676 tags = opts.get('tags')
677 branches = opts.get('branches')
677 branches = opts.get('branches')
678 if tags:
678 if tags:
679 labels = {}
679 labels = {}
680 for l, n in repo.tags().items():
680 for l, n in repo.tags().items():
681 labels.setdefault(cl.rev(n), []).append(l)
681 labels.setdefault(cl.rev(n), []).append(l)
682
682
683 def events():
683 def events():
684 b = b"default"
684 b = b"default"
685 for r in cl:
685 for r in cl:
686 if branches:
686 if branches:
687 newb = cl.read(cl.node(r))[5][b'branch']
687 newb = cl.read(cl.node(r))[5][b'branch']
688 if newb != b:
688 if newb != b:
689 yield b'a', newb
689 yield b'a', newb
690 b = newb
690 b = newb
691 yield b'n', (r, list(p for p in cl.parentrevs(r) if p != -1))
691 yield b'n', (r, list(p for p in cl.parentrevs(r) if p != -1))
692 if tags:
692 if tags:
693 ls = labels.get(r)
693 ls = labels.get(r)
694 if ls:
694 if ls:
695 for l in ls:
695 for l in ls:
696 yield b'l', (r, l)
696 yield b'l', (r, l)
697
697
698 else:
698 else:
699 raise error.Abort(_(b'need repo for changelog dag'))
699 raise error.Abort(_(b'need repo for changelog dag'))
700
700
701 for line in dagparser.dagtextlines(
701 for line in dagparser.dagtextlines(
702 events(),
702 events(),
703 addspaces=spaces,
703 addspaces=spaces,
704 wraplabels=True,
704 wraplabels=True,
705 wrapannotations=True,
705 wrapannotations=True,
706 wrapnonlinear=dots,
706 wrapnonlinear=dots,
707 usedots=dots,
707 usedots=dots,
708 maxlinewidth=70,
708 maxlinewidth=70,
709 ):
709 ):
710 ui.write(line)
710 ui.write(line)
711 ui.write(b"\n")
711 ui.write(b"\n")
712
712
713
713
714 @command(b'debugdata', cmdutil.debugrevlogopts, _(b'-c|-m|FILE REV'))
714 @command(b'debugdata', cmdutil.debugrevlogopts, _(b'-c|-m|FILE REV'))
715 def debugdata(ui, repo, file_, rev=None, **opts):
715 def debugdata(ui, repo, file_, rev=None, **opts):
716 """dump the contents of a data file revision"""
716 """dump the contents of a data file revision"""
717 opts = pycompat.byteskwargs(opts)
717 opts = pycompat.byteskwargs(opts)
718 if opts.get(b'changelog') or opts.get(b'manifest') or opts.get(b'dir'):
718 if opts.get(b'changelog') or opts.get(b'manifest') or opts.get(b'dir'):
719 if rev is not None:
719 if rev is not None:
720 raise error.CommandError(b'debugdata', _(b'invalid arguments'))
720 raise error.CommandError(b'debugdata', _(b'invalid arguments'))
721 file_, rev = None, file_
721 file_, rev = None, file_
722 elif rev is None:
722 elif rev is None:
723 raise error.CommandError(b'debugdata', _(b'invalid arguments'))
723 raise error.CommandError(b'debugdata', _(b'invalid arguments'))
724 r = cmdutil.openstorage(repo, b'debugdata', file_, opts)
724 r = cmdutil.openstorage(repo, b'debugdata', file_, opts)
725 try:
725 try:
726 ui.write(r.rawdata(r.lookup(rev)))
726 ui.write(r.rawdata(r.lookup(rev)))
727 except KeyError:
727 except KeyError:
728 raise error.Abort(_(b'invalid revision identifier %s') % rev)
728 raise error.Abort(_(b'invalid revision identifier %s') % rev)
729
729
730
730
731 @command(
731 @command(
732 b'debugdate',
732 b'debugdate',
733 [(b'e', b'extended', None, _(b'try extended date formats'))],
733 [(b'e', b'extended', None, _(b'try extended date formats'))],
734 _(b'[-e] DATE [RANGE]'),
734 _(b'[-e] DATE [RANGE]'),
735 norepo=True,
735 norepo=True,
736 optionalrepo=True,
736 optionalrepo=True,
737 )
737 )
738 def debugdate(ui, date, range=None, **opts):
738 def debugdate(ui, date, range=None, **opts):
739 """parse and display a date"""
739 """parse and display a date"""
740 if opts["extended"]:
740 if opts["extended"]:
741 d = dateutil.parsedate(date, dateutil.extendeddateformats)
741 d = dateutil.parsedate(date, dateutil.extendeddateformats)
742 else:
742 else:
743 d = dateutil.parsedate(date)
743 d = dateutil.parsedate(date)
744 ui.writenoi18n(b"internal: %d %d\n" % d)
744 ui.writenoi18n(b"internal: %d %d\n" % d)
745 ui.writenoi18n(b"standard: %s\n" % dateutil.datestr(d))
745 ui.writenoi18n(b"standard: %s\n" % dateutil.datestr(d))
746 if range:
746 if range:
747 m = dateutil.matchdate(range)
747 m = dateutil.matchdate(range)
748 ui.writenoi18n(b"match: %s\n" % m(d[0]))
748 ui.writenoi18n(b"match: %s\n" % m(d[0]))
749
749
750
750
751 @command(
751 @command(
752 b'debugdeltachain',
752 b'debugdeltachain',
753 cmdutil.debugrevlogopts + cmdutil.formatteropts,
753 cmdutil.debugrevlogopts + cmdutil.formatteropts,
754 _(b'-c|-m|FILE'),
754 _(b'-c|-m|FILE'),
755 optionalrepo=True,
755 optionalrepo=True,
756 )
756 )
757 def debugdeltachain(ui, repo, file_=None, **opts):
757 def debugdeltachain(ui, repo, file_=None, **opts):
758 """dump information about delta chains in a revlog
758 """dump information about delta chains in a revlog
759
759
760 Output can be templatized. Available template keywords are:
760 Output can be templatized. Available template keywords are:
761
761
762 :``rev``: revision number
762 :``rev``: revision number
763 :``chainid``: delta chain identifier (numbered by unique base)
763 :``chainid``: delta chain identifier (numbered by unique base)
764 :``chainlen``: delta chain length to this revision
764 :``chainlen``: delta chain length to this revision
765 :``prevrev``: previous revision in delta chain
765 :``prevrev``: previous revision in delta chain
766 :``deltatype``: role of delta / how it was computed
766 :``deltatype``: role of delta / how it was computed
767 :``compsize``: compressed size of revision
767 :``compsize``: compressed size of revision
768 :``uncompsize``: uncompressed size of revision
768 :``uncompsize``: uncompressed size of revision
769 :``chainsize``: total size of compressed revisions in chain
769 :``chainsize``: total size of compressed revisions in chain
770 :``chainratio``: total chain size divided by uncompressed revision size
770 :``chainratio``: total chain size divided by uncompressed revision size
771 (new delta chains typically start at ratio 2.00)
771 (new delta chains typically start at ratio 2.00)
772 :``lindist``: linear distance from base revision in delta chain to end
772 :``lindist``: linear distance from base revision in delta chain to end
773 of this revision
773 of this revision
774 :``extradist``: total size of revisions not part of this delta chain from
774 :``extradist``: total size of revisions not part of this delta chain from
775 base of delta chain to end of this revision; a measurement
775 base of delta chain to end of this revision; a measurement
776 of how much extra data we need to read/seek across to read
776 of how much extra data we need to read/seek across to read
777 the delta chain for this revision
777 the delta chain for this revision
778 :``extraratio``: extradist divided by chainsize; another representation of
778 :``extraratio``: extradist divided by chainsize; another representation of
779 how much unrelated data is needed to load this delta chain
779 how much unrelated data is needed to load this delta chain
780
780
781 If the repository is configured to use the sparse read, additional keywords
781 If the repository is configured to use the sparse read, additional keywords
782 are available:
782 are available:
783
783
784 :``readsize``: total size of data read from the disk for a revision
784 :``readsize``: total size of data read from the disk for a revision
785 (sum of the sizes of all the blocks)
785 (sum of the sizes of all the blocks)
786 :``largestblock``: size of the largest block of data read from the disk
786 :``largestblock``: size of the largest block of data read from the disk
787 :``readdensity``: density of useful bytes in the data read from the disk
787 :``readdensity``: density of useful bytes in the data read from the disk
788 :``srchunks``: in how many data hunks the whole revision would be read
788 :``srchunks``: in how many data hunks the whole revision would be read
789
789
790 The sparse read can be enabled with experimental.sparse-read = True
790 The sparse read can be enabled with experimental.sparse-read = True
791 """
791 """
792 opts = pycompat.byteskwargs(opts)
792 opts = pycompat.byteskwargs(opts)
793 r = cmdutil.openrevlog(repo, b'debugdeltachain', file_, opts)
793 r = cmdutil.openrevlog(repo, b'debugdeltachain', file_, opts)
794 index = r.index
794 index = r.index
795 start = r.start
795 start = r.start
796 length = r.length
796 length = r.length
797 generaldelta = r._generaldelta
797 generaldelta = r._generaldelta
798 withsparseread = getattr(r, '_withsparseread', False)
798 withsparseread = getattr(r, '_withsparseread', False)
799
799
800 def revinfo(rev):
800 def revinfo(rev):
801 e = index[rev]
801 e = index[rev]
802 compsize = e[1]
802 compsize = e[1]
803 uncompsize = e[2]
803 uncompsize = e[2]
804 chainsize = 0
804 chainsize = 0
805
805
806 if generaldelta:
806 if generaldelta:
807 if e[3] == e[5]:
807 if e[3] == e[5]:
808 deltatype = b'p1'
808 deltatype = b'p1'
809 elif e[3] == e[6]:
809 elif e[3] == e[6]:
810 deltatype = b'p2'
810 deltatype = b'p2'
811 elif e[3] == rev - 1:
811 elif e[3] == rev - 1:
812 deltatype = b'prev'
812 deltatype = b'prev'
813 elif e[3] == rev:
813 elif e[3] == rev:
814 deltatype = b'base'
814 deltatype = b'base'
815 else:
815 else:
816 deltatype = b'other'
816 deltatype = b'other'
817 else:
817 else:
818 if e[3] == rev:
818 if e[3] == rev:
819 deltatype = b'base'
819 deltatype = b'base'
820 else:
820 else:
821 deltatype = b'prev'
821 deltatype = b'prev'
822
822
823 chain = r._deltachain(rev)[0]
823 chain = r._deltachain(rev)[0]
824 for iterrev in chain:
824 for iterrev in chain:
825 e = index[iterrev]
825 e = index[iterrev]
826 chainsize += e[1]
826 chainsize += e[1]
827
827
828 return compsize, uncompsize, deltatype, chain, chainsize
828 return compsize, uncompsize, deltatype, chain, chainsize
829
829
830 fm = ui.formatter(b'debugdeltachain', opts)
830 fm = ui.formatter(b'debugdeltachain', opts)
831
831
832 fm.plain(
832 fm.plain(
833 b' rev chain# chainlen prev delta '
833 b' rev chain# chainlen prev delta '
834 b'size rawsize chainsize ratio lindist extradist '
834 b'size rawsize chainsize ratio lindist extradist '
835 b'extraratio'
835 b'extraratio'
836 )
836 )
837 if withsparseread:
837 if withsparseread:
838 fm.plain(b' readsize largestblk rddensity srchunks')
838 fm.plain(b' readsize largestblk rddensity srchunks')
839 fm.plain(b'\n')
839 fm.plain(b'\n')
840
840
841 chainbases = {}
841 chainbases = {}
842 for rev in r:
842 for rev in r:
843 comp, uncomp, deltatype, chain, chainsize = revinfo(rev)
843 comp, uncomp, deltatype, chain, chainsize = revinfo(rev)
844 chainbase = chain[0]
844 chainbase = chain[0]
845 chainid = chainbases.setdefault(chainbase, len(chainbases) + 1)
845 chainid = chainbases.setdefault(chainbase, len(chainbases) + 1)
846 basestart = start(chainbase)
846 basestart = start(chainbase)
847 revstart = start(rev)
847 revstart = start(rev)
848 lineardist = revstart + comp - basestart
848 lineardist = revstart + comp - basestart
849 extradist = lineardist - chainsize
849 extradist = lineardist - chainsize
850 try:
850 try:
851 prevrev = chain[-2]
851 prevrev = chain[-2]
852 except IndexError:
852 except IndexError:
853 prevrev = -1
853 prevrev = -1
854
854
855 if uncomp != 0:
855 if uncomp != 0:
856 chainratio = float(chainsize) / float(uncomp)
856 chainratio = float(chainsize) / float(uncomp)
857 else:
857 else:
858 chainratio = chainsize
858 chainratio = chainsize
859
859
860 if chainsize != 0:
860 if chainsize != 0:
861 extraratio = float(extradist) / float(chainsize)
861 extraratio = float(extradist) / float(chainsize)
862 else:
862 else:
863 extraratio = extradist
863 extraratio = extradist
864
864
865 fm.startitem()
865 fm.startitem()
866 fm.write(
866 fm.write(
867 b'rev chainid chainlen prevrev deltatype compsize '
867 b'rev chainid chainlen prevrev deltatype compsize '
868 b'uncompsize chainsize chainratio lindist extradist '
868 b'uncompsize chainsize chainratio lindist extradist '
869 b'extraratio',
869 b'extraratio',
870 b'%7d %7d %8d %8d %7s %10d %10d %10d %9.5f %9d %9d %10.5f',
870 b'%7d %7d %8d %8d %7s %10d %10d %10d %9.5f %9d %9d %10.5f',
871 rev,
871 rev,
872 chainid,
872 chainid,
873 len(chain),
873 len(chain),
874 prevrev,
874 prevrev,
875 deltatype,
875 deltatype,
876 comp,
876 comp,
877 uncomp,
877 uncomp,
878 chainsize,
878 chainsize,
879 chainratio,
879 chainratio,
880 lineardist,
880 lineardist,
881 extradist,
881 extradist,
882 extraratio,
882 extraratio,
883 rev=rev,
883 rev=rev,
884 chainid=chainid,
884 chainid=chainid,
885 chainlen=len(chain),
885 chainlen=len(chain),
886 prevrev=prevrev,
886 prevrev=prevrev,
887 deltatype=deltatype,
887 deltatype=deltatype,
888 compsize=comp,
888 compsize=comp,
889 uncompsize=uncomp,
889 uncompsize=uncomp,
890 chainsize=chainsize,
890 chainsize=chainsize,
891 chainratio=chainratio,
891 chainratio=chainratio,
892 lindist=lineardist,
892 lindist=lineardist,
893 extradist=extradist,
893 extradist=extradist,
894 extraratio=extraratio,
894 extraratio=extraratio,
895 )
895 )
896 if withsparseread:
896 if withsparseread:
897 readsize = 0
897 readsize = 0
898 largestblock = 0
898 largestblock = 0
899 srchunks = 0
899 srchunks = 0
900
900
901 for revschunk in deltautil.slicechunk(r, chain):
901 for revschunk in deltautil.slicechunk(r, chain):
902 srchunks += 1
902 srchunks += 1
903 blkend = start(revschunk[-1]) + length(revschunk[-1])
903 blkend = start(revschunk[-1]) + length(revschunk[-1])
904 blksize = blkend - start(revschunk[0])
904 blksize = blkend - start(revschunk[0])
905
905
906 readsize += blksize
906 readsize += blksize
907 if largestblock < blksize:
907 if largestblock < blksize:
908 largestblock = blksize
908 largestblock = blksize
909
909
910 if readsize:
910 if readsize:
911 readdensity = float(chainsize) / float(readsize)
911 readdensity = float(chainsize) / float(readsize)
912 else:
912 else:
913 readdensity = 1
913 readdensity = 1
914
914
915 fm.write(
915 fm.write(
916 b'readsize largestblock readdensity srchunks',
916 b'readsize largestblock readdensity srchunks',
917 b' %10d %10d %9.5f %8d',
917 b' %10d %10d %9.5f %8d',
918 readsize,
918 readsize,
919 largestblock,
919 largestblock,
920 readdensity,
920 readdensity,
921 srchunks,
921 srchunks,
922 readsize=readsize,
922 readsize=readsize,
923 largestblock=largestblock,
923 largestblock=largestblock,
924 readdensity=readdensity,
924 readdensity=readdensity,
925 srchunks=srchunks,
925 srchunks=srchunks,
926 )
926 )
927
927
928 fm.plain(b'\n')
928 fm.plain(b'\n')
929
929
930 fm.end()
930 fm.end()
931
931
932
932
933 @command(
933 @command(
934 b'debugdirstate|debugstate',
934 b'debugdirstate|debugstate',
935 [
935 [
936 (
936 (
937 b'',
937 b'',
938 b'nodates',
938 b'nodates',
939 None,
939 None,
940 _(b'do not display the saved mtime (DEPRECATED)'),
940 _(b'do not display the saved mtime (DEPRECATED)'),
941 ),
941 ),
942 (b'', b'dates', True, _(b'display the saved mtime')),
942 (b'', b'dates', True, _(b'display the saved mtime')),
943 (b'', b'datesort', None, _(b'sort by saved mtime')),
943 (b'', b'datesort', None, _(b'sort by saved mtime')),
+        (b'', b'dirs', False, _(b'display directories')),
     ],
     _(b'[OPTION]...'),
 )
 def debugstate(ui, repo, **opts):
     """show the contents of the current dirstate"""

     nodates = not opts['dates']
     if opts.get('nodates') is not None:
         nodates = True
     datesort = opts.get('datesort')

     if datesort:
         keyfunc = lambda x: (x[1][3], x[0])  # sort by mtime, then by filename
     else:
         keyfunc = None  # sort by filename
-    for file_, ent in sorted(pycompat.iteritems(repo.dirstate), key=keyfunc):
+    entries = list(pycompat.iteritems(repo.dirstate))
+    if opts['dirs']:
+        entries.extend(repo.dirstate.directories())
+    entries.sort(key=keyfunc)
+    for file_, ent in entries:
         if ent[3] == -1:
             timestr = b'unset               '
         elif nodates:
             timestr = b'set                 '
         else:
             timestr = time.strftime(
                 "%Y-%m-%d %H:%M:%S ", time.localtime(ent[3])
             )
             timestr = encoding.strtolocal(timestr)
         if ent[1] & 0o20000:
             mode = b'lnk'
         else:
             mode = b'%3o' % (ent[1] & 0o777 & ~util.umask)
         ui.write(b"%c %s %10d %s%s\n" % (ent[0], mode, ent[2], timestr, file_))
     for f in repo.dirstate.copies():
         ui.write(_(b"copy: %s -> %s\n") % (repo.dirstate.copied(f), f))


 @command(
     b'debugdiscovery',
     [
         (b'', b'old', None, _(b'use old-style discovery')),
         (
             b'',
             b'nonheads',
             None,
             _(b'use old-style discovery with non-heads included'),
         ),
         (b'', b'rev', [], b'restrict discovery to this set of revs'),
         (b'', b'seed', b'12323', b'specify the random seed used for discovery'),
         (
             b'',
             b'local-as-revs',
             b"",
             b'treat local as having these revisions only',
         ),
         (
             b'',
             b'remote-as-revs',
             b"",
             b'use local as remote, with only these revisions',
         ),
     ]
     + cmdutil.remoteopts
     + cmdutil.formatteropts,
     _(b'[--rev REV] [OTHER]'),
 )
 def debugdiscovery(ui, repo, remoteurl=b"default", **opts):
     """runs the changeset discovery protocol in isolation

     The local peer can be "replaced" by a subset of the local repository by
     using the `--local-as-revs` flag. In the same way, the usual `remote` peer
     can be "replaced" by a subset of the local repository using the
     `--remote-as-revs` flag. This is useful for efficiently debugging
     pathological discovery situations.

     The following developer-oriented config options are relevant for people
     playing with this command:

     * devel.discovery.exchange-heads=True

       If False, the discovery will not start with
       remote head fetching and local head querying.

     * devel.discovery.grow-sample=True

       If False, the sample size used in set discovery will not be increased
       through the process.

     * devel.discovery.grow-sample.dynamic=True

1030 When discovery.grow-sample.dynamic is True, the default, the sample size is
1035 When discovery.grow-sample.dynamic is True, the default, the sample size is
1031 adapted to the shape of the undecided set (it is set to the max of:
1036 adapted to the shape of the undecided set (it is set to the max of:
1032 <target-size>, len(roots(undecided)), len(heads(undecided)
1037 <target-size>, len(roots(undecided)), len(heads(undecided)
1033
1038
1034 * devel.discovery.grow-sample.rate=1.05
1039 * devel.discovery.grow-sample.rate=1.05
1035
1040
1036 the rate at which the sample grow
1041 the rate at which the sample grow
1037
1042
1038 * devel.discovery.randomize=True
1043 * devel.discovery.randomize=True
1039
1044
1040 If andom sampling during discovery are deterministic. It is meant for
1045 If andom sampling during discovery are deterministic. It is meant for
1041 integration tests.
1046 integration tests.
1042
1047
1043 * devel.discovery.sample-size=200
1048 * devel.discovery.sample-size=200
1044
1049
1045 Control the initial size of the discovery sample
1050 Control the initial size of the discovery sample
1046
1051
1047 * devel.discovery.sample-size.initial=100
1052 * devel.discovery.sample-size.initial=100
1048
1053
1049 Control the initial size of the discovery for initial change
1054 Control the initial size of the discovery for initial change
1050 """
1055 """
1051 opts = pycompat.byteskwargs(opts)
1056 opts = pycompat.byteskwargs(opts)
1052 unfi = repo.unfiltered()
1057 unfi = repo.unfiltered()
1053
1058
1054 # setup potential extra filtering
1059 # setup potential extra filtering
1055 local_revs = opts[b"local_as_revs"]
1060 local_revs = opts[b"local_as_revs"]
1056 remote_revs = opts[b"remote_as_revs"]
1061 remote_revs = opts[b"remote_as_revs"]
1057
1062
1058 # make sure tests are repeatable
1063 # make sure tests are repeatable
1059 random.seed(int(opts[b'seed']))
1064 random.seed(int(opts[b'seed']))
1060
1065
1061 if not remote_revs:
1066 if not remote_revs:
1062
1067
1063 remoteurl, branches = urlutil.get_unique_pull_path(
1068 remoteurl, branches = urlutil.get_unique_pull_path(
1064 b'debugdiscovery', repo, ui, remoteurl
1069 b'debugdiscovery', repo, ui, remoteurl
1065 )
1070 )
1066 remote = hg.peer(repo, opts, remoteurl)
1071 remote = hg.peer(repo, opts, remoteurl)
1067 ui.status(_(b'comparing with %s\n') % urlutil.hidepassword(remoteurl))
1072 ui.status(_(b'comparing with %s\n') % urlutil.hidepassword(remoteurl))
1068 else:
1073 else:
1069 branches = (None, [])
1074 branches = (None, [])
1070 remote_filtered_revs = scmutil.revrange(
1075 remote_filtered_revs = scmutil.revrange(
1071 unfi, [b"not (::(%s))" % remote_revs]
1076 unfi, [b"not (::(%s))" % remote_revs]
1072 )
1077 )
1073 remote_filtered_revs = frozenset(remote_filtered_revs)
1078 remote_filtered_revs = frozenset(remote_filtered_revs)
1074
1079
1075 def remote_func(x):
1080 def remote_func(x):
1076 return remote_filtered_revs
1081 return remote_filtered_revs
1077
1082
1078 repoview.filtertable[b'debug-discovery-remote-filter'] = remote_func
1083 repoview.filtertable[b'debug-discovery-remote-filter'] = remote_func
1079
1084
1080 remote = repo.peer()
1085 remote = repo.peer()
1081 remote._repo = remote._repo.filtered(b'debug-discovery-remote-filter')
1086 remote._repo = remote._repo.filtered(b'debug-discovery-remote-filter')
1082
1087
1083 if local_revs:
1088 if local_revs:
1084 local_filtered_revs = scmutil.revrange(
1089 local_filtered_revs = scmutil.revrange(
1085 unfi, [b"not (::(%s))" % local_revs]
1090 unfi, [b"not (::(%s))" % local_revs]
1086 )
1091 )
1087 local_filtered_revs = frozenset(local_filtered_revs)
1092 local_filtered_revs = frozenset(local_filtered_revs)
1088
1093
1089 def local_func(x):
1094 def local_func(x):
1090 return local_filtered_revs
1095 return local_filtered_revs
1091
1096
1092 repoview.filtertable[b'debug-discovery-local-filter'] = local_func
1097 repoview.filtertable[b'debug-discovery-local-filter'] = local_func
1093 repo = repo.filtered(b'debug-discovery-local-filter')
1098 repo = repo.filtered(b'debug-discovery-local-filter')
1094
1099
1095 data = {}
1100 data = {}
1096 if opts.get(b'old'):
1101 if opts.get(b'old'):
1097
1102
1098 def doit(pushedrevs, remoteheads, remote=remote):
1103 def doit(pushedrevs, remoteheads, remote=remote):
1099 if not util.safehasattr(remote, b'branches'):
1104 if not util.safehasattr(remote, b'branches'):
1100 # enable in-client legacy support
1105 # enable in-client legacy support
1101 remote = localrepo.locallegacypeer(remote.local())
1106 remote = localrepo.locallegacypeer(remote.local())
1102 common, _in, hds = treediscovery.findcommonincoming(
1107 common, _in, hds = treediscovery.findcommonincoming(
1103 repo, remote, force=True, audit=data
1108 repo, remote, force=True, audit=data
1104 )
1109 )
1105 common = set(common)
1110 common = set(common)
1106 if not opts.get(b'nonheads'):
1111 if not opts.get(b'nonheads'):
1107 ui.writenoi18n(
1112 ui.writenoi18n(
1108 b"unpruned common: %s\n"
1113 b"unpruned common: %s\n"
1109 % b" ".join(sorted(short(n) for n in common))
1114 % b" ".join(sorted(short(n) for n in common))
1110 )
1115 )
1111
1116
1112 clnode = repo.changelog.node
1117 clnode = repo.changelog.node
1113 common = repo.revs(b'heads(::%ln)', common)
1118 common = repo.revs(b'heads(::%ln)', common)
1114 common = {clnode(r) for r in common}
1119 common = {clnode(r) for r in common}
1115 return common, hds
1120 return common, hds
1116
1121
1117 else:
1122 else:
1118
1123
1119 def doit(pushedrevs, remoteheads, remote=remote):
1124 def doit(pushedrevs, remoteheads, remote=remote):
1120 nodes = None
1125 nodes = None
1121 if pushedrevs:
1126 if pushedrevs:
1122 revs = scmutil.revrange(repo, pushedrevs)
1127 revs = scmutil.revrange(repo, pushedrevs)
1123 nodes = [repo[r].node() for r in revs]
1128 nodes = [repo[r].node() for r in revs]
1124 common, any, hds = setdiscovery.findcommonheads(
1129 common, any, hds = setdiscovery.findcommonheads(
1125 ui, repo, remote, ancestorsof=nodes, audit=data
1130 ui, repo, remote, ancestorsof=nodes, audit=data
1126 )
1131 )
1127 return common, hds
1132 return common, hds
1128
1133
1129 remoterevs, _checkout = hg.addbranchrevs(repo, remote, branches, revs=None)
1134 remoterevs, _checkout = hg.addbranchrevs(repo, remote, branches, revs=None)
1130 localrevs = opts[b'rev']
1135 localrevs = opts[b'rev']
1131
1136
1132 fm = ui.formatter(b'debugdiscovery', opts)
1137 fm = ui.formatter(b'debugdiscovery', opts)
1133 if fm.strict_format:
1138 if fm.strict_format:
1134
1139
1135 @contextlib.contextmanager
1140 @contextlib.contextmanager
1136 def may_capture_output():
1141 def may_capture_output():
1137 ui.pushbuffer()
1142 ui.pushbuffer()
1138 yield
1143 yield
1139 data[b'output'] = ui.popbuffer()
1144 data[b'output'] = ui.popbuffer()
1140
1145
1141 else:
1146 else:
1142 may_capture_output = util.nullcontextmanager
1147 may_capture_output = util.nullcontextmanager
1143 with may_capture_output():
1148 with may_capture_output():
1144 with util.timedcm('debug-discovery') as t:
1149 with util.timedcm('debug-discovery') as t:
1145 common, hds = doit(localrevs, remoterevs)
1150 common, hds = doit(localrevs, remoterevs)
1146
1151
1147 # compute all statistics
1152 # compute all statistics
1148 heads_common = set(common)
1153 heads_common = set(common)
1149 heads_remote = set(hds)
1154 heads_remote = set(hds)
1150 heads_local = set(repo.heads())
1155 heads_local = set(repo.heads())
1151 # note: they cannot be a local or remote head that is in common and not
1156 # note: they cannot be a local or remote head that is in common and not
1152 # itself a head of common.
1157 # itself a head of common.
1153 heads_common_local = heads_common & heads_local
1158 heads_common_local = heads_common & heads_local
1154 heads_common_remote = heads_common & heads_remote
1159 heads_common_remote = heads_common & heads_remote
1155 heads_common_both = heads_common & heads_remote & heads_local
1160 heads_common_both = heads_common & heads_remote & heads_local
1156
1161
1157 all = repo.revs(b'all()')
1162 all = repo.revs(b'all()')
1158 common = repo.revs(b'::%ln', common)
1163 common = repo.revs(b'::%ln', common)
1159 roots_common = repo.revs(b'roots(::%ld)', common)
1164 roots_common = repo.revs(b'roots(::%ld)', common)
1160 missing = repo.revs(b'not ::%ld', common)
1165 missing = repo.revs(b'not ::%ld', common)
1161 heads_missing = repo.revs(b'heads(%ld)', missing)
1166 heads_missing = repo.revs(b'heads(%ld)', missing)
1162 roots_missing = repo.revs(b'roots(%ld)', missing)
1167 roots_missing = repo.revs(b'roots(%ld)', missing)
1163 assert len(common) + len(missing) == len(all)
1168 assert len(common) + len(missing) == len(all)
1164
1169
1165 initial_undecided = repo.revs(
1170 initial_undecided = repo.revs(
1166 b'not (::%ln or %ln::)', heads_common_remote, heads_common_local
1171 b'not (::%ln or %ln::)', heads_common_remote, heads_common_local
1167 )
1172 )
1168 heads_initial_undecided = repo.revs(b'heads(%ld)', initial_undecided)
1173 heads_initial_undecided = repo.revs(b'heads(%ld)', initial_undecided)
1169 roots_initial_undecided = repo.revs(b'roots(%ld)', initial_undecided)
1174 roots_initial_undecided = repo.revs(b'roots(%ld)', initial_undecided)
1170 common_initial_undecided = initial_undecided & common
1175 common_initial_undecided = initial_undecided & common
1171 missing_initial_undecided = initial_undecided & missing
1176 missing_initial_undecided = initial_undecided & missing
1172
1177
1173 data[b'elapsed'] = t.elapsed
1178 data[b'elapsed'] = t.elapsed
1174 data[b'nb-common-heads'] = len(heads_common)
1179 data[b'nb-common-heads'] = len(heads_common)
1175 data[b'nb-common-heads-local'] = len(heads_common_local)
1180 data[b'nb-common-heads-local'] = len(heads_common_local)
1176 data[b'nb-common-heads-remote'] = len(heads_common_remote)
1181 data[b'nb-common-heads-remote'] = len(heads_common_remote)
1177 data[b'nb-common-heads-both'] = len(heads_common_both)
1182 data[b'nb-common-heads-both'] = len(heads_common_both)
1178 data[b'nb-common-roots'] = len(roots_common)
1183 data[b'nb-common-roots'] = len(roots_common)
1179 data[b'nb-head-local'] = len(heads_local)
1184 data[b'nb-head-local'] = len(heads_local)
1180 data[b'nb-head-local-missing'] = len(heads_local) - len(heads_common_local)
1185 data[b'nb-head-local-missing'] = len(heads_local) - len(heads_common_local)
1181 data[b'nb-head-remote'] = len(heads_remote)
1186 data[b'nb-head-remote'] = len(heads_remote)
1182 data[b'nb-head-remote-unknown'] = len(heads_remote) - len(
1187 data[b'nb-head-remote-unknown'] = len(heads_remote) - len(
1183 heads_common_remote
1188 heads_common_remote
1184 )
1189 )
1185 data[b'nb-revs'] = len(all)
1190 data[b'nb-revs'] = len(all)
1186 data[b'nb-revs-common'] = len(common)
1191 data[b'nb-revs-common'] = len(common)
1187 data[b'nb-revs-missing'] = len(missing)
1192 data[b'nb-revs-missing'] = len(missing)
1188 data[b'nb-missing-heads'] = len(heads_missing)
1193 data[b'nb-missing-heads'] = len(heads_missing)
1189 data[b'nb-missing-roots'] = len(roots_missing)
1194 data[b'nb-missing-roots'] = len(roots_missing)
1190 data[b'nb-ini_und'] = len(initial_undecided)
1195 data[b'nb-ini_und'] = len(initial_undecided)
1191 data[b'nb-ini_und-heads'] = len(heads_initial_undecided)
1196 data[b'nb-ini_und-heads'] = len(heads_initial_undecided)
1192 data[b'nb-ini_und-roots'] = len(roots_initial_undecided)
1197 data[b'nb-ini_und-roots'] = len(roots_initial_undecided)
1193 data[b'nb-ini_und-common'] = len(common_initial_undecided)
1198 data[b'nb-ini_und-common'] = len(common_initial_undecided)
1194 data[b'nb-ini_und-missing'] = len(missing_initial_undecided)
1199 data[b'nb-ini_und-missing'] = len(missing_initial_undecided)
1195
1200
1196 fm.startitem()
1201 fm.startitem()
1197 fm.data(**pycompat.strkwargs(data))
1202 fm.data(**pycompat.strkwargs(data))
1198 # display discovery summary
1203 # display discovery summary
1199 fm.plain(b"elapsed time: %(elapsed)f seconds\n" % data)
1204 fm.plain(b"elapsed time: %(elapsed)f seconds\n" % data)
1200 fm.plain(b"round-trips: %(total-roundtrips)9d\n" % data)
1205 fm.plain(b"round-trips: %(total-roundtrips)9d\n" % data)
1201 fm.plain(b"heads summary:\n")
1206 fm.plain(b"heads summary:\n")
1202 fm.plain(b" total common heads: %(nb-common-heads)9d\n" % data)
1207 fm.plain(b" total common heads: %(nb-common-heads)9d\n" % data)
1203 fm.plain(b" also local heads: %(nb-common-heads-local)9d\n" % data)
1208 fm.plain(b" also local heads: %(nb-common-heads-local)9d\n" % data)
1204 fm.plain(b" also remote heads: %(nb-common-heads-remote)9d\n" % data)
1209 fm.plain(b" also remote heads: %(nb-common-heads-remote)9d\n" % data)
1205 fm.plain(b" both: %(nb-common-heads-both)9d\n" % data)
1210 fm.plain(b" both: %(nb-common-heads-both)9d\n" % data)
1206 fm.plain(b" local heads: %(nb-head-local)9d\n" % data)
1211 fm.plain(b" local heads: %(nb-head-local)9d\n" % data)
1207 fm.plain(b" common: %(nb-common-heads-local)9d\n" % data)
1212 fm.plain(b" common: %(nb-common-heads-local)9d\n" % data)
1208 fm.plain(b" missing: %(nb-head-local-missing)9d\n" % data)
1213 fm.plain(b" missing: %(nb-head-local-missing)9d\n" % data)
1209 fm.plain(b" remote heads: %(nb-head-remote)9d\n" % data)
1214 fm.plain(b" remote heads: %(nb-head-remote)9d\n" % data)
1210 fm.plain(b" common: %(nb-common-heads-remote)9d\n" % data)
1215 fm.plain(b" common: %(nb-common-heads-remote)9d\n" % data)
1211 fm.plain(b" unknown: %(nb-head-remote-unknown)9d\n" % data)
1216 fm.plain(b" unknown: %(nb-head-remote-unknown)9d\n" % data)
1212 fm.plain(b"local changesets: %(nb-revs)9d\n" % data)
1217 fm.plain(b"local changesets: %(nb-revs)9d\n" % data)
1213 fm.plain(b" common: %(nb-revs-common)9d\n" % data)
1218 fm.plain(b" common: %(nb-revs-common)9d\n" % data)
1214 fm.plain(b" heads: %(nb-common-heads)9d\n" % data)
1219 fm.plain(b" heads: %(nb-common-heads)9d\n" % data)
1215 fm.plain(b" roots: %(nb-common-roots)9d\n" % data)
1220 fm.plain(b" roots: %(nb-common-roots)9d\n" % data)
1216 fm.plain(b" missing: %(nb-revs-missing)9d\n" % data)
1221 fm.plain(b" missing: %(nb-revs-missing)9d\n" % data)
1217 fm.plain(b" heads: %(nb-missing-heads)9d\n" % data)
1222 fm.plain(b" heads: %(nb-missing-heads)9d\n" % data)
1218 fm.plain(b" roots: %(nb-missing-roots)9d\n" % data)
1223 fm.plain(b" roots: %(nb-missing-roots)9d\n" % data)
1219 fm.plain(b" first undecided set: %(nb-ini_und)9d\n" % data)
1224 fm.plain(b" first undecided set: %(nb-ini_und)9d\n" % data)
1220 fm.plain(b" heads: %(nb-ini_und-heads)9d\n" % data)
1225 fm.plain(b" heads: %(nb-ini_und-heads)9d\n" % data)
1221 fm.plain(b" roots: %(nb-ini_und-roots)9d\n" % data)
1226 fm.plain(b" roots: %(nb-ini_und-roots)9d\n" % data)
1222 fm.plain(b" common: %(nb-ini_und-common)9d\n" % data)
1227 fm.plain(b" common: %(nb-ini_und-common)9d\n" % data)
1223 fm.plain(b" missing: %(nb-ini_und-missing)9d\n" % data)
1228 fm.plain(b" missing: %(nb-ini_und-missing)9d\n" % data)
1224
1229
1225 if ui.verbose:
1230 if ui.verbose:
1226 fm.plain(
1231 fm.plain(
1227 b"common heads: %s\n"
1232 b"common heads: %s\n"
1228 % b" ".join(sorted(short(n) for n in heads_common))
1233 % b" ".join(sorted(short(n) for n in heads_common))
1229 )
1234 )
1230 fm.end()
1235 fm.end()
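
# Illustrative usage sketch (not part of upstream): the *-as-revs flags
# defined above make it possible to replay a pathological discovery entirely
# inside one repository, without a real server, e.g.:
#
#   $ hg debugdiscovery --local-as-revs '::head_a' --remote-as-revs '::head_b'
#
# 'head_a' and 'head_b' are placeholder revsets; any two overlapping subsets
# of the repository will do.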


_chunksize = 4 << 10


@command(
    b'debugdownload',
    [
        (b'o', b'output', b'', _(b'path')),
    ],
    optionalrepo=True,
)
def debugdownload(ui, repo, url, output=None, **opts):
    """download a resource using Mercurial logic and config"""
    fh = urlmod.open(ui, url, output)

    dest = ui
    if output:
        dest = open(output, b"wb", _chunksize)
    try:
        data = fh.read(_chunksize)
        while data:
            dest.write(data)
            data = fh.read(_chunksize)
    finally:
        if output:
            dest.close()
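
# Illustrative usage sketch (not part of upstream): fetch a resource through
# Mercurial's URL handling (proxies, auth, ...) into a local file, using the
# -o/--output flag declared above (the URL is a placeholder):
#
#   $ hg debugdownload -o bundle.hg https://example.com/some-bundle.hg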


@command(b'debugextensions', cmdutil.formatteropts, [], optionalrepo=True)
def debugextensions(ui, repo, **opts):
    '''show information about active extensions'''
    opts = pycompat.byteskwargs(opts)
    exts = extensions.extensions(ui)
    hgver = util.version()
    fm = ui.formatter(b'debugextensions', opts)
    for extname, extmod in sorted(exts, key=operator.itemgetter(0)):
        isinternal = extensions.ismoduleinternal(extmod)
        extsource = None

        if util.safehasattr(extmod, '__file__'):
            extsource = pycompat.fsencode(extmod.__file__)
        elif getattr(sys, 'oxidized', False):
            extsource = pycompat.sysexecutable
        if isinternal:
            exttestedwith = []  # never expose magic string to users
        else:
            exttestedwith = getattr(extmod, 'testedwith', b'').split()
        extbuglink = getattr(extmod, 'buglink', None)

        fm.startitem()

        if ui.quiet or ui.verbose:
            fm.write(b'name', b'%s\n', extname)
        else:
            fm.write(b'name', b'%s', extname)
            if isinternal or hgver in exttestedwith:
                fm.plain(b'\n')
            elif not exttestedwith:
                fm.plain(_(b' (untested!)\n'))
            else:
                lasttestedversion = exttestedwith[-1]
                fm.plain(b' (%s!)\n' % lasttestedversion)

        fm.condwrite(
            ui.verbose and extsource,
            b'source',
            _(b'  location: %s\n'),
            extsource or b"",
        )

        if ui.verbose:
            fm.plain(_(b'  bundled: %s\n') % [b'no', b'yes'][isinternal])
        fm.data(bundled=isinternal)

        fm.condwrite(
            ui.verbose and exttestedwith,
            b'testedwith',
            _(b'  tested with: %s\n'),
            fm.formatlist(exttestedwith, name=b'ver'),
        )

        fm.condwrite(
            ui.verbose and extbuglink,
            b'buglink',
            _(b'  bug reporting: %s\n'),
            extbuglink or b"",
        )

    fm.end()
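
# Illustrative usage sketch (not part of upstream): with a formatter template
# (-T comes from cmdutil.formatteropts), the fields written above can be
# queried directly, e.g.:
#
#   $ hg debugextensions -T '{name}: {testedwith}\n'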


@command(
    b'debugfileset',
    [
        (
            b'r',
            b'rev',
            b'',
            _(b'apply the filespec on this revision'),
            _(b'REV'),
        ),
        (
            b'',
            b'all-files',
            False,
            _(b'test files from all revisions and working directory'),
        ),
        (
            b's',
            b'show-matcher',
            None,
            _(b'print internal representation of matcher'),
        ),
        (
            b'p',
            b'show-stage',
            [],
            _(b'print parsed tree at the given stage'),
            _(b'NAME'),
        ),
    ],
    _(b'[-r REV] [--all-files] [OPTION]... FILESPEC'),
)
def debugfileset(ui, repo, expr, **opts):
    '''parse and apply a fileset specification'''
    from . import fileset

    fileset.symbols  # force import of fileset so we have predicates to optimize
    opts = pycompat.byteskwargs(opts)
    ctx = scmutil.revsingle(repo, opts.get(b'rev'), None)

    stages = [
        (b'parsed', pycompat.identity),
        (b'analyzed', filesetlang.analyze),
        (b'optimized', filesetlang.optimize),
    ]
    stagenames = {n for n, f in stages}

    showalways = set()
    if ui.verbose and not opts[b'show_stage']:
        # show parsed tree by --verbose (deprecated)
        showalways.add(b'parsed')
    if opts[b'show_stage'] == [b'all']:
        showalways.update(stagenames)
    else:
        for n in opts[b'show_stage']:
            if n not in stagenames:
                raise error.Abort(_(b'invalid stage name: %s') % n)
        showalways.update(opts[b'show_stage'])

    tree = filesetlang.parse(expr)
    for n, f in stages:
        tree = f(tree)
        if n in showalways:
            if opts[b'show_stage'] or n != b'parsed':
                ui.write(b"* %s:\n" % n)
            ui.write(filesetlang.prettyformat(tree), b"\n")

    files = set()
    if opts[b'all_files']:
        for r in repo:
            c = repo[r]
            files.update(c.files())
            files.update(c.substate)
    if opts[b'all_files'] or ctx.rev() is None:
        wctx = repo[None]
        files.update(
            repo.dirstate.walk(
                scmutil.matchall(repo),
                subrepos=list(wctx.substate),
                unknown=True,
                ignored=True,
            )
        )
        files.update(wctx.substate)
    else:
        files.update(ctx.files())
        files.update(ctx.substate)

    m = ctx.matchfileset(repo.getcwd(), expr)
    if opts[b'show_matcher'] or (opts[b'show_matcher'] is None and ui.verbose):
        ui.writenoi18n(b'* matcher:\n', stringutil.prettyrepr(m), b'\n')
    for f in sorted(files):
        if not m(f):
            continue
        ui.write(b"%s\n" % f)
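
# Illustrative usage sketch (not part of upstream): --show-stage exposes the
# parse/analyze/optimize pipeline driven by the 'stages' list above, e.g.:
#
#   $ hg debugfileset --show-stage all 'added() or modified()'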


@command(b'debugformat', [] + cmdutil.formatteropts)
def debugformat(ui, repo, **opts):
    """display format information about the current repository

    Use --verbose to get extra information about the current config value and
    the Mercurial default."""
    opts = pycompat.byteskwargs(opts)
    maxvariantlength = max(len(fv.name) for fv in upgrade.allformatvariant)
    maxvariantlength = max(len(b'format-variant'), maxvariantlength)

    def makeformatname(name):
        return b'%s:' + (b' ' * (maxvariantlength - len(name)))

    fm = ui.formatter(b'debugformat', opts)
    if fm.isplain():

        def formatvalue(value):
            if util.safehasattr(value, b'startswith'):
                return value
            if value:
                return b'yes'
            else:
                return b'no'

    else:
        formatvalue = pycompat.identity

    fm.plain(b'format-variant')
    fm.plain(b' ' * (maxvariantlength - len(b'format-variant')))
    fm.plain(b' repo')
    if ui.verbose:
        fm.plain(b' config default')
    fm.plain(b'\n')
    for fv in upgrade.allformatvariant:
        fm.startitem()
        repovalue = fv.fromrepo(repo)
        configvalue = fv.fromconfig(repo)

        if repovalue != configvalue:
            namelabel = b'formatvariant.name.mismatchconfig'
            repolabel = b'formatvariant.repo.mismatchconfig'
        elif repovalue != fv.default:
            namelabel = b'formatvariant.name.mismatchdefault'
            repolabel = b'formatvariant.repo.mismatchdefault'
        else:
            namelabel = b'formatvariant.name.uptodate'
            repolabel = b'formatvariant.repo.uptodate'

        fm.write(b'name', makeformatname(fv.name), fv.name, label=namelabel)
        fm.write(b'repo', b' %3s', formatvalue(repovalue), label=repolabel)
        if fv.default != configvalue:
            configlabel = b'formatvariant.config.special'
        else:
            configlabel = b'formatvariant.config.default'
        fm.condwrite(
            ui.verbose,
            b'config',
            b' %6s',
            formatvalue(configvalue),
            label=configlabel,
        )
        fm.condwrite(
            ui.verbose,
            b'default',
            b' %7s',
            formatvalue(fv.default),
            label=b'formatvariant.default',
        )
        fm.plain(b'\n')
    fm.end()
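
# Illustrative usage sketch (not part of upstream): plain output prints one
# aligned row per format variant; --verbose adds the config and default
# columns guarded by ui.verbose above:
#
#   $ hg debugformat --verbose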


@command(b'debugfsinfo', [], _(b'[PATH]'), norepo=True)
def debugfsinfo(ui, path=b"."):
    """show information detected about current filesystem"""
    ui.writenoi18n(b'path: %s\n' % path)
    ui.writenoi18n(
        b'mounted on: %s\n' % (util.getfsmountpoint(path) or b'(unknown)')
    )
    ui.writenoi18n(b'exec: %s\n' % (util.checkexec(path) and b'yes' or b'no'))
    ui.writenoi18n(b'fstype: %s\n' % (util.getfstype(path) or b'(unknown)'))
    ui.writenoi18n(
        b'symlink: %s\n' % (util.checklink(path) and b'yes' or b'no')
    )
    ui.writenoi18n(
        b'hardlink: %s\n' % (util.checknlink(path) and b'yes' or b'no')
    )
    casesensitive = b'(unknown)'
    try:
        with pycompat.namedtempfile(prefix=b'.debugfsinfo', dir=path) as f:
            casesensitive = util.fscasesensitive(f.name) and b'yes' or b'no'
    except OSError:
        pass
    ui.writenoi18n(b'case-sensitive: %s\n' % casesensitive)
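
# Illustrative usage sketch (not part of upstream): the path argument
# defaults to "." per the signature above, so both of these work:
#
#   $ hg debugfsinfo
#   $ hg debugfsinfo /path/to/check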


@command(
    b'debuggetbundle',
    [
        (b'H', b'head', [], _(b'id of head node'), _(b'ID')),
        (b'C', b'common', [], _(b'id of common node'), _(b'ID')),
        (
            b't',
            b'type',
            b'bzip2',
            _(b'bundle compression type to use'),
            _(b'TYPE'),
        ),
    ],
    _(b'REPO FILE [-H|-C ID]...'),
    norepo=True,
)
def debuggetbundle(ui, repopath, bundlepath, head=None, common=None, **opts):
    """retrieves a bundle from a repo

    Every ID must be a full-length hex node id string. Saves the bundle to the
    given file.
    """
    opts = pycompat.byteskwargs(opts)
    repo = hg.peer(ui, opts, repopath)
    if not repo.capable(b'getbundle'):
        raise error.Abort(b"getbundle() not supported by target repository")
    args = {}
    if common:
        args['common'] = [bin(s) for s in common]
    if head:
        args['heads'] = [bin(s) for s in head]
    # TODO: get desired bundlecaps from command line.
    args['bundlecaps'] = None
    bundle = repo.getbundle(b'debug', **args)

    bundletype = opts.get(b'type', b'bzip2').lower()
    btypes = {
        b'none': b'HG10UN',
        b'bzip2': b'HG10BZ',
        b'gzip': b'HG10GZ',
        b'bundle2': b'HG20',
    }
    bundletype = btypes.get(bundletype)
    if bundletype not in bundle2.bundletypes:
        raise error.Abort(_(b'unknown bundle type specified with --type'))
    bundle2.writebundle(ui, bundle, bundlepath, bundletype)
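
# Illustrative usage sketch (not part of upstream): the URL is a placeholder;
# the -t/--type value maps through 'btypes' above:
#
#   $ hg debuggetbundle http://example.com/repo out.hg -t bundle2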


@command(b'debugignore', [], b'[FILE]')
def debugignore(ui, repo, *files, **opts):
    """display the combined ignore pattern and information about ignored files

    With no argument display the combined ignore pattern.

    Given space separated file names, shows if the given file is ignored and
    if so, shows the ignore rule (file and line number) that matched it.
    """
    ignore = repo.dirstate._ignore
    if not files:
        # Show all the patterns
        ui.write(b"%s\n" % pycompat.byterepr(ignore))
    else:
        m = scmutil.match(repo[None], pats=files)
        uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=True)
        for f in m.files():
            nf = util.normpath(f)
            ignored = None
            ignoredata = None
            if nf != b'.':
                if ignore(nf):
                    ignored = nf
                    ignoredata = repo.dirstate._ignorefileandline(nf)
                else:
                    for p in pathutil.finddirs(nf):
                        if ignore(p):
                            ignored = p
                            ignoredata = repo.dirstate._ignorefileandline(p)
                            break
            if ignored:
                if ignored == nf:
                    ui.write(_(b"%s is ignored\n") % uipathfn(f))
                else:
                    ui.write(
                        _(
                            b"%s is ignored because of "
                            b"containing directory %s\n"
                        )
                        % (uipathfn(f), ignored)
                    )
                ignorefile, lineno, line = ignoredata
                ui.write(
                    _(b"(ignore rule in %s, line %d: '%s')\n")
                    % (ignorefile, lineno, line)
                )
            else:
                ui.write(_(b"%s is not ignored\n") % uipathfn(f))
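
# Illustrative usage sketch (not part of upstream), matching the two docstring
# modes above (file name is a placeholder):
#
#   $ hg debugignore                # dump the combined ignore matcher
#   $ hg debugignore build/out.o    # explain whether/why one file is ignored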


@command(
    b'debugindex',
    cmdutil.debugrevlogopts + cmdutil.formatteropts,
    _(b'-c|-m|FILE'),
)
def debugindex(ui, repo, file_=None, **opts):
    """dump index data for a storage primitive"""
    opts = pycompat.byteskwargs(opts)
    store = cmdutil.openstorage(repo, b'debugindex', file_, opts)

    if ui.debugflag:
        shortfn = hex
    else:
        shortfn = short

    idlen = 12
    for i in store:
        idlen = len(shortfn(store.node(i)))
        break

    fm = ui.formatter(b'debugindex', opts)
    fm.plain(
        b'   rev linkrev %s %s p2\n'
        % (b'nodeid'.ljust(idlen), b'p1'.ljust(idlen))
    )

    for rev in store:
        node = store.node(rev)
        parents = store.parents(node)

        fm.startitem()
        fm.write(b'rev', b'%6d ', rev)
        fm.write(b'linkrev', b'%7d ', store.linkrev(rev))
        fm.write(b'node', b'%s ', shortfn(node))
        fm.write(b'p1', b'%s ', shortfn(parents[0]))
        fm.write(b'p2', b'%s', shortfn(parents[1]))
        fm.plain(b'\n')

    fm.end()
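
# Illustrative usage sketch (not part of upstream): -c and -m come from
# cmdutil.debugrevlogopts and select the changelog or manifest index, per the
# '-c|-m|FILE' synopsis above:
#
#   $ hg debugindex -c
#   $ hg debugindex path/to/tracked-file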


@command(
    b'debugindexdot',
    cmdutil.debugrevlogopts,
    _(b'-c|-m|FILE'),
    optionalrepo=True,
)
def debugindexdot(ui, repo, file_=None, **opts):
    """dump an index DAG as a graphviz dot file"""
    opts = pycompat.byteskwargs(opts)
    r = cmdutil.openstorage(repo, b'debugindexdot', file_, opts)
    ui.writenoi18n(b"digraph G {\n")
    for i in r:
        node = r.node(i)
        pp = r.parents(node)
        ui.write(b"\t%d -> %d\n" % (r.rev(pp[0]), i))
        if pp[1] != repo.nullid:
            ui.write(b"\t%d -> %d\n" % (r.rev(pp[1]), i))
    ui.write(b"}\n")
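
# Illustrative usage sketch (not part of upstream): the emitted graph is
# plain dot syntax and can be rendered with graphviz:
#
#   $ hg debugindexdot -c | dot -Tpng -o dag.png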


@command(b'debugindexstats', [])
def debugindexstats(ui, repo):
    """show stats related to the changelog index"""
    repo.changelog.shortest(repo.nullid, 1)
    index = repo.changelog.index
    if not util.safehasattr(index, b'stats'):
        raise error.Abort(_(b'debugindexstats only works with native code'))
    for k, v in sorted(index.stats().items()):
        ui.write(b'%s: %d\n' % (k, v))


@command(b'debuginstall', [] + cmdutil.formatteropts, b'', norepo=True)
def debuginstall(ui, **opts):
    """test Mercurial installation

    Returns 0 on success.
    """
    opts = pycompat.byteskwargs(opts)

    problems = 0

    fm = ui.formatter(b'debuginstall', opts)
    fm.startitem()

    # encoding might be unknown or wrong. don't translate these messages.
    fm.write(b'encoding', b"checking encoding (%s)...\n", encoding.encoding)
    err = None
    try:
        codecs.lookup(pycompat.sysstr(encoding.encoding))
    except LookupError as inst:
        err = stringutil.forcebytestr(inst)
        problems += 1
    fm.condwrite(
        err,
        b'encodingerror',
        b" %s\n (check that your locale is properly set)\n",
        err,
    )

    # Python
    pythonlib = None
    if util.safehasattr(os, '__file__'):
        pythonlib = os.path.dirname(pycompat.fsencode(os.__file__))
    elif getattr(sys, 'oxidized', False):
        pythonlib = pycompat.sysexecutable

    fm.write(
        b'pythonexe',
        _(b"checking Python executable (%s)\n"),
        pycompat.sysexecutable or _(b"unknown"),
    )
    fm.write(
        b'pythonimplementation',
        _(b"checking Python implementation (%s)\n"),
        pycompat.sysbytes(platform.python_implementation()),
    )
    fm.write(
        b'pythonver',
        _(b"checking Python version (%s)\n"),
        (b"%d.%d.%d" % sys.version_info[:3]),
    )
    fm.write(
        b'pythonlib',
        _(b"checking Python lib (%s)...\n"),
        pythonlib or _(b"unknown"),
    )

    try:
        from . import rustext  # pytype: disable=import-error

        rustext.__doc__  # trigger lazy import
    except ImportError:
        rustext = None

    security = set(sslutil.supportedprotocols)
    if sslutil.hassni:
        security.add(b'sni')

    fm.write(
        b'pythonsecurity',
        _(b"checking Python security support (%s)\n"),
        fm.formatlist(sorted(security), name=b'protocol', fmt=b'%s', sep=b','),
    )

    # These are warnings, not errors. So don't increment problem count. This
    # may change in the future.
    if b'tls1.2' not in security:
        fm.plain(
            _(
                b'  TLS 1.2 not supported by Python install; '
                b'network connections lack modern security\n'
            )
        )
    if b'sni' not in security:
        fm.plain(
            _(
                b'  SNI not supported by Python install; may have '
                b'connectivity issues with some servers\n'
            )
        )

    fm.plain(
        _(
            b"checking Rust extensions (%s)\n"
            % (b'missing' if rustext is None else b'installed')
        ),
    )

    # TODO print CA cert info

    # hg version
    hgver = util.version()
    fm.write(
        b'hgver', _(b"checking Mercurial version (%s)\n"), hgver.split(b'+')[0]
    )
    fm.write(
        b'hgverextra',
        _(b"checking Mercurial custom build (%s)\n"),
        b'+'.join(hgver.split(b'+')[1:]),
    )

    # compiled modules
    hgmodules = None
    if util.safehasattr(sys.modules[__name__], '__file__'):
        hgmodules = os.path.dirname(pycompat.fsencode(__file__))
    elif getattr(sys, 'oxidized', False):
        hgmodules = pycompat.sysexecutable

    fm.write(
        b'hgmodulepolicy', _(b"checking module policy (%s)\n"), policy.policy
    )
    fm.write(
        b'hgmodules',
        _(b"checking installed modules (%s)...\n"),
        hgmodules or _(b"unknown"),
    )

    rustandc = policy.policy in (b'rust+c', b'rust+c-allow')
    rustext = rustandc  # for now, that's the only case
    cext = policy.policy in (b'c', b'allow') or rustandc
    nopure = cext or rustext
    if nopure:
        err = None
        try:
            if cext:
                from .cext import (  # pytype: disable=import-error
                    base85,
                    bdiff,
                    mpatch,
                    osutil,
                )

                # quiet pyflakes
                dir(bdiff), dir(mpatch), dir(base85), dir(osutil)
            if rustext:
                from .rustext import (  # pytype: disable=import-error
                    ancestor,
                    dirstate,
                )

                dir(ancestor), dir(dirstate)  # quiet pyflakes
        except Exception as inst:
            err = stringutil.forcebytestr(inst)
            problems += 1
        fm.condwrite(err, b'extensionserror', b" %s\n", err)

    compengines = util.compengines._engines.values()
    fm.write(
        b'compengines',
        _(b'checking registered compression engines (%s)\n'),
        fm.formatlist(
            sorted(e.name() for e in compengines),
            name=b'compengine',
            fmt=b'%s',
            sep=b', ',
        ),
    )
    fm.write(
        b'compenginesavail',
        _(b'checking available compression engines (%s)\n'),
        fm.formatlist(
            sorted(e.name() for e in compengines if e.available()),
            name=b'compengine',
            fmt=b'%s',
            sep=b', ',
        ),
    )
    wirecompengines = compression.compengines.supportedwireengines(
        compression.SERVERROLE
    )
    fm.write(
        b'compenginesserver',
        _(
            b'checking available compression engines '
            b'for wire protocol (%s)\n'
        ),
        fm.formatlist(
            [e.name() for e in wirecompengines if e.wireprotosupport()],
            name=b'compengine',
            fmt=b'%s',
            sep=b', ',
        ),
    )
    re2 = b'missing'
    if util._re2:
        re2 = b'available'
    fm.plain(_(b'checking "re2" regexp engine (%s)\n') % re2)
    fm.data(re2=bool(util._re2))

    # templates
    p = templater.templatedir()
    fm.write(b'templatedirs', b'checking templates (%s)...\n', p or b'')
    fm.condwrite(not p, b'', _(b" no template directories found\n"))
    if p:
        (m, fp) = templater.try_open_template(b"map-cmdline.default")
        if m:
            # template found, check if it is working
            err = None
            try:
                templater.templater.frommapfile(m)
            except Exception as inst:
                err = stringutil.forcebytestr(inst)
                p = None
            fm.condwrite(err, b'defaulttemplateerror', b" %s\n", err)
        else:
            p = None
        fm.condwrite(
            p, b'defaulttemplate', _(b"checking default template (%s)\n"), m
        )
        fm.condwrite(
            not m,
            b'defaulttemplatenotfound',
            _(b" template '%s' not found\n"),
            b"default",
        )
    if not p:
        problems += 1
    fm.condwrite(
        not p, b'', _(b" (templates seem to have been installed incorrectly)\n")
    )

    # editor
    editor = ui.geteditor()
    editor = util.expandpath(editor)
    editorbin = procutil.shellsplit(editor)[0]
    fm.write(b'editor', _(b"checking commit editor... (%s)\n"), editorbin)
    cmdpath = procutil.findexe(editorbin)
    fm.condwrite(
        not cmdpath and editor == b'vi',
        b'vinotfound',
        _(
            b" No commit editor set and can't find %s in PATH\n"
            b" (specify a commit editor in your configuration"
            b" file)\n"
        ),
        not cmdpath and editor == b'vi' and editorbin,
    )
    fm.condwrite(
        not cmdpath and editor != b'vi',
        b'editornotfound',
        _(
            b" Can't find editor '%s' in PATH\n"
            b" (specify a commit editor in your configuration"
            b" file)\n"
        ),
        not cmdpath and editorbin,
    )
    if not cmdpath and editor != b'vi':
        problems += 1

    # check username
    username = None
    err = None
    try:
        username = ui.username()
    except error.Abort as e:
        err = e.message
        problems += 1

    fm.condwrite(
        username, b'username', _(b"checking username (%s)\n"), username
    )
    fm.condwrite(
        err,
        b'usernameerror',
        _(
            b"checking username...\n %s\n"
            b" (specify a username in your configuration file)\n"
        ),
        err,
    )

    for name, mod in extensions.extensions():
        handler = getattr(mod, 'debuginstall', None)
        if handler is not None:
            problems += handler(ui, fm)

    fm.condwrite(not problems, b'', _(b"no problems detected\n"))
    if not problems:
        fm.data(problems=problems)
    fm.condwrite(
        problems,
        b'problems',
        _(b"%d problems detected, please check your install!\n"),
        problems,
    )
    fm.end()

    return problems
1984
1989
1985
1990
@command(b'debugknown', [], _(b'REPO ID...'), norepo=True)
def debugknown(ui, repopath, *ids, **opts):
    """test whether node ids are known to a repo

    Every ID must be a full-length hex node id string. Returns a list of 0s
    and 1s indicating unknown/known.
    """
    opts = pycompat.byteskwargs(opts)
    repo = hg.peer(ui, opts, repopath)
    if not repo.capable(b'known'):
        raise error.Abort(b"known() not supported by target repository")
    flags = repo.known([bin(s) for s in ids])
    ui.write(b"%s\n" % (b"".join([f and b"1" or b"0" for f in flags])))


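# A minimal sketch of how debugknown renders its answer (the join/list
# comprehension above); this is illustrative only, not part of the command:
#
#   flags = [True, False, True]
#   b"".join([f and b"1" or b"0" for f in flags])  # -> b"101"
#
# So ``hg debugknown REPO ID1 ID2 ID3`` would print ``101`` when only the
# second id is unknown to REPO.
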
@command(b'debuglabelcomplete', [], _(b'LABEL...'))
def debuglabelcomplete(ui, repo, *args):
    '''backwards compatibility with old bash completion scripts (DEPRECATED)'''
    debugnamecomplete(ui, repo, *args)


@command(
    b'debuglocks',
    [
        (b'L', b'force-free-lock', None, _(b'free the store lock (DANGEROUS)')),
        (
            b'W',
            b'force-free-wlock',
            None,
            _(b'free the working state lock (DANGEROUS)'),
        ),
        (b's', b'set-lock', None, _(b'set the store lock until stopped')),
        (
            b'S',
            b'set-wlock',
            None,
            _(b'set the working state lock until stopped'),
        ),
    ],
    _(b'[OPTION]...'),
)
def debuglocks(ui, repo, **opts):
    """show or modify state of locks

    By default, this command will show which locks are held. This
    includes the user and process holding the lock, the amount of time
    the lock has been held, and the machine name where the process is
    running if it's not local.

    Locks protect the integrity of Mercurial's data, so they should be
    treated with care. System crashes or other interruptions may cause
    locks to not be properly released, though Mercurial will usually
    detect and remove such stale locks automatically.

    However, detecting stale locks may not always be possible (for
    instance, on a shared filesystem). Removing locks may also be
    blocked by filesystem permissions.

    Setting a lock will prevent other commands from changing the data.
    The command will wait until an interruption (SIGINT, SIGTERM, ...) occurs.
    The set locks are removed when the command exits.

    Returns 0 if no locks are held.

    """

    if opts.get('force_free_lock'):
        repo.svfs.unlink(b'lock')
    if opts.get('force_free_wlock'):
        repo.vfs.unlink(b'wlock')
    if opts.get('force_free_lock') or opts.get('force_free_wlock'):
        return 0

    locks = []
    try:
        if opts.get('set_wlock'):
            try:
                locks.append(repo.wlock(False))
            except error.LockHeld:
                raise error.Abort(_(b'wlock is already held'))
        if opts.get('set_lock'):
            try:
                locks.append(repo.lock(False))
            except error.LockHeld:
                raise error.Abort(_(b'lock is already held'))
        if len(locks):
            ui.promptchoice(_(b"ready to release the lock (y)? $$ &Yes"))
            return 0
    finally:
        release(*locks)

    now = time.time()
    held = 0

    def report(vfs, name, method):
        # this causes stale locks to get reaped for more accurate reporting
        try:
            l = method(False)
        except error.LockHeld:
            l = None

        if l:
            l.release()
        else:
            try:
                st = vfs.lstat(name)
                age = now - st[stat.ST_MTIME]
                user = util.username(st.st_uid)
                locker = vfs.readlock(name)
                if b":" in locker:
                    host, pid = locker.split(b':')
                    if host == socket.gethostname():
                        locker = b'user %s, process %s' % (user or b'None', pid)
                    else:
                        locker = b'user %s, process %s, host %s' % (
                            user or b'None',
                            pid,
                            host,
                        )
                ui.writenoi18n(b"%-6s %s (%ds)\n" % (name + b":", locker, age))
                return 1
            except OSError as e:
                if e.errno != errno.ENOENT:
                    raise

        ui.writenoi18n(b"%-6s free\n" % (name + b":"))
        return 0

    held += report(repo.svfs, b"lock", repo.lock)
    held += report(repo.vfs, b"wlock", repo.wlock)

    return held


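# Illustrative debuglocks session (user, pid and age are hypothetical); the
# two output shapes come from the writenoi18n calls in report() above:
#
#   $ hg debuglocks
#   lock:  free
#   wlock: user alice, process 4321 (12s)
#
# Holding both locks from one shell while exercising another command:
#
#   $ hg debuglocks --set-lock --set-wlock
#   ready to release the lock (y)?
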
@command(
    b'debugmanifestfulltextcache',
    [
        (b'', b'clear', False, _(b'clear the cache')),
        (
            b'a',
            b'add',
            [],
            _(b'add the given manifest nodes to the cache'),
            _(b'NODE'),
        ),
    ],
    b'',
)
def debugmanifestfulltextcache(ui, repo, add=(), **opts):
    """show, clear or amend the contents of the manifest fulltext cache"""

    def getcache():
        r = repo.manifestlog.getstorage(b'')
        try:
            return r._fulltextcache
        except AttributeError:
            msg = _(
                b"Current revlog implementation doesn't appear to have a "
                b"manifest fulltext cache\n"
            )
            raise error.Abort(msg)

    if opts.get('clear'):
        with repo.wlock():
            cache = getcache()
            cache.clear(clear_persisted_data=True)
            return

    if add:
        with repo.wlock():
            m = repo.manifestlog
            store = m.getstorage(b'')
            for n in add:
                try:
                    manifest = m[store.lookup(n)]
                except error.LookupError as e:
                    raise error.Abort(
                        bytes(e), hint=b"Check your manifest node id"
                    )
                manifest.read()  # stores the revision in the cache too
            return

    cache = getcache()
    if not len(cache):
        ui.write(_(b'cache empty\n'))
    else:
        ui.write(
            _(
                b'cache contains %d manifest entries, in order of most to '
                b'least recent:\n'
            )
            % (len(cache),)
        )
        totalsize = 0
        for nodeid in cache:
            # use cache.peek to not update the LRU order
            data = cache.peek(nodeid)
            size = len(data)
            totalsize += size + 24  # 20 bytes nodeid, 4 bytes size
            ui.write(
                _(b'id: %s, size %s\n') % (hex(nodeid), util.bytecount(size))
            )
        ondisk = cache._opener.stat(b'manifestfulltextcache').st_size
        ui.write(
            _(b'total cache data size %s, on-disk %s\n')
            % (util.bytecount(totalsize), util.bytecount(ondisk))
        )


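# Illustrative debugmanifestfulltextcache output (id and sizes are
# hypothetical). Note how the 24-byte per-entry overhead accounted above
# (20-byte nodeid + 4-byte size) shows up in the total: 232 + 24 = 256.
#
#   $ hg debugmanifestfulltextcache
#   cache contains 1 manifest entries, in order of most to least recent:
#   id: <40 hex digits>, size 232 bytes
#   total cache data size 256 bytes, on-disk 256 bytes
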
@command(b'debugmergestate', [] + cmdutil.templateopts, b'')
def debugmergestate(ui, repo, *args, **opts):
    """print merge state

    Use --verbose to print out information about whether v1 or v2 merge state
    was chosen."""

    if ui.verbose:
        ms = mergestatemod.mergestate(repo)

        # sort so that reasonable information is on top
        v1records = ms._readrecordsv1()
        v2records = ms._readrecordsv2()

        if not v1records and not v2records:
            pass
        elif not v2records:
            ui.writenoi18n(b'no version 2 merge state\n')
        elif ms._v1v2match(v1records, v2records):
            ui.writenoi18n(b'v1 and v2 states match: using v2\n')
        else:
            ui.writenoi18n(b'v1 and v2 states mismatch: using v1\n')

    opts = pycompat.byteskwargs(opts)
    if not opts[b'template']:
        opts[b'template'] = (
            b'{if(commits, "", "no merge state found\n")}'
            b'{commits % "{name}{if(label, " ({label})")}: {node}\n"}'
            b'{files % "file: {path} (state \\"{state}\\")\n'
            b'{if(local_path, "'
            b'  local path: {local_path} (hash {local_key}, flags \\"{local_flags}\\")\n'
            b'  ancestor path: {ancestor_path} (node {ancestor_node})\n'
            b'  other path: {other_path} (node {other_node})\n'
            b'")}'
            b'{if(rename_side, "'
            b'  rename side: {rename_side}\n'
            b'  renamed path: {renamed_path}\n'
            b'")}'
            b'{extras % "  extra: {key} = {value}\n"}'
            b'"}'
            b'{extras % "extra: {file} ({key} = {value})\n"}'
        )

    ms = mergestatemod.mergestate.read(repo)

    fm = ui.formatter(b'debugmergestate', opts)
    fm.startitem()

    fm_commits = fm.nested(b'commits')
    if ms.active():
        for name, node, label_index in (
            (b'local', ms.local, 0),
            (b'other', ms.other, 1),
        ):
            fm_commits.startitem()
            fm_commits.data(name=name)
            fm_commits.data(node=hex(node))
            if ms._labels and len(ms._labels) > label_index:
                fm_commits.data(label=ms._labels[label_index])
    fm_commits.end()

    fm_files = fm.nested(b'files')
    if ms.active():
        for f in ms:
            fm_files.startitem()
            fm_files.data(path=f)
            state = ms._state[f]
            fm_files.data(state=state[0])
            if state[0] in (
                mergestatemod.MERGE_RECORD_UNRESOLVED,
                mergestatemod.MERGE_RECORD_RESOLVED,
            ):
                fm_files.data(local_key=state[1])
                fm_files.data(local_path=state[2])
                fm_files.data(ancestor_path=state[3])
                fm_files.data(ancestor_node=state[4])
                fm_files.data(other_path=state[5])
                fm_files.data(other_node=state[6])
                fm_files.data(local_flags=state[7])
            elif state[0] in (
                mergestatemod.MERGE_RECORD_UNRESOLVED_PATH,
                mergestatemod.MERGE_RECORD_RESOLVED_PATH,
            ):
                fm_files.data(renamed_path=state[1])
                fm_files.data(rename_side=state[2])
            fm_extras = fm_files.nested(b'extras')
            for k, v in sorted(ms.extras(f).items()):
                fm_extras.startitem()
                fm_extras.data(key=k)
                fm_extras.data(value=v)
            fm_extras.end()

    fm_files.end()

    fm_extras = fm.nested(b'extras')
    for f, d in sorted(pycompat.iteritems(ms.allextras())):
        if f in ms:
            # if the file is in the mergestate, we have already
            # processed its extras
            continue
        for k, v in pycompat.iteritems(d):
            fm_extras.startitem()
            fm_extras.data(file=f)
            fm_extras.data(key=k)
            fm_extras.data(value=v)
    fm_extras.end()

    fm.end()


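# Sketch of what the default template above could render for one unresolved
# file (nodes, paths, hashes and labels are all hypothetical placeholders):
#
#   local (working copy): 1111111111111111111111111111111111111111
#   other (merge rev): 2222222222222222222222222222222222222222
#   file: foo.txt (state "u")
#     local path: foo.txt (hash <40 hex digits>, flags "")
#     ancestor path: foo.txt (node <40 hex digits>)
#     other path: foo.txt (node <40 hex digits>)
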
@command(b'debugnamecomplete', [], _(b'NAME...'))
def debugnamecomplete(ui, repo, *args):
    '''complete "names" - tags, open branch names, bookmark names'''

    names = set()
    # since we previously only listed open branches, we will handle that
    # specially (after this for loop)
    for name, ns in pycompat.iteritems(repo.names):
        if name != b'branches':
            names.update(ns.listnames(repo))
    names.update(
        tag
        for (tag, heads, tip, closed) in repo.branchmap().iterbranches()
        if not closed
    )
    completions = set()
    if not args:
        args = [b'']
    for a in args:
        completions.update(n for n in names if n.startswith(a))
    ui.write(b'\n'.join(sorted(completions)))
    ui.write(b'\n')


@command(
    b'debugnodemap',
    [
        (
            b'',
            b'dump-new',
            False,
            _(b'write a (new) persistent binary nodemap on stdout'),
        ),
        (b'', b'dump-disk', False, _(b'dump on-disk data on stdout')),
        (
            b'',
            b'check',
            False,
            _(b'check that the data on disk are correct'),
        ),
        (
            b'',
            b'metadata',
            False,
            _(b'display the on-disk metadata for the nodemap'),
        ),
    ],
)
def debugnodemap(ui, repo, **opts):
    """write and inspect the on-disk nodemap"""
    if opts['dump_new']:
        unfi = repo.unfiltered()
        cl = unfi.changelog
        if util.safehasattr(cl.index, "nodemap_data_all"):
            data = cl.index.nodemap_data_all()
        else:
            data = nodemap.persistent_data(cl.index)
        ui.write(data)
    elif opts['dump_disk']:
        unfi = repo.unfiltered()
        cl = unfi.changelog
        nm_data = nodemap.persisted_data(cl)
        if nm_data is not None:
            docket, data = nm_data
            ui.write(data[:])
    elif opts['check']:
        unfi = repo.unfiltered()
        cl = unfi.changelog
        nm_data = nodemap.persisted_data(cl)
        if nm_data is not None:
            docket, data = nm_data
            return nodemap.check_data(ui, cl.index, data)
    elif opts['metadata']:
        unfi = repo.unfiltered()
        cl = unfi.changelog
        nm_data = nodemap.persisted_data(cl)
        if nm_data is not None:
            docket, data = nm_data
            ui.write((b"uid: %s\n") % docket.uid)
            ui.write((b"tip-rev: %d\n") % docket.tip_rev)
            ui.write((b"tip-node: %s\n") % hex(docket.tip_node))
            ui.write((b"data-length: %d\n") % docket.data_length)
            ui.write((b"data-unused: %d\n") % docket.data_unused)
            unused_perc = docket.data_unused * 100.0 / docket.data_length
            ui.write((b"data-unused: %2.3f%%\n") % unused_perc)


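# Illustrative ``hg debugnodemap --metadata`` output, matching the ui.write
# calls above (every value below is hypothetical):
#
#   uid: b71dd1724e92
#   tip-rev: 5
#   tip-node: <40 hex digits>
#   data-length: 121088
#   data-unused: 0
#   data-unused: 0.000%
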
@command(
    b'debugobsolete',
    [
        (b'', b'flags', 0, _(b'marker flags')),
        (
            b'',
            b'record-parents',
            False,
            _(b'record parent information for the precursor'),
        ),
        (b'r', b'rev', [], _(b'display markers relevant to REV')),
        (
            b'',
            b'exclusive',
            False,
            _(b'restrict display to markers only relevant to REV'),
        ),
        (b'', b'index', False, _(b'display index of the marker')),
        (b'', b'delete', [], _(b'delete markers specified by indices')),
    ]
    + cmdutil.commitopts2
    + cmdutil.formatteropts,
    _(b'[OBSOLETED [REPLACEMENT ...]]'),
)
def debugobsolete(ui, repo, precursor=None, *successors, **opts):
    """create arbitrary obsolete marker

    With no arguments, displays the list of obsolescence markers."""

    opts = pycompat.byteskwargs(opts)

    def parsenodeid(s):
        try:
            # We do not use revsingle/revrange functions here to accept
            # arbitrary node identifiers, possibly not present in the
            # local repository.
            n = bin(s)
            if len(n) != repo.nodeconstants.nodelen:
                raise TypeError()
            return n
        except TypeError:
            raise error.InputError(
                b'changeset references must be full hexadecimal '
                b'node identifiers'
            )

    if opts.get(b'delete'):
        indices = []
        for v in opts.get(b'delete'):
            try:
                indices.append(int(v))
            except ValueError:
                raise error.InputError(
                    _(b'invalid index value: %r') % v,
                    hint=_(b'use integers for indices'),
                )

        if repo.currenttransaction():
            raise error.Abort(
                _(b'cannot delete obsmarkers in the middle of a transaction')
            )

        with repo.lock():
            n = repair.deleteobsmarkers(repo.obsstore, indices)
            ui.write(_(b'deleted %i obsolescence markers\n') % n)

        return

    if precursor is not None:
        if opts[b'rev']:
            raise error.InputError(
                b'cannot select revision when creating marker'
            )
        metadata = {}
        metadata[b'user'] = encoding.fromlocal(opts[b'user'] or ui.username())
        succs = tuple(parsenodeid(succ) for succ in successors)
        l = repo.lock()
        try:
            tr = repo.transaction(b'debugobsolete')
            try:
                date = opts.get(b'date')
                if date:
                    date = dateutil.parsedate(date)
                else:
                    date = None
                prec = parsenodeid(precursor)
                parents = None
                if opts[b'record_parents']:
                    if prec not in repo.unfiltered():
                        raise error.Abort(
                            b'cannot use --record-parents on '
                            b'unknown changesets'
                        )
                    parents = repo.unfiltered()[prec].parents()
                    parents = tuple(p.node() for p in parents)
                repo.obsstore.create(
                    tr,
                    prec,
                    succs,
                    opts[b'flags'],
                    parents=parents,
                    date=date,
                    metadata=metadata,
                    ui=ui,
                )
                tr.close()
            except ValueError as exc:
                raise error.Abort(
                    _(b'bad obsmarker input: %s') % stringutil.forcebytestr(exc)
                )
            finally:
                tr.release()
        finally:
            l.release()
    else:
        if opts[b'rev']:
            revs = scmutil.revrange(repo, opts[b'rev'])
            nodes = [repo[r].node() for r in revs]
            markers = list(
                obsutil.getmarkers(
                    repo, nodes=nodes, exclusive=opts[b'exclusive']
                )
            )
            markers.sort(key=lambda x: x._data)
        else:
            markers = obsutil.getmarkers(repo)

        markerstoiter = markers
        isrelevant = lambda m: True
        if opts.get(b'rev') and opts.get(b'index'):
            markerstoiter = obsutil.getmarkers(repo)
            markerset = set(markers)
            isrelevant = lambda m: m in markerset

        fm = ui.formatter(b'debugobsolete', opts)
        for i, m in enumerate(markerstoiter):
            if not isrelevant(m):
                # marker can be irrelevant when we're iterating over a set
                # of markers (markerstoiter) which is bigger than the set
                # of markers we want to display (markers). This can happen
                # when both --index and --rev options are provided, and thus
                # we need to iterate over all of the markers to get the
                # correct indices, but only display the ones that are
                # relevant to the --rev value.
                continue
            fm.startitem()
            ind = i if opts.get(b'index') else None
            cmdutil.showmarker(fm, m, index=ind)
        fm.end()


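# Illustrative debugobsolete invocations (OLD and NEW stand for full
# 40-digit hex node ids, as parsenodeid() above requires):
#
#   create a marker stating that OLD was superseded by NEW, recording the
#   parents of OLD:
#   $ hg debugobsolete --record-parents OLD NEW
#
#   list markers relevant to the working copy parent, with their indices:
#   $ hg debugobsolete --rev . --index
#
#   delete the marker with index 0:
#   $ hg debugobsolete --delete 0
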
@command(
    b'debugp1copies',
    [(b'r', b'rev', b'', _(b'revision to debug'), _(b'REV'))],
    _(b'[-r REV]'),
)
def debugp1copies(ui, repo, **opts):
    """dump copy information compared to p1"""

    opts = pycompat.byteskwargs(opts)
    ctx = scmutil.revsingle(repo, opts.get(b'rev'), default=None)
    for dst, src in ctx.p1copies().items():
        ui.write(b'%s -> %s\n' % (src, dst))


@command(
    b'debugp2copies',
    [(b'r', b'rev', b'', _(b'revision to debug'), _(b'REV'))],
    _(b'[-r REV]'),
)
def debugp2copies(ui, repo, **opts):
    """dump copy information compared to p2"""

    opts = pycompat.byteskwargs(opts)
    ctx = scmutil.revsingle(repo, opts.get(b'rev'), default=None)
    for dst, src in ctx.p2copies().items():
        ui.write(b'%s -> %s\n' % (src, dst))


@command(
    b'debugpathcomplete',
    [
        (b'f', b'full', None, _(b'complete an entire path')),
        (b'n', b'normal', None, _(b'show only normal files')),
        (b'a', b'added', None, _(b'show only added files')),
        (b'r', b'removed', None, _(b'show only removed files')),
    ],
    _(b'FILESPEC...'),
)
def debugpathcomplete(ui, repo, *specs, **opts):
    """complete part or all of a tracked path

    This command supports shells that offer path name completion. It
    currently completes only files already known to the dirstate.

    Completion extends only to the next path segment unless
    --full is specified, in which case entire paths are used."""

    def complete(path, acceptable):
        dirstate = repo.dirstate
        spec = os.path.normpath(os.path.join(encoding.getcwd(), path))
        rootdir = repo.root + pycompat.ossep
        if spec != repo.root and not spec.startswith(rootdir):
            return [], []
        if os.path.isdir(spec):
            spec += b'/'
        spec = spec[len(rootdir) :]
        fixpaths = pycompat.ossep != b'/'
        if fixpaths:
            spec = spec.replace(pycompat.ossep, b'/')
        speclen = len(spec)
        fullpaths = opts['full']
        files, dirs = set(), set()
        adddir, addfile = dirs.add, files.add
        for f, st in pycompat.iteritems(dirstate):
            if f.startswith(spec) and st[0] in acceptable:
                if fixpaths:
                    f = f.replace(b'/', pycompat.ossep)
                if fullpaths:
                    addfile(f)
                    continue
                s = f.find(pycompat.ossep, speclen)
                if s >= 0:
                    adddir(f[:s])
                else:
                    addfile(f)
        return files, dirs

    acceptable = b''
    if opts['normal']:
        acceptable += b'nm'
    if opts['added']:
        acceptable += b'a'
    if opts['removed']:
        acceptable += b'r'
    cwd = repo.getcwd()
    if not specs:
        specs = [b'.']

    files, dirs = set(), set()
    for spec in specs:
        f, d = complete(spec, acceptable or b'nmar')
        files.update(f)
        dirs.update(d)
    files.update(dirs)
    ui.write(b'\n'.join(repo.pathto(p, cwd) for p in sorted(files)))
    ui.write(b'\n')


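# Illustrative debugpathcomplete session (tracked paths are hypothetical):
#
#   by default, completion stops at the next path segment:
#   $ hg debugpathcomplete lib
#   lib/subdir
#   lib/top.py
#
#   with --full, entire tracked paths are emitted instead:
#   $ hg debugpathcomplete --full lib
#   lib/subdir/nested.py
#   lib/top.py
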
@command(
    b'debugpathcopies',
    cmdutil.walkopts,
    b'hg debugpathcopies REV1 REV2 [FILE]',
    inferrepo=True,
)
def debugpathcopies(ui, repo, rev1, rev2, *pats, **opts):
    """show copies between two revisions"""
    ctx1 = scmutil.revsingle(repo, rev1)
    ctx2 = scmutil.revsingle(repo, rev2)
    m = scmutil.match(ctx1, pats, opts)
    for dst, src in sorted(copies.pathcopies(ctx1, ctx2, m).items()):
        ui.write(b'%s -> %s\n' % (src, dst))


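# Illustrative debugpathcopies output (revisions and file names are
# hypothetical); each line is "source -> destination" between REV1 and REV2:
#
#   $ hg debugpathcopies 1 2
#   old.py -> new.py
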
@command(b'debugpeer', [], _(b'PATH'), norepo=True)
def debugpeer(ui, path):
    """establish a connection to a peer repository"""
    # Always enable peer request logging; --debug is still required to
    # actually display it.
    overrides = {
        (b'devel', b'debug.peer-request'): True,
    }

    with ui.configoverride(overrides):
        peer = hg.peer(ui, {}, path)

        try:
            local = peer.local() is not None
            canpush = peer.canpush()

            ui.write(_(b'url: %s\n') % peer.url())
            ui.write(_(b'local: %s\n') % (_(b'yes') if local else _(b'no')))
            ui.write(
                _(b'pushable: %s\n') % (_(b'yes') if canpush else _(b'no'))
            )
        finally:
            peer.close()


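# Illustrative debugpeer run against a hypothetical SSH peer; add --debug
# to also see the peer-request log enabled by the override above:
#
#   $ hg debugpeer ssh://user@example.com/repo
#   url: ssh://user@example.com/repo
#   local: no
#   pushable: yes
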
@command(
    b'debugpickmergetool',
    [
        (b'r', b'rev', b'', _(b'check for files in this revision'), _(b'REV')),
        (b'', b'changedelete', None, _(b'emulate merging change and delete')),
    ]
    + cmdutil.walkopts
    + cmdutil.mergetoolopts,
    _(b'[PATTERN]...'),
    inferrepo=True,
)
def debugpickmergetool(ui, repo, *pats, **opts):
    """examine which merge tool is chosen for the specified file

    As described in :hg:`help merge-tools`, Mercurial examines the
    configurations below in this order to decide which merge tool is
    chosen for the specified file.

    1. ``--tool`` option
    2. ``HGMERGE`` environment variable
    3. configurations in ``merge-patterns`` section
    4. configuration of ``ui.merge``
    5. configurations in ``merge-tools`` section
    6. ``hgmerge`` tool (for historical reasons only)
    7. default tool for fallback (``:merge`` or ``:prompt``)

    This command writes out the examination result in the style below::

        FILE = MERGETOOL

    By default, all files known in the first parent context of the
    working directory are examined. Use file patterns and/or -I/-X
    options to limit target files. -r/--rev is also useful to examine
    files in another context without actually updating to it.

    With --debug, this command also shows warning messages while matching
    against ``merge-patterns`` and so on. It is recommended to
    use this option with explicit file patterns and/or -I/-X options,
    because this option increases the amount of output per file according
    to configurations in hgrc.

    With -v/--verbose, this command shows the configurations below
    first (only if specified).

    - ``--tool`` option
    - ``HGMERGE`` environment variable
    - configuration of ``ui.merge``

    If the merge tool is chosen before matching against
    ``merge-patterns``, this command can't show any helpful
    information, even with --debug. In such a case, the information
    above is useful for knowing why a merge tool was chosen.
    """
    opts = pycompat.byteskwargs(opts)
    overrides = {}
    if opts[b'tool']:
        overrides[(b'ui', b'forcemerge')] = opts[b'tool']
        ui.notenoi18n(b'with --tool %r\n' % (pycompat.bytestr(opts[b'tool'])))

    with ui.configoverride(overrides, b'debugmergepatterns'):
        hgmerge = encoding.environ.get(b"HGMERGE")
        if hgmerge is not None:
            ui.notenoi18n(b'with HGMERGE=%r\n' % (pycompat.bytestr(hgmerge)))
        uimerge = ui.config(b"ui", b"merge")
        if uimerge:
            ui.notenoi18n(b'with ui.merge=%r\n' % (pycompat.bytestr(uimerge)))

        ctx = scmutil.revsingle(repo, opts.get(b'rev'))
        m = scmutil.match(ctx, pats, opts)
        changedelete = opts[b'changedelete']
        for path in ctx.walk(m):
            fctx = ctx[path]
            try:
                if not ui.debugflag:
                    ui.pushbuffer(error=True)
                tool, toolpath = filemerge._picktool(
                    repo,
                    ui,
                    path,
                    fctx.isbinary(),
                    b'l' in fctx.flags(),
                    changedelete,
                )
            finally:
                if not ui.debugflag:
                    ui.popbuffer()
            ui.write(b'%s = %s\n' % (path, tool))


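# Illustrative debugpickmergetool run (file names and the configured tools
# are hypothetical); the "FILE = MERGETOOL" shape comes from the final
# ui.write above:
#
#   $ hg debugpickmergetool --rev tip 'glob:**.c'
#   driver.c = kdiff3
#   util.c = :merge
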
@command(b'debugpushkey', [], _(b'REPO NAMESPACE [KEY OLD NEW]'), norepo=True)
def debugpushkey(ui, repopath, namespace, *keyinfo, **opts):
    """access the pushkey key/value protocol

    With two args, list the keys in the given namespace.

    With five args, set a key to new if it currently is set to old.
    Reports success or failure.
    """

    target = hg.peer(ui, {}, repopath)
    try:
        if keyinfo:
            key, old, new = keyinfo
            with target.commandexecutor() as e:
                r = e.callcommand(
                    b'pushkey',
                    {
                        b'namespace': namespace,
                        b'key': key,
                        b'old': old,
                        b'new': new,
                    },
                ).result()

            ui.status(pycompat.bytestr(r) + b'\n')
            return not r
        else:
            for k, v in sorted(pycompat.iteritems(target.listkeys(namespace))):
                ui.write(
                    b"%s\t%s\n"
                    % (stringutil.escapestr(k), stringutil.escapestr(v))
                )
    finally:
        target.close()


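# Illustrative debugpushkey invocations (repo path, bookmark name and node
# are hypothetical placeholders):
#
#   list the keys of a namespace, one "key<TAB>value" line each:
#   $ hg debugpushkey /path/to/repo bookmarks
#
#   set key "mybook" to NEW if it is currently OLD (the status line is the
#   peer's result rendered by pycompat.bytestr above):
#   $ hg debugpushkey /path/to/repo bookmarks mybook OLD NEW
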
@command(b'debugpvec', [], _(b'A B'))
def debugpvec(ui, repo, a, b=None):
    ca = scmutil.revsingle(repo, a)
    cb = scmutil.revsingle(repo, b)
    pa = pvec.ctxpvec(ca)
    pb = pvec.ctxpvec(cb)
    if pa == pb:
        rel = b"="
    elif pa > pb:
        rel = b">"
    elif pa < pb:
        rel = b"<"
    elif pa | pb:
        rel = b"|"
    ui.write(_(b"a: %s\n") % pa)
    ui.write(_(b"b: %s\n") % pb)
    ui.write(_(b"depth(a): %d depth(b): %d\n") % (pa._depth, pb._depth))
    ui.write(
        _(b"delta: %d hdist: %d distance: %d relation: %s\n")
        % (
            abs(pa._depth - pb._depth),
            pvec._hamming(pa._vec, pb._vec),
            pa.distance(pb),
            rel,
        )
    )


@command(
    b'debugrebuilddirstate|debugrebuildstate',
    [
        (b'r', b'rev', b'', _(b'revision to rebuild to'), _(b'REV')),
        (
            b'',
            b'minimal',
            None,
            _(
                b'only rebuild files that are inconsistent with '
                b'the working copy parent'
            ),
        ),
    ],
    _(b'[-r REV]'),
)
def debugrebuilddirstate(ui, repo, rev, **opts):
    """rebuild the dirstate as it would look for the given revision

    If no revision is specified the first current parent will be used.

    The dirstate will be set to the files of the given revision.
    The actual working directory content or existing dirstate
    information such as adds or removes is not considered.

    ``minimal`` will only rebuild the dirstate status for files that claim to
    be tracked but are not in the parent manifest, or that exist in the parent
    manifest but are not in the dirstate. It will not change adds, removes, or
    modified files that are in the working copy parent.

    One use of this command is to make the next :hg:`status` invocation
    check the actual file content.
    """
    ctx = scmutil.revsingle(repo, rev)
    with repo.wlock():
        dirstate = repo.dirstate
        changedfiles = None
        # See command doc for what minimal does.
        if opts.get('minimal'):
            manifestfiles = set(ctx.manifest().keys())
            dirstatefiles = set(dirstate)
            manifestonly = manifestfiles - dirstatefiles
            dsonly = dirstatefiles - manifestfiles
            dsnotadded = {f for f in dsonly if dirstate[f] != b'a'}
            changedfiles = manifestonly | dsnotadded

        dirstate.rebuild(ctx.node(), ctx.manifest(), changedfiles)


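# A small worked example of the --minimal set arithmetic above, with
# hypothetical file names (kept in comments so it never executes):
#
#   manifestfiles = {b'kept', b'manifest-only'}
#   dirstatefiles = {b'kept', b'added', b'stale'}  # dirstate[b'added'] == b'a'
#   manifestonly  = {b'manifest-only'}       # tracked but missing in dirstate
#   dsonly        = {b'added', b'stale'}
#   dsnotadded    = {b'stale'}               # 'a' (added) entries are kept
#   changedfiles  = {b'manifest-only', b'stale'}  # only these are rebuilt
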
@command(b'debugrebuildfncache', [], b'')
def debugrebuildfncache(ui, repo):
    """rebuild the fncache file"""
    repair.rebuildfncache(ui, repo)


@command(
    b'debugrename',
    [(b'r', b'rev', b'', _(b'revision to debug'), _(b'REV'))],
    _(b'[-r REV] [FILE]...'),
)
def debugrename(ui, repo, *pats, **opts):
    """dump rename information"""

    opts = pycompat.byteskwargs(opts)
    ctx = scmutil.revsingle(repo, opts.get(b'rev'))
    m = scmutil.match(ctx, pats, opts)
    for abs in ctx.walk(m):
        fctx = ctx[abs]
        o = fctx.filelog().renamed(fctx.filenode())
        rel = repo.pathto(abs)
        if o:
            ui.write(_(b"%s renamed from %s:%s\n") % (rel, o[0], hex(o[1])))
        else:
            ui.write(_(b"%s not renamed\n") % rel)


@command(b'debugrequires|debugrequirements', [], b'')
def debugrequirements(ui, repo):
    """print the current repo requirements"""
    for r in sorted(repo.requirements):
        ui.write(b"%s\n" % r)


2917 @command(
2922 @command(
2918 b'debugrevlog',
2923 b'debugrevlog',
2919 cmdutil.debugrevlogopts + [(b'd', b'dump', False, _(b'dump index data'))],
2924 cmdutil.debugrevlogopts + [(b'd', b'dump', False, _(b'dump index data'))],
2920 _(b'-c|-m|FILE'),
2925 _(b'-c|-m|FILE'),
2921 optionalrepo=True,
2926 optionalrepo=True,
2922 )
2927 )
2923 def debugrevlog(ui, repo, file_=None, **opts):
2928 def debugrevlog(ui, repo, file_=None, **opts):
2924 """show data and statistics about a revlog"""
2929 """show data and statistics about a revlog"""
2925 opts = pycompat.byteskwargs(opts)
2930 opts = pycompat.byteskwargs(opts)
2926 r = cmdutil.openrevlog(repo, b'debugrevlog', file_, opts)
2931 r = cmdutil.openrevlog(repo, b'debugrevlog', file_, opts)
2927
2932
2928 if opts.get(b"dump"):
2933 if opts.get(b"dump"):
2929 numrevs = len(r)
2934 numrevs = len(r)
2930 ui.write(
2935 ui.write(
2931 (
2936 (
2932 b"# rev p1rev p2rev start end deltastart base p1 p2"
2937 b"# rev p1rev p2rev start end deltastart base p1 p2"
2933 b" rawsize totalsize compression heads chainlen\n"
2938 b" rawsize totalsize compression heads chainlen\n"
2934 )
2939 )
2935 )
2940 )
2936 ts = 0
2941 ts = 0
2937 heads = set()
2942 heads = set()
2938
2943
2939 for rev in pycompat.xrange(numrevs):
2944 for rev in pycompat.xrange(numrevs):
2940 dbase = r.deltaparent(rev)
2945 dbase = r.deltaparent(rev)
2941 if dbase == -1:
2946 if dbase == -1:
2942 dbase = rev
2947 dbase = rev
2943 cbase = r.chainbase(rev)
2948 cbase = r.chainbase(rev)
2944 clen = r.chainlen(rev)
2949 clen = r.chainlen(rev)
2945 p1, p2 = r.parentrevs(rev)
2950 p1, p2 = r.parentrevs(rev)
2946 rs = r.rawsize(rev)
2951 rs = r.rawsize(rev)
2947 ts = ts + rs
2952 ts = ts + rs
2948 heads -= set(r.parentrevs(rev))
2953 heads -= set(r.parentrevs(rev))
2949 heads.add(rev)
2954 heads.add(rev)
2950 try:
2955 try:
2951 compression = ts / r.end(rev)
2956 compression = ts / r.end(rev)
2952 except ZeroDivisionError:
2957 except ZeroDivisionError:
2953 compression = 0
2958 compression = 0
2954 ui.write(
2959 ui.write(
2955 b"%5d %5d %5d %5d %5d %10d %4d %4d %4d %7d %9d "
2960 b"%5d %5d %5d %5d %5d %10d %4d %4d %4d %7d %9d "
2956 b"%11d %5d %8d\n"
2961 b"%11d %5d %8d\n"
2957 % (
2962 % (
2958 rev,
2963 rev,
2959 p1,
2964 p1,
2960 p2,
2965 p2,
2961 r.start(rev),
2966 r.start(rev),
2962 r.end(rev),
2967 r.end(rev),
2963 r.start(dbase),
2968 r.start(dbase),
2964 r.start(cbase),
2969 r.start(cbase),
2965 r.start(p1),
2970 r.start(p1),
2966 r.start(p2),
2971 r.start(p2),
2967 rs,
2972 rs,
2968 ts,
2973 ts,
2969 compression,
2974 compression,
2970 len(heads),
2975 len(heads),
2971 clen,
2976 clen,
2972 )
2977 )
2973 )
2978 )
2974 return 0
2979 return 0
2975
2980
2976 format = r._format_version
2981 format = r._format_version
2977 v = r._format_flags
2982 v = r._format_flags
2978 flags = []
2983 flags = []
2979 gdelta = False
2984 gdelta = False
2980 if v & revlog.FLAG_INLINE_DATA:
2985 if v & revlog.FLAG_INLINE_DATA:
2981 flags.append(b'inline')
2986 flags.append(b'inline')
2982 if v & revlog.FLAG_GENERALDELTA:
2987 if v & revlog.FLAG_GENERALDELTA:
2983 gdelta = True
2988 gdelta = True
2984 flags.append(b'generaldelta')
2989 flags.append(b'generaldelta')
2985 if not flags:
2990 if not flags:
2986 flags = [b'(none)']
2991 flags = [b'(none)']
2987
2992
2988 ### tracks merge vs single parent
2993 ### tracks merge vs single parent
2989 nummerges = 0
2994 nummerges = 0
2990
2995
2991 ### tracks ways the "delta" are build
2996 ### tracks ways the "delta" are build
2992 # nodelta
2997 # nodelta
2993 numempty = 0
2998 numempty = 0
2994 numemptytext = 0
2999 numemptytext = 0
2995 numemptydelta = 0
3000 numemptydelta = 0
2996 # full file content
3001 # full file content
2997 numfull = 0
3002 numfull = 0
2998 # intermediate snapshot against a prior snapshot
3003 # intermediate snapshot against a prior snapshot
2999 numsemi = 0
3004 numsemi = 0
3000 # snapshot count per depth
3005 # snapshot count per depth
3001 numsnapdepth = collections.defaultdict(lambda: 0)
3006 numsnapdepth = collections.defaultdict(lambda: 0)
3002 # delta against previous revision
3007 # delta against previous revision
3003 numprev = 0
3008 numprev = 0
3004 # delta against first or second parent (not prev)
3009 # delta against first or second parent (not prev)
3005 nump1 = 0
3010 nump1 = 0
3006 nump2 = 0
3011 nump2 = 0
3007 # delta against neither prev nor parents
3012 # delta against neither prev nor parents
3008 numother = 0
3013 numother = 0
3009 # delta against prev that are also first or second parent
3014 # delta against prev that are also first or second parent
3010 # (details of `numprev`)
3015 # (details of `numprev`)
3011 nump1prev = 0
3016 nump1prev = 0
3012 nump2prev = 0
3017 nump2prev = 0
3013
3018
3014 # data about delta chain of each revs
3019 # data about delta chain of each revs
3015 chainlengths = []
3020 chainlengths = []
3016 chainbases = []
3021 chainbases = []
3017 chainspans = []
3022 chainspans = []
3018
3023
3019 # data about each revision
3024 # data about each revision
3020 datasize = [None, 0, 0]
3025 datasize = [None, 0, 0]
3021 fullsize = [None, 0, 0]
3026 fullsize = [None, 0, 0]
3022 semisize = [None, 0, 0]
3027 semisize = [None, 0, 0]
3023 # snapshot count per depth
3028 # snapshot count per depth
3024 snapsizedepth = collections.defaultdict(lambda: [None, 0, 0])
3029 snapsizedepth = collections.defaultdict(lambda: [None, 0, 0])
3025 deltasize = [None, 0, 0]
3030 deltasize = [None, 0, 0]
3026 chunktypecounts = {}
3031 chunktypecounts = {}
3027 chunktypesizes = {}
3032 chunktypesizes = {}
3028
3033
3029 def addsize(size, l):
3034 def addsize(size, l):
3030 if l[0] is None or size < l[0]:
3035 if l[0] is None or size < l[0]:
3031 l[0] = size
3036 l[0] = size
3032 if size > l[1]:
3037 if size > l[1]:
3033 l[1] = size
3038 l[1] = size
3034 l[2] += size
3039 l[2] += size
3035
3040
3036 numrevs = len(r)
3041 numrevs = len(r)
3037 for rev in pycompat.xrange(numrevs):
3042 for rev in pycompat.xrange(numrevs):
3038 p1, p2 = r.parentrevs(rev)
3043 p1, p2 = r.parentrevs(rev)
3039 delta = r.deltaparent(rev)
3044 delta = r.deltaparent(rev)
3040 if format > 0:
3045 if format > 0:
3041 addsize(r.rawsize(rev), datasize)
3046 addsize(r.rawsize(rev), datasize)
3042 if p2 != nullrev:
3047 if p2 != nullrev:
3043 nummerges += 1
3048 nummerges += 1
3044 size = r.length(rev)
3049 size = r.length(rev)
3045 if delta == nullrev:
3050 if delta == nullrev:
3046 chainlengths.append(0)
3051 chainlengths.append(0)
3047 chainbases.append(r.start(rev))
3052 chainbases.append(r.start(rev))
3048 chainspans.append(size)
3053 chainspans.append(size)
3049 if size == 0:
3054 if size == 0:
3050 numempty += 1
3055 numempty += 1
3051 numemptytext += 1
3056 numemptytext += 1
3052 else:
3057 else:
3053 numfull += 1
3058 numfull += 1
3054 numsnapdepth[0] += 1
3059 numsnapdepth[0] += 1
3055 addsize(size, fullsize)
3060 addsize(size, fullsize)
3056 addsize(size, snapsizedepth[0])
3061 addsize(size, snapsizedepth[0])
3057 else:
3062 else:
3058 chainlengths.append(chainlengths[delta] + 1)
3063 chainlengths.append(chainlengths[delta] + 1)
3059 baseaddr = chainbases[delta]
3064 baseaddr = chainbases[delta]
3060 revaddr = r.start(rev)
3065 revaddr = r.start(rev)
3061 chainbases.append(baseaddr)
3066 chainbases.append(baseaddr)
3062 chainspans.append((revaddr - baseaddr) + size)
3067 chainspans.append((revaddr - baseaddr) + size)
3063 if size == 0:
3068 if size == 0:
3064 numempty += 1
3069 numempty += 1
3065 numemptydelta += 1
3070 numemptydelta += 1
3066 elif r.issnapshot(rev):
3071 elif r.issnapshot(rev):
3067 addsize(size, semisize)
3072 addsize(size, semisize)
3068 numsemi += 1
3073 numsemi += 1
3069 depth = r.snapshotdepth(rev)
3074 depth = r.snapshotdepth(rev)
3070 numsnapdepth[depth] += 1
3075 numsnapdepth[depth] += 1
3071 addsize(size, snapsizedepth[depth])
3076 addsize(size, snapsizedepth[depth])
3072 else:
3077 else:
3073 addsize(size, deltasize)
3078 addsize(size, deltasize)
3074 if delta == rev - 1:
3079 if delta == rev - 1:
3075 numprev += 1
3080 numprev += 1
3076 if delta == p1:
3081 if delta == p1:
3077 nump1prev += 1
3082 nump1prev += 1
3078 elif delta == p2:
3083 elif delta == p2:
3079 nump2prev += 1
3084 nump2prev += 1
3080 elif delta == p1:
3085 elif delta == p1:
3081 nump1 += 1
3086 nump1 += 1
3082 elif delta == p2:
3087 elif delta == p2:
3083 nump2 += 1
3088 nump2 += 1
3084 elif delta != nullrev:
3089 elif delta != nullrev:
3085 numother += 1
3090 numother += 1
3086
3091
3087 # Obtain data on the raw chunks in the revlog.
3092 # Obtain data on the raw chunks in the revlog.
3088 if util.safehasattr(r, b'_getsegmentforrevs'):
3093 if util.safehasattr(r, b'_getsegmentforrevs'):
3089 segment = r._getsegmentforrevs(rev, rev)[1]
3094 segment = r._getsegmentforrevs(rev, rev)[1]
3090 else:
3095 else:
3091 segment = r._revlog._getsegmentforrevs(rev, rev)[1]
3096 segment = r._revlog._getsegmentforrevs(rev, rev)[1]
3092 if segment:
3097 if segment:
3093 chunktype = bytes(segment[0:1])
3098 chunktype = bytes(segment[0:1])
3094 else:
3099 else:
3095 chunktype = b'empty'
3100 chunktype = b'empty'
3096
3101
3097 if chunktype not in chunktypecounts:
3102 if chunktype not in chunktypecounts:
3098 chunktypecounts[chunktype] = 0
3103 chunktypecounts[chunktype] = 0
3099 chunktypesizes[chunktype] = 0
3104 chunktypesizes[chunktype] = 0
3100
3105
3101 chunktypecounts[chunktype] += 1
3106 chunktypecounts[chunktype] += 1
3102 chunktypesizes[chunktype] += size
3107 chunktypesizes[chunktype] += size
3103
3108
3104 # Adjust size min value for empty cases
3109 # Adjust size min value for empty cases
3105 for size in (datasize, fullsize, semisize, deltasize):
3110 for size in (datasize, fullsize, semisize, deltasize):
3106 if size[0] is None:
3111 if size[0] is None:
3107 size[0] = 0
3112 size[0] = 0
3108
3113
3109 numdeltas = numrevs - numfull - numempty - numsemi
3114 numdeltas = numrevs - numfull - numempty - numsemi
3110 numoprev = numprev - nump1prev - nump2prev
3115 numoprev = numprev - nump1prev - nump2prev
3111 totalrawsize = datasize[2]
3116 totalrawsize = datasize[2]
3112 datasize[2] /= numrevs
3117 datasize[2] /= numrevs
3113 fulltotal = fullsize[2]
3118 fulltotal = fullsize[2]
3114 if numfull == 0:
3119 if numfull == 0:
3115 fullsize[2] = 0
3120 fullsize[2] = 0
3116 else:
3121 else:
3117 fullsize[2] /= numfull
3122 fullsize[2] /= numfull
3118 semitotal = semisize[2]
3123 semitotal = semisize[2]
3119 snaptotal = {}
3124 snaptotal = {}
3120 if numsemi > 0:
3125 if numsemi > 0:
3121 semisize[2] /= numsemi
3126 semisize[2] /= numsemi
3122 for depth in snapsizedepth:
3127 for depth in snapsizedepth:
3123 snaptotal[depth] = snapsizedepth[depth][2]
3128 snaptotal[depth] = snapsizedepth[depth][2]
3124 snapsizedepth[depth][2] /= numsnapdepth[depth]
3129 snapsizedepth[depth][2] /= numsnapdepth[depth]
3125
3130
3126 deltatotal = deltasize[2]
3131 deltatotal = deltasize[2]
3127 if numdeltas > 0:
3132 if numdeltas > 0:
3128 deltasize[2] /= numdeltas
3133 deltasize[2] /= numdeltas
3129 totalsize = fulltotal + semitotal + deltatotal
3134 totalsize = fulltotal + semitotal + deltatotal
3130 avgchainlen = sum(chainlengths) / numrevs
3135 avgchainlen = sum(chainlengths) / numrevs
3131 maxchainlen = max(chainlengths)
3136 maxchainlen = max(chainlengths)
3132 maxchainspan = max(chainspans)
3137 maxchainspan = max(chainspans)
3133 compratio = 1
3138 compratio = 1
3134 if totalsize:
3139 if totalsize:
3135 compratio = totalrawsize / totalsize
3140 compratio = totalrawsize / totalsize
3136
3141
3137 basedfmtstr = b'%%%dd\n'
3142 basedfmtstr = b'%%%dd\n'
3138 basepcfmtstr = b'%%%dd %s(%%5.2f%%%%)\n'
3143 basepcfmtstr = b'%%%dd %s(%%5.2f%%%%)\n'
3139
3144
3140 def dfmtstr(max):
3145 def dfmtstr(max):
3141 return basedfmtstr % len(str(max))
3146 return basedfmtstr % len(str(max))
3142
3147
3143 def pcfmtstr(max, padding=0):
3148 def pcfmtstr(max, padding=0):
3144 return basepcfmtstr % (len(str(max)), b' ' * padding)
3149 return basepcfmtstr % (len(str(max)), b' ' * padding)
3145
3150
3146 def pcfmt(value, total):
3151 def pcfmt(value, total):
3147 if total:
3152 if total:
3148 return (value, 100 * float(value) / total)
3153 return (value, 100 * float(value) / total)
3149 else:
3154 else:
3150 return value, 100.0
3155 return value, 100.0
3151
3156
3152 ui.writenoi18n(b'format : %d\n' % format)
3157 ui.writenoi18n(b'format : %d\n' % format)
3153 ui.writenoi18n(b'flags : %s\n' % b', '.join(flags))
3158 ui.writenoi18n(b'flags : %s\n' % b', '.join(flags))
3154
3159
3155 ui.write(b'\n')
3160 ui.write(b'\n')
3156 fmt = pcfmtstr(totalsize)
3161 fmt = pcfmtstr(totalsize)
3157 fmt2 = dfmtstr(totalsize)
3162 fmt2 = dfmtstr(totalsize)
3158 ui.writenoi18n(b'revisions : ' + fmt2 % numrevs)
3163 ui.writenoi18n(b'revisions : ' + fmt2 % numrevs)
3159 ui.writenoi18n(b' merges : ' + fmt % pcfmt(nummerges, numrevs))
3164 ui.writenoi18n(b' merges : ' + fmt % pcfmt(nummerges, numrevs))
3160 ui.writenoi18n(
3165 ui.writenoi18n(
3161 b' normal : ' + fmt % pcfmt(numrevs - nummerges, numrevs)
3166 b' normal : ' + fmt % pcfmt(numrevs - nummerges, numrevs)
3162 )
3167 )
3163 ui.writenoi18n(b'revisions : ' + fmt2 % numrevs)
3168 ui.writenoi18n(b'revisions : ' + fmt2 % numrevs)
3164 ui.writenoi18n(b' empty : ' + fmt % pcfmt(numempty, numrevs))
3169 ui.writenoi18n(b' empty : ' + fmt % pcfmt(numempty, numrevs))
3165 ui.writenoi18n(
3170 ui.writenoi18n(
3166 b' text : '
3171 b' text : '
3167 + fmt % pcfmt(numemptytext, numemptytext + numemptydelta)
3172 + fmt % pcfmt(numemptytext, numemptytext + numemptydelta)
3168 )
3173 )
3169 ui.writenoi18n(
3174 ui.writenoi18n(
3170 b' delta : '
3175 b' delta : '
3171 + fmt % pcfmt(numemptydelta, numemptytext + numemptydelta)
3176 + fmt % pcfmt(numemptydelta, numemptytext + numemptydelta)
3172 )
3177 )
3173 ui.writenoi18n(
3178 ui.writenoi18n(
3174 b' snapshot : ' + fmt % pcfmt(numfull + numsemi, numrevs)
3179 b' snapshot : ' + fmt % pcfmt(numfull + numsemi, numrevs)
3175 )
3180 )
3176 for depth in sorted(numsnapdepth):
3181 for depth in sorted(numsnapdepth):
3177 ui.write(
3182 ui.write(
3178 (b' lvl-%-3d : ' % depth)
3183 (b' lvl-%-3d : ' % depth)
3179 + fmt % pcfmt(numsnapdepth[depth], numrevs)
3184 + fmt % pcfmt(numsnapdepth[depth], numrevs)
3180 )
3185 )
3181 ui.writenoi18n(b' deltas : ' + fmt % pcfmt(numdeltas, numrevs))
3186 ui.writenoi18n(b' deltas : ' + fmt % pcfmt(numdeltas, numrevs))
3182 ui.writenoi18n(b'revision size : ' + fmt2 % totalsize)
3187 ui.writenoi18n(b'revision size : ' + fmt2 % totalsize)
3183 ui.writenoi18n(
3188 ui.writenoi18n(
3184 b' snapshot : ' + fmt % pcfmt(fulltotal + semitotal, totalsize)
3189 b' snapshot : ' + fmt % pcfmt(fulltotal + semitotal, totalsize)
3185 )
3190 )
3186 for depth in sorted(numsnapdepth):
3191 for depth in sorted(numsnapdepth):
3187 ui.write(
3192 ui.write(
3188 (b' lvl-%-3d : ' % depth)
3193 (b' lvl-%-3d : ' % depth)
3189 + fmt % pcfmt(snaptotal[depth], totalsize)
3194 + fmt % pcfmt(snaptotal[depth], totalsize)
3190 )
3195 )
3191 ui.writenoi18n(b' deltas : ' + fmt % pcfmt(deltatotal, totalsize))
3196 ui.writenoi18n(b' deltas : ' + fmt % pcfmt(deltatotal, totalsize))
3192
3197
3193 def fmtchunktype(chunktype):
3198 def fmtchunktype(chunktype):
3194 if chunktype == b'empty':
3199 if chunktype == b'empty':
3195 return b' %s : ' % chunktype
3200 return b' %s : ' % chunktype
3196 elif chunktype in pycompat.bytestr(string.ascii_letters):
3201 elif chunktype in pycompat.bytestr(string.ascii_letters):
3197 return b' 0x%s (%s) : ' % (hex(chunktype), chunktype)
3202 return b' 0x%s (%s) : ' % (hex(chunktype), chunktype)
3198 else:
3203 else:
3199 return b' 0x%s : ' % hex(chunktype)
3204 return b' 0x%s : ' % hex(chunktype)
3200
3205
3201 ui.write(b'\n')
3206 ui.write(b'\n')
3202 ui.writenoi18n(b'chunks : ' + fmt2 % numrevs)
3207 ui.writenoi18n(b'chunks : ' + fmt2 % numrevs)
3203 for chunktype in sorted(chunktypecounts):
3208 for chunktype in sorted(chunktypecounts):
3204 ui.write(fmtchunktype(chunktype))
3209 ui.write(fmtchunktype(chunktype))
3205 ui.write(fmt % pcfmt(chunktypecounts[chunktype], numrevs))
3210 ui.write(fmt % pcfmt(chunktypecounts[chunktype], numrevs))
3206 ui.writenoi18n(b'chunks size : ' + fmt2 % totalsize)
3211 ui.writenoi18n(b'chunks size : ' + fmt2 % totalsize)
3207 for chunktype in sorted(chunktypecounts):
3212 for chunktype in sorted(chunktypecounts):
3208 ui.write(fmtchunktype(chunktype))
3213 ui.write(fmtchunktype(chunktype))
3209 ui.write(fmt % pcfmt(chunktypesizes[chunktype], totalsize))
3214 ui.write(fmt % pcfmt(chunktypesizes[chunktype], totalsize))
3210
3215
3211 ui.write(b'\n')
3216 ui.write(b'\n')
3212 fmt = dfmtstr(max(avgchainlen, maxchainlen, maxchainspan, compratio))
3217 fmt = dfmtstr(max(avgchainlen, maxchainlen, maxchainspan, compratio))
3213 ui.writenoi18n(b'avg chain length : ' + fmt % avgchainlen)
3218 ui.writenoi18n(b'avg chain length : ' + fmt % avgchainlen)
3214 ui.writenoi18n(b'max chain length : ' + fmt % maxchainlen)
3219 ui.writenoi18n(b'max chain length : ' + fmt % maxchainlen)
3215 ui.writenoi18n(b'max chain reach : ' + fmt % maxchainspan)
3220 ui.writenoi18n(b'max chain reach : ' + fmt % maxchainspan)
3216 ui.writenoi18n(b'compression ratio : ' + fmt % compratio)
3221 ui.writenoi18n(b'compression ratio : ' + fmt % compratio)
3217
3222
3218 if format > 0:
3223 if format > 0:
3219 ui.write(b'\n')
3224 ui.write(b'\n')
3220 ui.writenoi18n(
3225 ui.writenoi18n(
3221 b'uncompressed data size (min/max/avg) : %d / %d / %d\n'
3226 b'uncompressed data size (min/max/avg) : %d / %d / %d\n'
3222 % tuple(datasize)
3227 % tuple(datasize)
3223 )
3228 )
3224 ui.writenoi18n(
3229 ui.writenoi18n(
3225 b'full revision size (min/max/avg) : %d / %d / %d\n'
3230 b'full revision size (min/max/avg) : %d / %d / %d\n'
3226 % tuple(fullsize)
3231 % tuple(fullsize)
3227 )
3232 )
3228 ui.writenoi18n(
3233 ui.writenoi18n(
3229 b'inter-snapshot size (min/max/avg) : %d / %d / %d\n'
3234 b'inter-snapshot size (min/max/avg) : %d / %d / %d\n'
3230 % tuple(semisize)
3235 % tuple(semisize)
3231 )
3236 )
3232 for depth in sorted(snapsizedepth):
3237 for depth in sorted(snapsizedepth):
3233 if depth == 0:
3238 if depth == 0:
3234 continue
3239 continue
3235 ui.writenoi18n(
3240 ui.writenoi18n(
3236 b' level-%-3d (min/max/avg) : %d / %d / %d\n'
3241 b' level-%-3d (min/max/avg) : %d / %d / %d\n'
3237 % ((depth,) + tuple(snapsizedepth[depth]))
3242 % ((depth,) + tuple(snapsizedepth[depth]))
3238 )
3243 )
3239 ui.writenoi18n(
3244 ui.writenoi18n(
3240 b'delta size (min/max/avg) : %d / %d / %d\n'
3245 b'delta size (min/max/avg) : %d / %d / %d\n'
3241 % tuple(deltasize)
3246 % tuple(deltasize)
3242 )
3247 )
3243
3248
3244 if numdeltas > 0:
3249 if numdeltas > 0:
3245 ui.write(b'\n')
3250 ui.write(b'\n')
3246 fmt = pcfmtstr(numdeltas)
3251 fmt = pcfmtstr(numdeltas)
3247 fmt2 = pcfmtstr(numdeltas, 4)
3252 fmt2 = pcfmtstr(numdeltas, 4)
3248 ui.writenoi18n(
3253 ui.writenoi18n(
3249 b'deltas against prev : ' + fmt % pcfmt(numprev, numdeltas)
3254 b'deltas against prev : ' + fmt % pcfmt(numprev, numdeltas)
3250 )
3255 )
3251 if numprev > 0:
3256 if numprev > 0:
3252 ui.writenoi18n(
3257 ui.writenoi18n(
3253 b' where prev = p1 : ' + fmt2 % pcfmt(nump1prev, numprev)
3258 b' where prev = p1 : ' + fmt2 % pcfmt(nump1prev, numprev)
3254 )
3259 )
3255 ui.writenoi18n(
3260 ui.writenoi18n(
3256 b' where prev = p2 : ' + fmt2 % pcfmt(nump2prev, numprev)
3261 b' where prev = p2 : ' + fmt2 % pcfmt(nump2prev, numprev)
3257 )
3262 )
3258 ui.writenoi18n(
3263 ui.writenoi18n(
3259 b' other : ' + fmt2 % pcfmt(numoprev, numprev)
3264 b' other : ' + fmt2 % pcfmt(numoprev, numprev)
3260 )
3265 )
3261 if gdelta:
3266 if gdelta:
3262 ui.writenoi18n(
3267 ui.writenoi18n(
3263 b'deltas against p1 : ' + fmt % pcfmt(nump1, numdeltas)
3268 b'deltas against p1 : ' + fmt % pcfmt(nump1, numdeltas)
3264 )
3269 )
3265 ui.writenoi18n(
3270 ui.writenoi18n(
3266 b'deltas against p2 : ' + fmt % pcfmt(nump2, numdeltas)
3271 b'deltas against p2 : ' + fmt % pcfmt(nump2, numdeltas)
3267 )
3272 )
3268 ui.writenoi18n(
3273 ui.writenoi18n(
3269 b'deltas against other : ' + fmt % pcfmt(numother, numdeltas)
3274 b'deltas against other : ' + fmt % pcfmt(numother, numdeltas)
3270 )
3275 )
3271
3276
3272
3277
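# A quick sketch (illustrative only) of the percentage helpers above:
# pcfmt() pairs a value with its share of a total, and pcfmtstr() derives
# the field width from the largest value it will ever print:
#
#     >>> value, total = 25, 200
#     >>> (value, 100 * float(value) / total)
#     (25, 12.5)
#
# which b'%3d (%5.2f%%)\n' then renders as b' 25 (12.50%)\n'.
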
@command(
    b'debugrevlogindex',
    cmdutil.debugrevlogopts
    + [(b'f', b'format', 0, _(b'revlog format'), _(b'FORMAT'))],
    _(b'[-f FORMAT] -c|-m|FILE'),
    optionalrepo=True,
)
def debugrevlogindex(ui, repo, file_=None, **opts):
    """dump the contents of a revlog index"""
    opts = pycompat.byteskwargs(opts)
    r = cmdutil.openrevlog(repo, b'debugrevlogindex', file_, opts)
    format = opts.get(b'format', 0)
    if format not in (0, 1):
        raise error.Abort(_(b"unknown format %d") % format)

    if ui.debugflag:
        shortfn = hex
    else:
        shortfn = short

    # There might not be anything in r, so have a sane default
    idlen = 12
    for i in r:
        idlen = len(shortfn(r.node(i)))
        break

    if format == 0:
        if ui.verbose:
            ui.writenoi18n(
                b"   rev    offset  length linkrev %s %s p2\n"
                % (b"nodeid".ljust(idlen), b"p1".ljust(idlen))
            )
        else:
            ui.writenoi18n(
                b"   rev linkrev %s %s p2\n"
                % (b"nodeid".ljust(idlen), b"p1".ljust(idlen))
            )
    elif format == 1:
        if ui.verbose:
            ui.writenoi18n(
                (
                    b"   rev flag   offset   length     size   link     p1"
                    b"     p2 %s\n"
                )
                % b"nodeid".rjust(idlen)
            )
        else:
            ui.writenoi18n(
                b"   rev flag     size   link     p1     p2 %s\n"
                % b"nodeid".rjust(idlen)
            )

    for i in r:
        node = r.node(i)
        if format == 0:
            try:
                pp = r.parents(node)
            except Exception:
                pp = [repo.nullid, repo.nullid]
            if ui.verbose:
                ui.write(
                    b"% 6d % 9d % 7d % 7d %s %s %s\n"
                    % (
                        i,
                        r.start(i),
                        r.length(i),
                        r.linkrev(i),
                        shortfn(node),
                        shortfn(pp[0]),
                        shortfn(pp[1]),
                    )
                )
            else:
                ui.write(
                    b"% 6d % 7d %s %s %s\n"
                    % (
                        i,
                        r.linkrev(i),
                        shortfn(node),
                        shortfn(pp[0]),
                        shortfn(pp[1]),
                    )
                )
        elif format == 1:
            pr = r.parentrevs(i)
            if ui.verbose:
                ui.write(
                    b"% 6d %04x % 8d % 8d % 8d % 6d % 6d % 6d %s\n"
                    % (
                        i,
                        r.flags(i),
                        r.start(i),
                        r.length(i),
                        r.rawsize(i),
                        r.linkrev(i),
                        pr[0],
                        pr[1],
                        shortfn(node),
                    )
                )
            else:
                ui.write(
                    b"% 6d %04x % 8d % 6d % 6d % 6d %s\n"
                    % (
                        i,
                        r.flags(i),
                        r.rawsize(i),
                        r.linkrev(i),
                        pr[0],
                        pr[1],
                        shortfn(node),
                    )
                )


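# Illustrative note (hypothetical values): with --format 0 and --verbose,
# each index entry above becomes one row along the lines of
#
#     rev offset length linkrev nodeid       p1           p2
#       0      0     63       0 <short hash> 000000000000 000000000000
#
# with the node columns widened to full hashes under --debug.
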
@command(
    b'debugrevspec',
    [
        (
            b'',
            b'optimize',
            None,
            _(b'print parsed tree after optimizing (DEPRECATED)'),
        ),
        (
            b'',
            b'show-revs',
            True,
            _(b'print list of result revisions (default)'),
        ),
        (
            b's',
            b'show-set',
            None,
            _(b'print internal representation of result set'),
        ),
        (
            b'p',
            b'show-stage',
            [],
            _(b'print parsed tree at the given stage'),
            _(b'NAME'),
        ),
        (b'', b'no-optimized', False, _(b'evaluate tree without optimization')),
        (b'', b'verify-optimized', False, _(b'verify optimized result')),
    ],
    b'REVSPEC',
)
def debugrevspec(ui, repo, expr, **opts):
    """parse and apply a revision specification

    Use -p/--show-stage option to print the parsed tree at the given stages.
    Use -p all to print tree at every stage.

    Use --no-show-revs option with -s or -p to print only the set
    representation or the parsed tree respectively.

    Use --verify-optimized to compare the optimized result with the unoptimized
    one. Returns 1 if the optimized result differs.
    """
    opts = pycompat.byteskwargs(opts)
    aliases = ui.configitems(b'revsetalias')
    stages = [
        (b'parsed', lambda tree: tree),
        (
            b'expanded',
            lambda tree: revsetlang.expandaliases(tree, aliases, ui.warn),
        ),
        (b'concatenated', revsetlang.foldconcat),
        (b'analyzed', revsetlang.analyze),
        (b'optimized', revsetlang.optimize),
    ]
    if opts[b'no_optimized']:
        stages = stages[:-1]
    if opts[b'verify_optimized'] and opts[b'no_optimized']:
        raise error.Abort(
            _(b'cannot use --verify-optimized with --no-optimized')
        )
    stagenames = {n for n, f in stages}

    showalways = set()
    showchanged = set()
    if ui.verbose and not opts[b'show_stage']:
        # show parsed tree by --verbose (deprecated)
        showalways.add(b'parsed')
        showchanged.update([b'expanded', b'concatenated'])
        if opts[b'optimize']:
            showalways.add(b'optimized')
    if opts[b'show_stage'] and opts[b'optimize']:
        raise error.Abort(_(b'cannot use --optimize with --show-stage'))
    if opts[b'show_stage'] == [b'all']:
        showalways.update(stagenames)
    else:
        for n in opts[b'show_stage']:
            if n not in stagenames:
                raise error.Abort(_(b'invalid stage name: %s') % n)
        showalways.update(opts[b'show_stage'])

    treebystage = {}
    printedtree = None
    tree = revsetlang.parse(expr, lookup=revset.lookupfn(repo))
    for n, f in stages:
        treebystage[n] = tree = f(tree)
        if n in showalways or (n in showchanged and tree != printedtree):
            if opts[b'show_stage'] or n != b'parsed':
                ui.write(b"* %s:\n" % n)
            ui.write(revsetlang.prettyformat(tree), b"\n")
            printedtree = tree

    if opts[b'verify_optimized']:
        arevs = revset.makematcher(treebystage[b'analyzed'])(repo)
        brevs = revset.makematcher(treebystage[b'optimized'])(repo)
        if opts[b'show_set'] or (opts[b'show_set'] is None and ui.verbose):
            ui.writenoi18n(
                b"* analyzed set:\n", stringutil.prettyrepr(arevs), b"\n"
            )
            ui.writenoi18n(
                b"* optimized set:\n", stringutil.prettyrepr(brevs), b"\n"
            )
        arevs = list(arevs)
        brevs = list(brevs)
        if arevs == brevs:
            return 0
        ui.writenoi18n(b'--- analyzed\n', label=b'diff.file_a')
        ui.writenoi18n(b'+++ optimized\n', label=b'diff.file_b')
        sm = difflib.SequenceMatcher(None, arevs, brevs)
        for tag, alo, ahi, blo, bhi in sm.get_opcodes():
            if tag in ('delete', 'replace'):
                for c in arevs[alo:ahi]:
                    ui.write(b'-%d\n' % c, label=b'diff.deleted')
            if tag in ('insert', 'replace'):
                for c in brevs[blo:bhi]:
                    ui.write(b'+%d\n' % c, label=b'diff.inserted')
            if tag == 'equal':
                for c in arevs[alo:ahi]:
                    ui.write(b' %d\n' % c)
        return 1

    func = revset.makematcher(tree)
    revs = func(repo)
    if opts[b'show_set'] or (opts[b'show_set'] is None and ui.verbose):
        ui.writenoi18n(b"* set:\n", stringutil.prettyrepr(revs), b"\n")
    if not opts[b'show_revs']:
        return
    for c in revs:
        ui.write(b"%d\n" % c)


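# A minimal sketch (not part of this module) of the stage pipeline used
# above: each stage is a (name, transform) pair folded over the parsed
# tree, so evaluating every stage amounts to
#
#     tree = revsetlang.parse(expr, lookup=revset.lookupfn(repo))
#     for name, transform in stages:
#         tree = transform(tree)  # expand aliases, fold, analyze, optimize
#     revs = revset.makematcher(tree)(repo)
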
@command(
    b'debugserve',
    [
        (
            b'',
            b'sshstdio',
            False,
            _(b'run an SSH server bound to process handles'),
        ),
        (b'', b'logiofd', b'', _(b'file descriptor to log server I/O to')),
        (b'', b'logiofile', b'', _(b'file to log server I/O to')),
    ],
    b'',
)
def debugserve(ui, repo, **opts):
    """run a server with advanced settings

    This command is similar to :hg:`serve`. It exists partially as a
    workaround to the fact that ``hg serve --stdio`` must have specific
    arguments for security reasons.
    """
    opts = pycompat.byteskwargs(opts)

    if not opts[b'sshstdio']:
        raise error.Abort(_(b'only --sshstdio is currently supported'))

    logfh = None

    if opts[b'logiofd'] and opts[b'logiofile']:
        raise error.Abort(_(b'cannot use both --logiofd and --logiofile'))

    if opts[b'logiofd']:
        # Ideally we would be line buffered. But line buffering in binary
        # mode isn't supported and emits a warning in Python 3.8+. Disabling
        # buffering could have performance impacts. But since this isn't
        # performance critical code, it should be fine.
        try:
            logfh = os.fdopen(int(opts[b'logiofd']), 'ab', 0)
        except OSError as e:
            if e.errno != errno.ESPIPE:
                raise
            # can't seek a pipe, so `ab` mode fails on py3
            logfh = os.fdopen(int(opts[b'logiofd']), 'wb', 0)
    elif opts[b'logiofile']:
        logfh = open(opts[b'logiofile'], b'ab', 0)

    s = wireprotoserver.sshserver(ui, repo, logfh=logfh)
    s.serve_forever()


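# Illustrative invocation (assumed shell usage, not tested here): pass an
# inherited file descriptor and the server mirrors all wire I/O to it; the
# `ab` open mode is retried as `wb` above because pipes cannot seek:
#
#     $ hg debugserve --sshstdio --logiofd 3 3>server-io.log
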
@command(b'debugsetparents', [], _(b'REV1 [REV2]'))
def debugsetparents(ui, repo, rev1, rev2=None):
    """manually set the parents of the current working directory (DANGEROUS)

    This command is not what you are looking for and should not be used. Using
    this command will most certainly result in slight corruption of the file
    level histories within your repository. DO NOT USE THIS COMMAND.

    The command updates the p1 and p2 fields in the dirstate, and does not
    touch anything else. This is useful for writing repository conversion
    tools, but should be used with extreme care. For example, neither the
    working directory contents nor the dirstate file statuses are updated, so
    file status may be incorrect after running this command. Only use it if
    you are one of the few people who deeply understand both conversion tools
    and file level histories. If you are reading this help, you are not one of
    those people (most of them sailed west from Mithlond anyway).

    So one last time DO NOT USE THIS COMMAND.

    Returns 0 on success.
    """

    node1 = scmutil.revsingle(repo, rev1).node()
    node2 = scmutil.revsingle(repo, rev2, b'null').node()

    with repo.wlock():
        repo.setparents(node1, node2)


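# Illustrative note (a sketch of the effect, emphatically not an
# endorsement): after
#
#     $ hg debugsetparents REV1 REV2
#
# only the dirstate's parent pointers change; `hg parents` reports REV1
# and REV2 while the working directory files stay exactly as they were.
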
@command(b'debugsidedata', cmdutil.debugrevlogopts, _(b'-c|-m|FILE REV'))
def debugsidedata(ui, repo, file_, rev=None, **opts):
    """dump the side data for a cl/manifest/file revision

    Use --verbose to dump the sidedata content."""
    opts = pycompat.byteskwargs(opts)
    if opts.get(b'changelog') or opts.get(b'manifest') or opts.get(b'dir'):
        if rev is not None:
            raise error.CommandError(b'debugdata', _(b'invalid arguments'))
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'debugdata', _(b'invalid arguments'))
    r = cmdutil.openstorage(repo, b'debugdata', file_, opts)
    r = getattr(r, '_revlog', r)
    try:
        sidedata = r.sidedata(r.lookup(rev))
    except KeyError:
        raise error.Abort(_(b'invalid revision identifier %s') % rev)
    if sidedata:
        sidedata = list(sidedata.items())
        sidedata.sort()
        ui.writenoi18n(b'%d sidedata entries\n' % len(sidedata))
        for key, value in sidedata:
            ui.writenoi18n(b' entry-%04o size %d\n' % (key, len(value)))
            if ui.verbose:
                ui.writenoi18n(b'  %s\n' % stringutil.pprint(value))


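# Illustrative output sketch (hypothetical keys and sizes): a revision
# carrying two sidedata entries would be reported by the loop above as
#
#     2 sidedata entries
#      entry-0001 size 14
#      entry-0002 size 32
#
# with the raw values pretty-printed underneath when --verbose is set.
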
@command(b'debugssl', [], b'[SOURCE]', optionalrepo=True)
def debugssl(ui, repo, source=None, **opts):
    """test a secure connection to a server

    This builds the certificate chain for the server on Windows, installing the
    missing intermediates and trusted root via Windows Update if necessary. It
    does nothing on other platforms.

    If SOURCE is omitted, the 'default' path will be used. If a URL is given,
    that server is used. See :hg:`help urls` for more information.

    If the update succeeds, retry the original operation. Otherwise, the cause
    of the SSL error is likely another issue.
    """
    if not pycompat.iswindows:
        raise error.Abort(
            _(b'certificate chain building is only possible on Windows')
        )

    if not source:
        if not repo:
            raise error.Abort(
                _(
                    b"there is no Mercurial repository here, and no "
                    b"server specified"
                )
            )
        source = b"default"

    source, branches = urlutil.get_unique_pull_path(
        b'debugssl', repo, ui, source
    )
    url = urlutil.url(source)

    defaultport = {b'https': 443, b'ssh': 22}
    if url.scheme in defaultport:
        try:
            addr = (url.host, int(url.port or defaultport[url.scheme]))
        except ValueError:
            raise error.Abort(_(b"malformed port number in URL"))
    else:
        raise error.Abort(_(b"only https and ssh connections are supported"))

    from . import win32

    s = ssl.wrap_socket(
        socket.socket(),
        ssl_version=ssl.PROTOCOL_TLS,
        cert_reqs=ssl.CERT_NONE,
        ca_certs=None,
    )

    try:
        s.connect(addr)
        cert = s.getpeercert(True)

        ui.status(_(b'checking the certificate chain for %s\n') % url.host)

        complete = win32.checkcertificatechain(cert, build=False)

        if not complete:
            ui.status(_(b'certificate chain is incomplete, updating... '))

            if not win32.checkcertificatechain(cert):
                ui.status(_(b'failed.\n'))
            else:
                ui.status(_(b'done.\n'))
        else:
            ui.status(_(b'full certificate chain is available\n'))
    finally:
        s.close()


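# Illustrative note (assumed URLs): the default-port fallback above maps a
# URL without an explicit port to the scheme's standard port, e.g.
#
#     https://example.com/repo    -> ('example.com', 443)
#     ssh://example.com:2222/repo -> ('example.com', 2222)
#
# and any other scheme is rejected before a socket is opened.
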
@command(
    b"debugbackupbundle",
    [
        (
            b"",
            b"recover",
            b"",
            b"brings the specified changeset back into the repository",
        )
    ]
    + cmdutil.logopts,
    _(b"hg debugbackupbundle [--recover HASH]"),
)
def debugbackupbundle(ui, repo, *pats, **opts):
    """lists the changesets available in backup bundles

    Without any arguments, this command prints a list of the changesets in each
    backup bundle.

    --recover takes a changeset hash and unbundles the first bundle that
    contains that hash, which puts that changeset back in your repository.

    --verbose will print the entire commit message and the bundle path for that
    backup.
    """
    backups = list(
        filter(
            os.path.isfile, glob.glob(repo.vfs.join(b"strip-backup") + b"/*.hg")
        )
    )
    backups.sort(key=lambda x: os.path.getmtime(x), reverse=True)

    opts = pycompat.byteskwargs(opts)
    opts[b"bundle"] = b""
    opts[b"force"] = None
    limit = logcmdutil.getlimit(opts)

    def display(other, chlist, displayer):
        if opts.get(b"newest_first"):
            chlist.reverse()
        count = 0
        for n in chlist:
            if limit is not None and count >= limit:
                break
            parents = [
                True for p in other.changelog.parents(n) if p != repo.nullid
            ]
            if opts.get(b"no_merges") and len(parents) == 2:
                continue
            count += 1
            displayer.show(other[n])

    recovernode = opts.get(b"recover")
    if recovernode:
        if scmutil.isrevsymbol(repo, recovernode):
            ui.warn(_(b"%s already exists in the repo\n") % recovernode)
            return
    elif backups:
        msg = _(
            b"Recover changesets using: hg debugbackupbundle --recover "
            b"<changeset hash>\n\nAvailable backup changesets:"
        )
        ui.status(msg, label=b"status.removed")
    else:
        ui.status(_(b"no backup changesets found\n"))
        return

    for backup in backups:
        # Much of this is copied from the hg incoming logic
        source = os.path.relpath(backup, encoding.getcwd())
        source, branches = urlutil.get_unique_pull_path(
            b'debugbackupbundle',
            repo,
            ui,
            source,
            default_branches=opts.get(b'branch'),
        )
        try:
            other = hg.peer(repo, opts, source)
        except error.LookupError as ex:
            msg = _(b"\nwarning: unable to open bundle %s") % source
            hint = _(b"\n(missing parent rev %s)\n") % short(ex.name)
            ui.warn(msg, hint=hint)
            continue
        revs, checkout = hg.addbranchrevs(
            repo, other, branches, opts.get(b"rev")
        )

        if revs:
            revs = [other.lookup(rev) for rev in revs]

        quiet = ui.quiet
        try:
            ui.quiet = True
            other, chlist, cleanupfn = bundlerepo.getremotechanges(
                ui, repo, other, revs, opts[b"bundle"], opts[b"force"]
            )
        except error.LookupError:
            continue
        finally:
            ui.quiet = quiet

        try:
            if not chlist:
                continue
            if recovernode:
                with repo.lock(), repo.transaction(b"unbundle") as tr:
                    if scmutil.isrevsymbol(other, recovernode):
                        ui.status(_(b"Unbundling %s\n") % (recovernode))
                        f = hg.openpath(ui, source)
                        gen = exchange.readbundle(ui, f, source)
                        if isinstance(gen, bundle2.unbundle20):
                            bundle2.applybundle(
                                repo,
                                gen,
                                tr,
                                source=b"unbundle",
                                url=b"bundle:" + source,
                            )
                        else:
                            gen.apply(repo, b"unbundle", b"bundle:" + source)
                        break
            else:
                backupdate = encoding.strtolocal(
                    time.strftime(
                        "%a %H:%M, %Y-%m-%d",
                        time.localtime(os.path.getmtime(source)),
                    )
                )
                ui.status(b"\n%s\n" % (backupdate.ljust(50)))
                if ui.verbose:
                    ui.status(b"%s%s\n" % (b"bundle:".ljust(13), source))
                else:
                    opts[
                        b"template"
                    ] = b"{label('status.modified', node|short)} {desc|firstline}\n"
                    displayer = logcmdutil.changesetdisplayer(
                        ui, other, opts, False
                    )
                    display(other, chlist, displayer)
                    displayer.close()
        finally:
            cleanupfn()


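# Illustrative usage (the hash is hypothetical): list the changesets still
# sitting in .hg/strip-backup, then unbundle the first backup bundle that
# contains a given one, as implemented above:
#
#     $ hg debugbackupbundle
#     $ hg debugbackupbundle --recover c25b68c3a5a7
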
@command(
    b'debugsub',
    [(b'r', b'rev', b'', _(b'revision to check'), _(b'REV'))],
    _(b'[-r REV] [REV]'),
)
def debugsub(ui, repo, rev=None):
    ctx = scmutil.revsingle(repo, rev, None)
    for k, v in sorted(ctx.substate.items()):
        ui.writenoi18n(b'path %s\n' % k)
        ui.writenoi18n(b' source   %s\n' % v[0])
        ui.writenoi18n(b' revision %s\n' % v[1])


@command(b'debugshell', optionalrepo=True)
def debugshell(ui, repo):
    """run an interactive Python interpreter

    The local namespace is provided with a reference to the ui and
    the repo instance (if available).
    """
    import code

    imported_objects = {
        'ui': ui,
        'repo': repo,
    }

    code.interact(local=imported_objects)


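# Illustrative session (assumed interactive input): the interpreter
# started above has `ui` and `repo` pre-bound, so one can poke at the
# repository directly, e.g.:
#
#     >>> len(repo)                   # number of revisions
#     >>> repo[b'tip'].description()
#     >>> ui.write(b'%s\n' % repo.root)
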
@command(
    b'debugsuccessorssets',
    [(b'', b'closest', False, _(b'return closest successors sets only'))],
    _(b'[REV]'),
)
def debugsuccessorssets(ui, repo, *revs, **opts):
    """show set of successors for revision

    A successors set of changeset A is a consistent group of revisions that
    succeed A. It contains non-obsolete changesets only, unless the
    --closest option is set.

    In most cases a changeset A has a single successors set containing a single
    successor (changeset A replaced by A').

    A changeset that is made obsolete with no successors is called "pruned".
    Such changesets have no successors sets at all.

    A changeset that has been "split" will have a successors set containing
    more than one successor.

    A changeset that has been rewritten in multiple different ways is called
    "divergent". Such changesets have multiple successor sets (each of which
    may also be split, i.e. have multiple successors).

    Results are displayed as follows::

        <rev1>
            <successors-1A>
        <rev2>
            <successors-2A>
            <successors-2B1> <successors-2B2> <successors-2B3>

    Here rev2 has two possible (i.e. divergent) successors sets. The first
    holds one element, whereas the second holds three (i.e. the changeset has
    been split).
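
    For illustration only (the node hashes below are made up), a changeset
    that was split into two successors would be reported as::

        1a2b3c4d5e6f
            7a8b9c0d1e2f 3a4b5c6d7e8f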
3912 """
3917 """
3913 # passed to successorssets caching computation from one call to another
3918 # passed to successorssets caching computation from one call to another
3914 cache = {}
3919 cache = {}
3915 ctx2str = bytes
3920 ctx2str = bytes
3916 node2str = short
3921 node2str = short
3917 for rev in scmutil.revrange(repo, revs):
3922 for rev in scmutil.revrange(repo, revs):
3918 ctx = repo[rev]
3923 ctx = repo[rev]
3919 ui.write(b'%s\n' % ctx2str(ctx))
3924 ui.write(b'%s\n' % ctx2str(ctx))
3920 for succsset in obsutil.successorssets(
3925 for succsset in obsutil.successorssets(
3921 repo, ctx.node(), closest=opts['closest'], cache=cache
3926 repo, ctx.node(), closest=opts['closest'], cache=cache
3922 ):
3927 ):
3923 if succsset:
3928 if succsset:
3924 ui.write(b' ')
3929 ui.write(b' ')
3925 ui.write(node2str(succsset[0]))
3930 ui.write(node2str(succsset[0]))
3926 for node in succsset[1:]:
3931 for node in succsset[1:]:
3927 ui.write(b' ')
3932 ui.write(b' ')
3928 ui.write(node2str(node))
3933 ui.write(node2str(node))
3929 ui.write(b'\n')
3934 ui.write(b'\n')
3930
3935
3931
3936
@command(b'debugtagscache', [])
def debugtagscache(ui, repo):
    """display the contents of .hg/cache/hgtagsfnodes1"""
    cache = tagsmod.hgtagsfnodescache(repo.unfiltered())
    flog = repo.file(b'.hgtags')
    for r in repo:
        node = repo[r].node()
        tagsnode = cache.getfnode(node, computemissing=False)
        if tagsnode:
            tagsnodedisplay = hex(tagsnode)
            if not flog.hasnode(tagsnode):
                tagsnodedisplay += b' (unknown node)'
        elif tagsnode is None:
            tagsnodedisplay = b'missing'
        else:
            tagsnodedisplay = b'invalid'

        ui.write(b'%d %s %s\n' % (r, hex(node), tagsnodedisplay))


@command(
    b'debugtemplate',
    [
        (b'r', b'rev', [], _(b'apply template on changesets'), _(b'REV')),
        (b'D', b'define', [], _(b'define template keyword'), _(b'KEY=VALUE')),
    ],
    _(b'[-r REV]... [-D KEY=VALUE]... TEMPLATE'),
    optionalrepo=True,
)
def debugtemplate(ui, repo, tmpl, **opts):
    """parse and apply a template

    If -r/--rev is given, the template is processed as a log template and
    applied to the given changesets. Otherwise, it is processed as a generic
    template.

    Use --verbose to print the parsed tree.
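
    For example, the following sketch renders a generic template; the
    ``greeting`` keyword is made up and supplied via -D::

        hg debugtemplate -D greeting=hello '{greeting} world'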
3969 """
3974 """
3970 revs = None
3975 revs = None
3971 if opts['rev']:
3976 if opts['rev']:
3972 if repo is None:
3977 if repo is None:
3973 raise error.RepoError(
3978 raise error.RepoError(
3974 _(b'there is no Mercurial repository here (.hg not found)')
3979 _(b'there is no Mercurial repository here (.hg not found)')
3975 )
3980 )
3976 revs = scmutil.revrange(repo, opts['rev'])
3981 revs = scmutil.revrange(repo, opts['rev'])
3977
3982
3978 props = {}
3983 props = {}
3979 for d in opts['define']:
3984 for d in opts['define']:
3980 try:
3985 try:
3981 k, v = (e.strip() for e in d.split(b'=', 1))
3986 k, v = (e.strip() for e in d.split(b'=', 1))
3982 if not k or k == b'ui':
3987 if not k or k == b'ui':
3983 raise ValueError
3988 raise ValueError
3984 props[k] = v
3989 props[k] = v
3985 except ValueError:
3990 except ValueError:
3986 raise error.Abort(_(b'malformed keyword definition: %s') % d)
3991 raise error.Abort(_(b'malformed keyword definition: %s') % d)
3987
3992
3988 if ui.verbose:
3993 if ui.verbose:
3989 aliases = ui.configitems(b'templatealias')
3994 aliases = ui.configitems(b'templatealias')
3990 tree = templater.parse(tmpl)
3995 tree = templater.parse(tmpl)
3991 ui.note(templater.prettyformat(tree), b'\n')
3996 ui.note(templater.prettyformat(tree), b'\n')
3992 newtree = templater.expandaliases(tree, aliases)
3997 newtree = templater.expandaliases(tree, aliases)
3993 if newtree != tree:
3998 if newtree != tree:
3994 ui.notenoi18n(
3999 ui.notenoi18n(
3995 b"* expanded:\n", templater.prettyformat(newtree), b'\n'
4000 b"* expanded:\n", templater.prettyformat(newtree), b'\n'
3996 )
4001 )
3997
4002
3998 if revs is None:
4003 if revs is None:
3999 tres = formatter.templateresources(ui, repo)
4004 tres = formatter.templateresources(ui, repo)
4000 t = formatter.maketemplater(ui, tmpl, resources=tres)
4005 t = formatter.maketemplater(ui, tmpl, resources=tres)
4001 if ui.verbose:
4006 if ui.verbose:
4002 kwds, funcs = t.symbolsuseddefault()
4007 kwds, funcs = t.symbolsuseddefault()
4003 ui.writenoi18n(b"* keywords: %s\n" % b', '.join(sorted(kwds)))
4008 ui.writenoi18n(b"* keywords: %s\n" % b', '.join(sorted(kwds)))
4004 ui.writenoi18n(b"* functions: %s\n" % b', '.join(sorted(funcs)))
4009 ui.writenoi18n(b"* functions: %s\n" % b', '.join(sorted(funcs)))
4005 ui.write(t.renderdefault(props))
4010 ui.write(t.renderdefault(props))
4006 else:
4011 else:
4007 displayer = logcmdutil.maketemplater(ui, repo, tmpl)
4012 displayer = logcmdutil.maketemplater(ui, repo, tmpl)
4008 if ui.verbose:
4013 if ui.verbose:
4009 kwds, funcs = displayer.t.symbolsuseddefault()
4014 kwds, funcs = displayer.t.symbolsuseddefault()
4010 ui.writenoi18n(b"* keywords: %s\n" % b', '.join(sorted(kwds)))
4015 ui.writenoi18n(b"* keywords: %s\n" % b', '.join(sorted(kwds)))
4011 ui.writenoi18n(b"* functions: %s\n" % b', '.join(sorted(funcs)))
4016 ui.writenoi18n(b"* functions: %s\n" % b', '.join(sorted(funcs)))
4012 for r in revs:
4017 for r in revs:
4013 displayer.show(repo[r], **pycompat.strkwargs(props))
4018 displayer.show(repo[r], **pycompat.strkwargs(props))
4014 displayer.close()
4019 displayer.close()
4015
4020
4016
4021
@command(
    b'debuguigetpass',
    [
        (b'p', b'prompt', b'', _(b'prompt text'), _(b'TEXT')),
    ],
    _(b'[-p TEXT]'),
    norepo=True,
)
def debuguigetpass(ui, prompt=b''):
    """show prompt to type password"""
    r = ui.getpass(prompt)
    if r is None:
        r = b"<default response>"
    ui.writenoi18n(b'response: %s\n' % r)


@command(
    b'debuguiprompt',
    [
        (b'p', b'prompt', b'', _(b'prompt text'), _(b'TEXT')),
    ],
    _(b'[-p TEXT]'),
    norepo=True,
)
def debuguiprompt(ui, prompt=b''):
    """show plain prompt"""
    r = ui.prompt(prompt)
    ui.writenoi18n(b'response: %s\n' % r)


@command(b'debugupdatecaches', [])
def debugupdatecaches(ui, repo, *pats, **opts):
    """warm all known caches in the repository"""
    with repo.wlock(), repo.lock():
        repo.updatecaches(caches=repository.CACHES_ALL)


@command(
    b'debugupgraderepo',
    [
        (
            b'o',
            b'optimize',
            [],
            _(b'extra optimization to perform'),
            _(b'NAME'),
        ),
        (b'', b'run', False, _(b'performs an upgrade')),
        (b'', b'backup', True, _(b'keep the old repository content around')),
        (b'', b'changelog', None, _(b'select the changelog for upgrade')),
        (b'', b'manifest', None, _(b'select the manifest for upgrade')),
        (b'', b'filelogs', None, _(b'select all filelogs for upgrade')),
    ],
)
def debugupgraderepo(ui, repo, run=False, optimize=None, backup=True, **opts):
    """upgrade a repository to use different features

    If no arguments are specified, the repository is evaluated for upgrade
    and a list of problems and potential optimizations is printed.

    With ``--run``, a repository upgrade is performed. Behavior of the upgrade
    can be influenced via additional arguments. More details will be provided
    by the command output when run without ``--run``.

    During the upgrade, the repository will be locked and no writes will be
    allowed.

    At the end of the upgrade, the repository may not be readable while new
    repository data is swapped in. This window will be as long as it takes to
    rename some directories inside the ``.hg`` directory. On most machines, this
    should complete almost instantaneously and the chances of a consumer being
    unable to access the repository should be low.

    By default, all revlogs will be upgraded. You can restrict this using flags
    such as `--manifest`:

    * `--manifest`: only optimize the manifest
    * `--no-manifest`: optimize all revlogs but the manifest
    * `--changelog`: optimize the changelog only
    * `--no-changelog --no-manifest`: optimize filelogs only
    * `--filelogs`: optimize the filelogs only
    * `--no-changelog --no-manifest --no-filelogs`: skip all revlog optimizations
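
    For instance, a typical workflow (illustrative only) is a dry run to
    review the proposed changes, followed by an actual run::

        hg debugupgraderepo
        hg debugupgraderepo --run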
4099 """
4104 """
4100 return upgrade.upgraderepo(
4105 return upgrade.upgraderepo(
4101 ui, repo, run=run, optimize=set(optimize), backup=backup, **opts
4106 ui, repo, run=run, optimize=set(optimize), backup=backup, **opts
4102 )
4107 )
4103
4108
4104
4109
@command(
    b'debugwalk', cmdutil.walkopts, _(b'[OPTION]... [FILE]...'), inferrepo=True
)
def debugwalk(ui, repo, *pats, **opts):
    """show how files match on given patterns"""
    opts = pycompat.byteskwargs(opts)
    m = scmutil.match(repo[None], pats, opts)
    if ui.verbose:
        ui.writenoi18n(b'* matcher:\n', stringutil.prettyrepr(m), b'\n')
    items = list(repo[None].walk(m))
    if not items:
        return
    f = lambda fn: fn
    if ui.configbool(b'ui', b'slash') and pycompat.ossep != b'/':
        f = lambda fn: util.normpath(fn)
    fmt = b'f %%-%ds %%-%ds %%s' % (
        max([len(abs) for abs in items]),
        max([len(repo.pathto(abs)) for abs in items]),
    )
    for abs in items:
        line = fmt % (
            abs,
            f(repo.pathto(abs)),
            m.exact(abs) and b'exact' or b'',
        )
        ui.write(b"%s\n" % line.rstrip())


@command(b'debugwhyunstable', [], _(b'REV'))
def debugwhyunstable(ui, repo, rev):
    """explain instabilities of a changeset"""
    for entry in obsutil.whyunstable(repo, scmutil.revsingle(repo, rev)):
        dnodes = b''
        if entry.get(b'divergentnodes'):
            dnodes = (
                b' '.join(
                    b'%s (%s)' % (ctx.hex(), ctx.phasestr())
                    for ctx in entry[b'divergentnodes']
                )
                + b' '
            )
        ui.write(
            b'%s: %s%s %s\n'
            % (entry[b'instability'], dnodes, entry[b'reason'], entry[b'node'])
        )


@command(
    b'debugwireargs',
    [
        (b'', b'three', b'', b'three'),
        (b'', b'four', b'', b'four'),
        (b'', b'five', b'', b'five'),
    ]
    + cmdutil.remoteopts,
    _(b'REPO [OPTIONS]... [ONE [TWO]]'),
    norepo=True,
)
def debugwireargs(ui, repopath, *vals, **opts):
    opts = pycompat.byteskwargs(opts)
    repo = hg.peer(ui, opts, repopath)
    try:
        for opt in cmdutil.remoteopts:
            del opts[opt[1]]
        args = {}
        for k, v in pycompat.iteritems(opts):
            if v:
                args[k] = v
        args = pycompat.strkwargs(args)
        # run twice to check that we don't mess up the stream for the next command
        res1 = repo.debugwireargs(*vals, **args)
        res2 = repo.debugwireargs(*vals, **args)
        ui.write(b"%s\n" % res1)
        if res1 != res2:
            ui.warn(b"%s\n" % res2)
    finally:
        repo.close()


def _parsewirelangblocks(fh):
    activeaction = None
    blocklines = []
    lastindent = 0

    for line in fh:
        line = line.rstrip()
        if not line:
            continue

        if line.startswith(b'#'):
            continue

        if not line.startswith(b' '):
            # New block. Flush previous one.
            if activeaction:
                yield activeaction, blocklines

            activeaction = line
            blocklines = []
            lastindent = 0
            continue

        # Else we start with an indent.

        if not activeaction:
            raise error.Abort(_(b'indented line outside of block'))

        indent = len(line) - len(line.lstrip())

        # If this line is indented more than the last line, concatenate it.
        if indent > lastindent and blocklines:
            blocklines[-1] += line.lstrip()
        else:
            blocklines.append(line)
            lastindent = indent

    # Flush last block.
    if activeaction:
        yield activeaction, blocklines


@command(
    b'debugwireproto',
    [
        (b'', b'localssh', False, _(b'start an SSH server for this repo')),
        (b'', b'peer', b'', _(b'construct a specific version of the peer')),
        (
            b'',
            b'noreadstderr',
            False,
            _(b'do not read from stderr of the remote'),
        ),
        (
            b'',
            b'nologhandshake',
            False,
            _(b'do not log I/O related to the peer handshake'),
        ),
    ]
    + cmdutil.remoteopts,
    _(b'[PATH]'),
    optionalrepo=True,
)
def debugwireproto(ui, repo, path=None, **opts):
    """send wire protocol commands to a server

    This command can be used to issue wire protocol commands to remote
    peers and to debug the raw data being exchanged.

    ``--localssh`` will start an SSH server against the current repository
    and connect to that. By default, the connection will perform a handshake
    and establish an appropriate peer instance.

    ``--peer`` can be used to bypass the handshake protocol and construct a
    peer instance using the specified class type. Valid values are ``raw``,
    ``http2``, ``ssh1``, and ``ssh2``. ``raw`` instances only allow sending
    raw data payloads and don't support higher-level command actions.

    ``--noreadstderr`` can be used to disable automatic reading from stderr
    of the peer (for SSH connections only). Disabling automatic reading of
    stderr is useful for making output more deterministic.

    Commands are issued via a mini language which is specified via stdin.
    The language consists of individual actions to perform. An action is
    defined by a block. A block is defined as a line with no leading
    space followed by 0 or more lines with leading space. Blocks are
    effectively a high-level command with additional metadata.

    Lines beginning with ``#`` are ignored.

    The following sections denote available actions.

    raw
    ---

    Send raw data to the server.

    The block payload contains the raw data to send as one atomic send
    operation. The data may not actually be delivered in a single system
    call: it depends on the abilities of the transport being used.

    Each line in the block is de-indented and concatenated. Then, that
    value is evaluated as a Python b'' literal. This allows the use of
    backslash escaping, etc.
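
    For example, this hypothetical block sends the payload ``hello`` to the
    server in a single atomic send::

        raw
            hello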

    raw+
    ----

    Behaves like ``raw`` except flushes output afterwards.

    command <X>
    -----------

    Send a request to run a named command, whose name follows the ``command``
    string.

    Arguments to the command are defined as lines in this block. The format of
    each line is ``<key> <value>``. e.g.::

        command listkeys
            namespace bookmarks

    If the value begins with ``eval:``, it will be interpreted as a Python
    literal expression. Otherwise values are interpreted as Python b'' literals.
    This allows sending complex types and encoding special byte sequences via
    backslash escaping.
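
    For instance, an illustrative block passing a Python list argument via
    ``eval:`` (the command and value are only examples)::

        command known
            nodes eval:[]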

    The following arguments have special meaning:

    ``PUSHFILE``
        When defined, the *push* mechanism of the peer will be used instead
        of the static request-response mechanism and the content of the
        file specified in the value of this argument will be sent as the
        command payload.

        This can be used to submit a local bundle file to the remote.

    batchbegin
    ----------

    Instruct the peer to begin a batched send.

    All ``command`` blocks are queued for execution until the next
    ``batchsubmit`` block.

    batchsubmit
    -----------

    Submit previously queued ``command`` blocks as a batch request.

    This action MUST be paired with a ``batchbegin`` action.
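
    A hypothetical batched exchange of two commands might look like::

        batchbegin
        command heads
        command listkeys
            namespace bookmarks
        batchsubmit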

    httprequest <method> <path>
    ---------------------------

    (HTTP peer only)

    Send an HTTP request to the peer.

    The HTTP request line follows the ``httprequest`` action. e.g. ``GET /foo``.

    Arguments of the form ``<key>: <value>`` are interpreted as HTTP request
    headers to add to the request. e.g. ``Accept: foo``.

    The following arguments are special:

    ``BODYFILE``
        The content of the file defined as the value to this argument will be
        transferred verbatim as the HTTP request body.

    ``frame <type> <flags> <payload>``
        Send a unified protocol frame as part of the request body.

        All frames will be collected and sent as the body to the HTTP
        request.
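
    An illustrative request (the header values are made up)::

        httprequest GET ?cmd=capabilities
            accept: application/mercurial-0.1
            user-agent: test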

    close
    -----

    Close the connection to the server.

    flush
    -----

    Flush data written to the server.

    readavailable
    -------------

    Close the write end of the connection and read all available data from
    the server.

    If the connection to the server encompasses multiple pipes, we poll both
    pipes and read available data.

    readline
    --------

    Read a line of output from the server. If there are multiple output
    pipes, reads only the main pipe.

    ereadline
    ---------

    Like ``readline``, but read from the stderr pipe, if available.

    read <X>
    --------

    ``read()`` N bytes from the server's main output pipe.

    eread <X>
    ---------

    ``read()`` N bytes from the server's stderr pipe, if available.

    Specifying Unified Frame-Based Protocol Frames
    ----------------------------------------------

    It is possible to emit *Unified Frame-Based Protocol* frames by using
    special syntax.

    A frame is composed of a type, flags, and a payload. These can be parsed
    from a string of the form::

        <request-id> <stream-id> <stream-flags> <type> <flags> <payload>

    ``request-id`` and ``stream-id`` are integers defining the request and
    stream identifiers.

    ``type`` can be an integer value for the frame type or the string name
    of the type. The strings are defined in ``wireprotoframing.py``. e.g.
    ``command-name``.

    ``stream-flags`` and ``flags`` are a ``|`` delimited list of flag
    components. Each component (and there can be just one) can be an integer
    or a flag name for stream flags or frame flags, respectively. Values are
    resolved to integers and then bitwise OR'd together.

    ``payload`` represents the raw frame payload. If it begins with
    ``cbor:``, the following string is evaluated as Python code and the
    resulting object is fed into a CBOR encoder. Otherwise it is interpreted
    as a Python byte string literal.
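
    For example, a hypothetical frame issuing a command request for the
    ``heads`` command on stream 1 (flag names as defined in
    ``wireprotoframing.py``)::

        1 1 stream-begin command-request new cbor:{b'name': b'heads'}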
4428 """
4433 """
4429 opts = pycompat.byteskwargs(opts)
4434 opts = pycompat.byteskwargs(opts)
4430
4435
4431 if opts[b'localssh'] and not repo:
4436 if opts[b'localssh'] and not repo:
4432 raise error.Abort(_(b'--localssh requires a repository'))
4437 raise error.Abort(_(b'--localssh requires a repository'))
4433
4438
4434 if opts[b'peer'] and opts[b'peer'] not in (
4439 if opts[b'peer'] and opts[b'peer'] not in (
4435 b'raw',
4440 b'raw',
4436 b'http2',
4441 b'http2',
4437 b'ssh1',
4442 b'ssh1',
4438 b'ssh2',
4443 b'ssh2',
4439 ):
4444 ):
4440 raise error.Abort(
4445 raise error.Abort(
4441 _(b'invalid value for --peer'),
4446 _(b'invalid value for --peer'),
4442 hint=_(b'valid values are "raw", "ssh1", and "ssh2"'),
4447 hint=_(b'valid values are "raw", "ssh1", and "ssh2"'),
4443 )
4448 )
4444
4449
4445 if path and opts[b'localssh']:
4450 if path and opts[b'localssh']:
4446 raise error.Abort(_(b'cannot specify --localssh with an explicit path'))
4451 raise error.Abort(_(b'cannot specify --localssh with an explicit path'))
4447
4452
4448 if ui.interactive():
4453 if ui.interactive():
4449 ui.write(_(b'(waiting for commands on stdin)\n'))
4454 ui.write(_(b'(waiting for commands on stdin)\n'))
4450
4455
4451 blocks = list(_parsewirelangblocks(ui.fin))
4456 blocks = list(_parsewirelangblocks(ui.fin))
4452
4457
4453 proc = None
4458 proc = None
4454 stdin = None
4459 stdin = None
4455 stdout = None
4460 stdout = None
4456 stderr = None
4461 stderr = None
4457 opener = None
4462 opener = None
4458
4463
4459 if opts[b'localssh']:
4464 if opts[b'localssh']:
4460 # We start the SSH server in its own process so there is process
4465 # We start the SSH server in its own process so there is process
4461 # separation. This prevents a whole class of potential bugs around
4466 # separation. This prevents a whole class of potential bugs around
4462 # shared state from interfering with server operation.
4467 # shared state from interfering with server operation.
4463 args = procutil.hgcmd() + [
4468 args = procutil.hgcmd() + [
4464 b'-R',
4469 b'-R',
4465 repo.root,
4470 repo.root,
4466 b'debugserve',
4471 b'debugserve',
4467 b'--sshstdio',
4472 b'--sshstdio',
4468 ]
4473 ]
4469 proc = subprocess.Popen(
4474 proc = subprocess.Popen(
4470 pycompat.rapply(procutil.tonativestr, args),
4475 pycompat.rapply(procutil.tonativestr, args),
4471 stdin=subprocess.PIPE,
4476 stdin=subprocess.PIPE,
4472 stdout=subprocess.PIPE,
4477 stdout=subprocess.PIPE,
4473 stderr=subprocess.PIPE,
4478 stderr=subprocess.PIPE,
4474 bufsize=0,
4479 bufsize=0,
4475 )
4480 )
4476
4481
4477 stdin = proc.stdin
4482 stdin = proc.stdin
4478 stdout = proc.stdout
4483 stdout = proc.stdout
4479 stderr = proc.stderr
4484 stderr = proc.stderr
4480
4485
4481 # We turn the pipes into observers so we can log I/O.
4486 # We turn the pipes into observers so we can log I/O.
4482 if ui.verbose or opts[b'peer'] == b'raw':
4487 if ui.verbose or opts[b'peer'] == b'raw':
4483 stdin = util.makeloggingfileobject(
4488 stdin = util.makeloggingfileobject(
4484 ui, proc.stdin, b'i', logdata=True
4489 ui, proc.stdin, b'i', logdata=True
4485 )
4490 )
4486 stdout = util.makeloggingfileobject(
4491 stdout = util.makeloggingfileobject(
4487 ui, proc.stdout, b'o', logdata=True
4492 ui, proc.stdout, b'o', logdata=True
4488 )
4493 )
4489 stderr = util.makeloggingfileobject(
4494 stderr = util.makeloggingfileobject(
4490 ui, proc.stderr, b'e', logdata=True
4495 ui, proc.stderr, b'e', logdata=True
4491 )
4496 )
4492
4497
4493 # --localssh also implies the peer connection settings.
4498 # --localssh also implies the peer connection settings.
4494
4499
4495 url = b'ssh://localserver'
4500 url = b'ssh://localserver'
4496 autoreadstderr = not opts[b'noreadstderr']
4501 autoreadstderr = not opts[b'noreadstderr']
4497
4502
4498 if opts[b'peer'] == b'ssh1':
4503 if opts[b'peer'] == b'ssh1':
4499 ui.write(_(b'creating ssh peer for wire protocol version 1\n'))
4504 ui.write(_(b'creating ssh peer for wire protocol version 1\n'))
4500 peer = sshpeer.sshv1peer(
4505 peer = sshpeer.sshv1peer(
4501 ui,
4506 ui,
4502 url,
4507 url,
4503 proc,
4508 proc,
4504 stdin,
4509 stdin,
4505 stdout,
4510 stdout,
4506 stderr,
4511 stderr,
4507 None,
4512 None,
4508 autoreadstderr=autoreadstderr,
4513 autoreadstderr=autoreadstderr,
4509 )
4514 )
4510 elif opts[b'peer'] == b'ssh2':
4515 elif opts[b'peer'] == b'ssh2':
4511 ui.write(_(b'creating ssh peer for wire protocol version 2\n'))
4516 ui.write(_(b'creating ssh peer for wire protocol version 2\n'))
4512 peer = sshpeer.sshv2peer(
4517 peer = sshpeer.sshv2peer(
4513 ui,
4518 ui,
4514 url,
4519 url,
4515 proc,
4520 proc,
4516 stdin,
4521 stdin,
4517 stdout,
4522 stdout,
4518 stderr,
4523 stderr,
4519 None,
4524 None,
4520 autoreadstderr=autoreadstderr,
4525 autoreadstderr=autoreadstderr,
4521 )
4526 )
4522 elif opts[b'peer'] == b'raw':
4527 elif opts[b'peer'] == b'raw':
4523 ui.write(_(b'using raw connection to peer\n'))
4528 ui.write(_(b'using raw connection to peer\n'))
4524 peer = None
4529 peer = None
4525 else:
4530 else:
4526 ui.write(_(b'creating ssh peer from handshake results\n'))
4531 ui.write(_(b'creating ssh peer from handshake results\n'))
4527 peer = sshpeer.makepeer(
4532 peer = sshpeer.makepeer(
4528 ui,
4533 ui,
4529 url,
4534 url,
4530 proc,
4535 proc,
4531 stdin,
4536 stdin,
4532 stdout,
4537 stdout,
4533 stderr,
4538 stderr,
4534 autoreadstderr=autoreadstderr,
4539 autoreadstderr=autoreadstderr,
4535 )
4540 )
4536
4541
4537 elif path:
4542 elif path:
4538 # We bypass hg.peer() so we can proxy the sockets.
4543 # We bypass hg.peer() so we can proxy the sockets.
4539 # TODO consider not doing this because we skip
4544 # TODO consider not doing this because we skip
4540 # ``hg.wirepeersetupfuncs`` and potentially other useful functionality.
4545 # ``hg.wirepeersetupfuncs`` and potentially other useful functionality.
4541 u = urlutil.url(path)
4546 u = urlutil.url(path)
4542 if u.scheme != b'http':
4547 if u.scheme != b'http':
4543 raise error.Abort(_(b'only http:// paths are currently supported'))
4548 raise error.Abort(_(b'only http:// paths are currently supported'))
4544
4549
4545 url, authinfo = u.authinfo()
4550 url, authinfo = u.authinfo()
4546 openerargs = {
4551 openerargs = {
4547 'useragent': b'Mercurial debugwireproto',
4552 'useragent': b'Mercurial debugwireproto',
4548 }
4553 }
4549
4554
4550 # Turn pipes/sockets into observers so we can log I/O.
4555 # Turn pipes/sockets into observers so we can log I/O.
4551 if ui.verbose:
4556 if ui.verbose:
4552 openerargs.update(
4557 openerargs.update(
4553 {
4558 {
4554 'loggingfh': ui,
4559 'loggingfh': ui,
4555 'loggingname': b's',
4560 'loggingname': b's',
4556 'loggingopts': {
4561 'loggingopts': {
4557 'logdata': True,
4562 'logdata': True,
4558 'logdataapis': False,
4563 'logdataapis': False,
4559 },
4564 },
4560 }
4565 }
4561 )
4566 )
4562
4567
4563 if ui.debugflag:
4568 if ui.debugflag:
4564 openerargs['loggingopts']['logdataapis'] = True
4569 openerargs['loggingopts']['logdataapis'] = True
4565
4570
4566 # Don't send default headers when in raw mode. This allows us to
4571 # Don't send default headers when in raw mode. This allows us to
4567 # bypass most of the behavior of our URL handling code so we can
4572 # bypass most of the behavior of our URL handling code so we can
4568 # have near complete control over what's sent on the wire.
4573 # have near complete control over what's sent on the wire.
4569 if opts[b'peer'] == b'raw':
4574 if opts[b'peer'] == b'raw':
4570 openerargs['sendaccept'] = False
4575 openerargs['sendaccept'] = False
4571
4576
4572 opener = urlmod.opener(ui, authinfo, **openerargs)
4577 opener = urlmod.opener(ui, authinfo, **openerargs)
4573
4578
4574 if opts[b'peer'] == b'http2':
4579 if opts[b'peer'] == b'http2':
4575 ui.write(_(b'creating http peer for wire protocol version 2\n'))
4580 ui.write(_(b'creating http peer for wire protocol version 2\n'))
4576 # We go through makepeer() because we need an API descriptor for
4581 # We go through makepeer() because we need an API descriptor for
4577 # the peer instance to be useful.
4582 # the peer instance to be useful.
4578 with ui.configoverride(
4583 with ui.configoverride(
4579 {(b'experimental', b'httppeer.advertise-v2'): True}
4584 {(b'experimental', b'httppeer.advertise-v2'): True}
4580 ):
4585 ):
4581 if opts[b'nologhandshake']:
4586 if opts[b'nologhandshake']:
4582 ui.pushbuffer()
4587 ui.pushbuffer()
4583
4588
4584 peer = httppeer.makepeer(ui, path, opener=opener)
4589 peer = httppeer.makepeer(ui, path, opener=opener)
4585
4590
4586 if opts[b'nologhandshake']:
4591 if opts[b'nologhandshake']:
4587 ui.popbuffer()
4592 ui.popbuffer()
4588
4593
4589 if not isinstance(peer, httppeer.httpv2peer):
4594 if not isinstance(peer, httppeer.httpv2peer):
4590 raise error.Abort(
4595 raise error.Abort(
4591 _(
4596 _(
4592 b'could not instantiate HTTP peer for '
4597 b'could not instantiate HTTP peer for '
4593 b'wire protocol version 2'
4598 b'wire protocol version 2'
4594 ),
4599 ),
4595 hint=_(
4600 hint=_(
4596 b'the server may not have the feature '
4601 b'the server may not have the feature '
4597 b'enabled or is not allowing this '
4602 b'enabled or is not allowing this '
4598 b'client version'
4603 b'client version'
4599 ),
4604 ),
4600 )
4605 )
4601
4606
4602 elif opts[b'peer'] == b'raw':
4607 elif opts[b'peer'] == b'raw':
4603 ui.write(_(b'using raw connection to peer\n'))
4608 ui.write(_(b'using raw connection to peer\n'))
4604 peer = None
4609 peer = None
4605 elif opts[b'peer']:
4610 elif opts[b'peer']:
4606 raise error.Abort(
4611 raise error.Abort(
4607 _(b'--peer %s not supported with HTTP peers') % opts[b'peer']
4612 _(b'--peer %s not supported with HTTP peers') % opts[b'peer']
4608 )
4613 )
4609 else:
4614 else:
4610 peer = httppeer.makepeer(ui, path, opener=opener)
4615 peer = httppeer.makepeer(ui, path, opener=opener)
4611
4616
4612 # We /could/ populate stdin/stdout with sock.makefile()...
4617 # We /could/ populate stdin/stdout with sock.makefile()...
4613 else:
4618 else:
4614 raise error.Abort(_(b'unsupported connection configuration'))
4619 raise error.Abort(_(b'unsupported connection configuration'))
4615
4620
4616 batchedcommands = None
4621 batchedcommands = None
4617
4622
    # Now perform actions based on the parsed wire language instructions.
    for action, lines in blocks:
        if action in (b'raw', b'raw+'):
            if not stdin:
                raise error.Abort(_(b'cannot call raw/raw+ on this peer'))

            # Concatenate the data together.
            data = b''.join(l.lstrip() for l in lines)
            data = stringutil.unescapestr(data)
            stdin.write(data)

            if action == b'raw+':
                stdin.flush()
        elif action == b'flush':
            if not stdin:
                raise error.Abort(_(b'cannot call flush on this peer'))
            stdin.flush()
        elif action.startswith(b'command'):
            if not peer:
                raise error.Abort(
                    _(
                        b'cannot send commands unless peer instance '
                        b'is available'
                    )
                )

            command = action.split(b' ', 1)[1]

            args = {}
            for line in lines:
                # We need to allow empty values.
                fields = line.lstrip().split(b' ', 1)
                if len(fields) == 1:
                    key = fields[0]
                    value = b''
                else:
                    key, value = fields

                if value.startswith(b'eval:'):
                    value = stringutil.evalpythonliteral(value[5:])
                else:
                    value = stringutil.unescapestr(value)

                args[key] = value

            if batchedcommands is not None:
                batchedcommands.append((command, args))
                continue

            ui.status(_(b'sending %s command\n') % command)

            if b'PUSHFILE' in args:
                with open(args[b'PUSHFILE'], 'rb') as fh:
                    del args[b'PUSHFILE']
                    res, output = peer._callpush(
                        command, fh, **pycompat.strkwargs(args)
                    )
                    ui.status(_(b'result: %s\n') % stringutil.escapestr(res))
                    ui.status(
                        _(b'remote output: %s\n') % stringutil.escapestr(output)
                    )
            else:
                with peer.commandexecutor() as e:
                    res = e.callcommand(command, args).result()

                if isinstance(res, wireprotov2peer.commandresponse):
                    val = res.objects()
                    ui.status(
                        _(b'response: %s\n')
                        % stringutil.pprint(val, bprefix=True, indent=2)
                    )
                else:
                    ui.status(
                        _(b'response: %s\n')
                        % stringutil.pprint(res, bprefix=True, indent=2)
                    )

        elif action == b'batchbegin':
            if batchedcommands is not None:
                raise error.Abort(_(b'nested batchbegin not allowed'))

            batchedcommands = []
        elif action == b'batchsubmit':
            # There is a batching API we could go through. But it would be
            # difficult to normalize requests into function calls. It is easier
            # to bypass this layer and normalize to commands + args.
            ui.status(
                _(b'sending batch with %d sub-commands\n')
                % len(batchedcommands)
            )
            assert peer is not None
            for i, chunk in enumerate(peer._submitbatch(batchedcommands)):
                ui.status(
                    _(b'response #%d: %s\n') % (i, stringutil.escapestr(chunk))
                )

            batchedcommands = None

        elif action.startswith(b'httprequest '):
            if not opener:
                raise error.Abort(
                    _(b'cannot use httprequest without an HTTP peer')
                )

            request = action.split(b' ', 2)
            if len(request) != 3:
                raise error.Abort(
                    _(
                        b'invalid httprequest: expected format is '
                        b'"httprequest <method> <path>"'
                    )
                )

            method, httppath = request[1:]
            headers = {}
            body = None
            frames = []
            for line in lines:
                line = line.lstrip()
                m = re.match(b'^([a-zA-Z0-9_-]+): (.*)$', line)
                if m:
                    # Headers need to use native strings.
                    key = pycompat.strurl(m.group(1))
                    value = pycompat.strurl(m.group(2))
                    headers[key] = value
                    continue

                if line.startswith(b'BODYFILE '):
                    with open(line.split(b' ', 1)[1], b'rb') as fh:
                        body = fh.read()
                elif line.startswith(b'frame '):
                    frame = wireprotoframing.makeframefromhumanstring(
                        line[len(b'frame ') :]
                    )

                    frames.append(frame)
                else:
                    raise error.Abort(
                        _(b'unknown argument to httprequest: %s') % line
                    )

            url = path + httppath

            if frames:
                body = b''.join(bytes(f) for f in frames)

            req = urlmod.urlreq.request(pycompat.strurl(url), body, headers)

            # urllib.Request insists on using has_data() as a proxy for
            # determining the request method. Override that to use our
            # explicitly requested method.
            req.get_method = lambda: pycompat.sysstr(method)

            try:
                res = opener.open(req)
                body = res.read()
            except util.urlerr.urlerror as e:
                # read() method must be called, but only exists in Python 2
                getattr(e, 'read', lambda: None)()
                continue

            ct = res.headers.get('Content-Type')
            if ct == 'application/mercurial-cbor':
                ui.write(
                    _(b'cbor> %s\n')
                    % stringutil.pprint(
                        cborutil.decodeall(body), bprefix=True, indent=2
                    )
                )

        elif action == b'close':
            assert peer is not None
            peer.close()
        elif action == b'readavailable':
            if not stdout or not stderr:
                raise error.Abort(
                    _(b'readavailable not available on this peer')
                )

            stdin.close()
            stdout.read()
            stderr.read()

        elif action == b'readline':
            if not stdout:
                raise error.Abort(_(b'readline not available on this peer'))
            stdout.readline()
        elif action == b'ereadline':
            if not stderr:
                raise error.Abort(_(b'ereadline not available on this peer'))
            stderr.readline()
        elif action.startswith(b'read '):
            count = int(action.split(b' ', 1)[1])
            if not stdout:
                raise error.Abort(_(b'read not available on this peer'))
            stdout.read(count)
        elif action.startswith(b'eread '):
            count = int(action.split(b' ', 1)[1])
            if not stderr:
                raise error.Abort(_(b'eread not available on this peer'))
            stderr.read(count)
        else:
            raise error.Abort(_(b'unknown action: %s') % action)

    if batchedcommands is not None:
        raise error.Abort(_(b'unclosed "batchbegin" request'))

    if peer:
        peer.close()

    if proc:
        proc.kill()
@@ -1,1983 +1,1993 b''
1 # dirstate.py - working directory tracking for mercurial
1 # dirstate.py - working directory tracking for mercurial
2 #
2 #
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import collections
10 import collections
11 import contextlib
11 import contextlib
12 import errno
12 import errno
13 import os
13 import os
14 import stat
14 import stat
15
15
16 from .i18n import _
16 from .i18n import _
17 from .pycompat import delattr
17 from .pycompat import delattr
18
18
19 from hgdemandimport import tracing
19 from hgdemandimport import tracing
20
20
21 from . import (
21 from . import (
22 encoding,
22 encoding,
23 error,
23 error,
24 match as matchmod,
24 match as matchmod,
25 pathutil,
25 pathutil,
26 policy,
26 policy,
27 pycompat,
27 pycompat,
28 scmutil,
28 scmutil,
29 sparse,
29 sparse,
30 txnutil,
30 txnutil,
31 util,
31 util,
32 )
32 )
33
33
34 from .interfaces import (
34 from .interfaces import (
35 dirstate as intdirstate,
35 dirstate as intdirstate,
36 util as interfaceutil,
36 util as interfaceutil,
37 )
37 )
38
38
39 parsers = policy.importmod('parsers')
39 parsers = policy.importmod('parsers')
40 rustmod = policy.importrust('dirstate')
40 rustmod = policy.importrust('dirstate')
41
41
42 SUPPORTS_DIRSTATE_V2 = rustmod is not None
42 SUPPORTS_DIRSTATE_V2 = rustmod is not None
43
43
44 propertycache = util.propertycache
44 propertycache = util.propertycache
45 filecache = scmutil.filecache
45 filecache = scmutil.filecache
46 _rangemask = 0x7FFFFFFF
46 _rangemask = 0x7FFFFFFF
47
47
48 dirstatetuple = parsers.dirstatetuple
48 dirstatetuple = parsers.dirstatetuple
49
49
50
50
51 class repocache(filecache):
51 class repocache(filecache):
52 """filecache for files in .hg/"""
52 """filecache for files in .hg/"""
53
53
54 def join(self, obj, fname):
54 def join(self, obj, fname):
55 return obj._opener.join(fname)
55 return obj._opener.join(fname)
56
56
57
57
58 class rootcache(filecache):
58 class rootcache(filecache):
59 """filecache for files in the repository root"""
59 """filecache for files in the repository root"""
60
60
61 def join(self, obj, fname):
61 def join(self, obj, fname):
62 return obj._join(fname)
62 return obj._join(fname)
63
63
64
64
65 def _getfsnow(vfs):
65 def _getfsnow(vfs):
66 '''Get "now" timestamp on filesystem'''
66 '''Get "now" timestamp on filesystem'''
67 tmpfd, tmpname = vfs.mkstemp()
67 tmpfd, tmpname = vfs.mkstemp()
68 try:
68 try:
69 return os.fstat(tmpfd)[stat.ST_MTIME]
69 return os.fstat(tmpfd)[stat.ST_MTIME]
70 finally:
70 finally:
71 os.close(tmpfd)
71 os.close(tmpfd)
72 vfs.unlink(tmpname)
72 vfs.unlink(tmpname)
73
73
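A standalone sketch of the same trick using only the standard library (hypothetical helper; a plain directory path stands in for the Mercurial vfs). Reading the mtime of a freshly created scratch file yields the filesystem's notion of "now", which can lag time.time() on filesystems with coarse timestamp granularity:

    import os
    import stat
    import tempfile

    def fs_now(directory):
        # Create a scratch file and use its mtime as the filesystem clock.
        fd, name = tempfile.mkstemp(dir=directory)
        try:
            return os.fstat(fd)[stat.ST_MTIME]
        finally:
            os.close(fd)
            os.unlink(name)
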
74
74
75 @interfaceutil.implementer(intdirstate.idirstate)
75 @interfaceutil.implementer(intdirstate.idirstate)
76 class dirstate(object):
76 class dirstate(object):
77 def __init__(
77 def __init__(
78 self,
78 self,
79 opener,
79 opener,
80 ui,
80 ui,
81 root,
81 root,
82 validate,
82 validate,
83 sparsematchfn,
83 sparsematchfn,
84 nodeconstants,
84 nodeconstants,
85 use_dirstate_v2,
85 use_dirstate_v2,
86 ):
86 ):
87 """Create a new dirstate object.
87 """Create a new dirstate object.
88
88
89 opener is an open()-like callable that can be used to open the
89 opener is an open()-like callable that can be used to open the
90 dirstate file; root is the root of the directory tracked by
90 dirstate file; root is the root of the directory tracked by
91 the dirstate.
91 the dirstate.
92 """
92 """
93 self._use_dirstate_v2 = use_dirstate_v2
93 self._use_dirstate_v2 = use_dirstate_v2
94 self._nodeconstants = nodeconstants
94 self._nodeconstants = nodeconstants
95 self._opener = opener
95 self._opener = opener
96 self._validate = validate
96 self._validate = validate
97 self._root = root
97 self._root = root
98 self._sparsematchfn = sparsematchfn
98 self._sparsematchfn = sparsematchfn
99 # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is
99 # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is
100 # UNC path pointing to root share (issue4557)
100 # UNC path pointing to root share (issue4557)
101 self._rootdir = pathutil.normasprefix(root)
101 self._rootdir = pathutil.normasprefix(root)
102 self._dirty = False
102 self._dirty = False
103 self._lastnormaltime = 0
103 self._lastnormaltime = 0
104 self._ui = ui
104 self._ui = ui
105 self._filecache = {}
105 self._filecache = {}
106 self._parentwriters = 0
106 self._parentwriters = 0
107 self._filename = b'dirstate'
107 self._filename = b'dirstate'
108 self._pendingfilename = b'%s.pending' % self._filename
108 self._pendingfilename = b'%s.pending' % self._filename
109 self._plchangecallbacks = {}
109 self._plchangecallbacks = {}
110 self._origpl = None
110 self._origpl = None
111 self._updatedfiles = set()
111 self._updatedfiles = set()
112 self._mapcls = dirstatemap
112 self._mapcls = dirstatemap
113 # Access and cache cwd early, so we don't access it for the first time
113 # Access and cache cwd early, so we don't access it for the first time
114 # after a working-copy update caused it to not exist (accessing it then
114 # after a working-copy update caused it to not exist (accessing it then
115 # raises an exception).
115 # raises an exception).
116 self._cwd
116 self._cwd
117
117
118 def prefetch_parents(self):
118 def prefetch_parents(self):
119 """make sure the parents are loaded
119 """make sure the parents are loaded
120
120
121 Used to avoid a race condition.
121 Used to avoid a race condition.
122 """
122 """
123 self._pl
123 self._pl
124
124
125 @contextlib.contextmanager
125 @contextlib.contextmanager
126 def parentchange(self):
126 def parentchange(self):
127 """Context manager for handling dirstate parents.
127 """Context manager for handling dirstate parents.
128
128
129 If an exception occurs in the scope of the context manager,
129 If an exception occurs in the scope of the context manager,
130 the incoherent dirstate won't be written when wlock is
130 the incoherent dirstate won't be written when wlock is
131 released.
131 released.
132 """
132 """
133 self._parentwriters += 1
133 self._parentwriters += 1
134 yield
134 yield
135 # Typically we want the "undo" step of a context manager in a
135 # Typically we want the "undo" step of a context manager in a
136 # finally block so it happens even when an exception
136 # finally block so it happens even when an exception
137 # occurs. In this case, however, we only want to decrement
137 # occurs. In this case, however, we only want to decrement
138 # parentwriters if the code in the with statement exits
138 # parentwriters if the code in the with statement exits
139 # normally, so we don't have a try/finally here on purpose.
139 # normally, so we don't have a try/finally here on purpose.
140 self._parentwriters -= 1
140 self._parentwriters -= 1
141
141
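A usage sketch (hypothetical `ds` dirstate and nodeids; grounded in the setparents() guard further down, which raises ValueError when _parentwriters is zero):

    with ds.parentchange():
        ds.setparents(p1, p2)   # permitted: _parentwriters > 0 here
    # calling ds.setparents(p1, p2) outside the block raises ValueError
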
142 def pendingparentchange(self):
142 def pendingparentchange(self):
143 """Returns true if the dirstate is in the middle of a set of changes
143 """Returns true if the dirstate is in the middle of a set of changes
144 that modify the dirstate parent.
144 that modify the dirstate parent.
145 """
145 """
146 return self._parentwriters > 0
146 return self._parentwriters > 0
147
147
148 @propertycache
148 @propertycache
149 def _map(self):
149 def _map(self):
150 """Return the dirstate contents (see documentation for dirstatemap)."""
150 """Return the dirstate contents (see documentation for dirstatemap)."""
151 self._map = self._mapcls(
151 self._map = self._mapcls(
152 self._ui,
152 self._ui,
153 self._opener,
153 self._opener,
154 self._root,
154 self._root,
155 self._nodeconstants,
155 self._nodeconstants,
156 self._use_dirstate_v2,
156 self._use_dirstate_v2,
157 )
157 )
158 return self._map
158 return self._map
159
159
160 @property
160 @property
161 def _sparsematcher(self):
161 def _sparsematcher(self):
162 """The matcher for the sparse checkout.
162 """The matcher for the sparse checkout.
163
163
164 The working directory may not include every file from a manifest. The
164 The working directory may not include every file from a manifest. The
165 matcher obtained by this property will match a path if it is to be
165 matcher obtained by this property will match a path if it is to be
166 included in the working directory.
166 included in the working directory.
167 """
167 """
168 # TODO there is potential to cache this property. For now, the matcher
168 # TODO there is potential to cache this property. For now, the matcher
169 # is resolved on every access. (But the called function does use a
169 # is resolved on every access. (But the called function does use a
170 # cache to keep the lookup fast.)
170 # cache to keep the lookup fast.)
171 return self._sparsematchfn()
171 return self._sparsematchfn()
172
172
173 @repocache(b'branch')
173 @repocache(b'branch')
174 def _branch(self):
174 def _branch(self):
175 try:
175 try:
176 return self._opener.read(b"branch").strip() or b"default"
176 return self._opener.read(b"branch").strip() or b"default"
177 except IOError as inst:
177 except IOError as inst:
178 if inst.errno != errno.ENOENT:
178 if inst.errno != errno.ENOENT:
179 raise
179 raise
180 return b"default"
180 return b"default"
181
181
182 @property
182 @property
183 def _pl(self):
183 def _pl(self):
184 return self._map.parents()
184 return self._map.parents()
185
185
186 def hasdir(self, d):
186 def hasdir(self, d):
187 return self._map.hastrackeddir(d)
187 return self._map.hastrackeddir(d)
188
188
189 @rootcache(b'.hgignore')
189 @rootcache(b'.hgignore')
190 def _ignore(self):
190 def _ignore(self):
191 files = self._ignorefiles()
191 files = self._ignorefiles()
192 if not files:
192 if not files:
193 return matchmod.never()
193 return matchmod.never()
194
194
195 pats = [b'include:%s' % f for f in files]
195 pats = [b'include:%s' % f for f in files]
196 return matchmod.match(self._root, b'', [], pats, warn=self._ui.warn)
196 return matchmod.match(self._root, b'', [], pats, warn=self._ui.warn)
197
197
198 @propertycache
198 @propertycache
199 def _slash(self):
199 def _slash(self):
200 return self._ui.configbool(b'ui', b'slash') and pycompat.ossep != b'/'
200 return self._ui.configbool(b'ui', b'slash') and pycompat.ossep != b'/'
201
201
202 @propertycache
202 @propertycache
203 def _checklink(self):
203 def _checklink(self):
204 return util.checklink(self._root)
204 return util.checklink(self._root)
205
205
206 @propertycache
206 @propertycache
207 def _checkexec(self):
207 def _checkexec(self):
208 return bool(util.checkexec(self._root))
208 return bool(util.checkexec(self._root))
209
209
210 @propertycache
210 @propertycache
211 def _checkcase(self):
211 def _checkcase(self):
212 return not util.fscasesensitive(self._join(b'.hg'))
212 return not util.fscasesensitive(self._join(b'.hg'))
213
213
214 def _join(self, f):
214 def _join(self, f):
215 # much faster than os.path.join()
215 # much faster than os.path.join()
216 # it's safe because f is always a relative path
216 # it's safe because f is always a relative path
217 return self._rootdir + f
217 return self._rootdir + f
218
218
219 def flagfunc(self, buildfallback):
219 def flagfunc(self, buildfallback):
220 if self._checklink and self._checkexec:
220 if self._checklink and self._checkexec:
221
221
222 def f(x):
222 def f(x):
223 try:
223 try:
224 st = os.lstat(self._join(x))
224 st = os.lstat(self._join(x))
225 if util.statislink(st):
225 if util.statislink(st):
226 return b'l'
226 return b'l'
227 if util.statisexec(st):
227 if util.statisexec(st):
228 return b'x'
228 return b'x'
229 except OSError:
229 except OSError:
230 pass
230 pass
231 return b''
231 return b''
232
232
233 return f
233 return f
234
234
235 fallback = buildfallback()
235 fallback = buildfallback()
236 if self._checklink:
236 if self._checklink:
237
237
238 def f(x):
238 def f(x):
239 if os.path.islink(self._join(x)):
239 if os.path.islink(self._join(x)):
240 return b'l'
240 return b'l'
241 if b'x' in fallback(x):
241 if b'x' in fallback(x):
242 return b'x'
242 return b'x'
243 return b''
243 return b''
244
244
245 return f
245 return f
246 if self._checkexec:
246 if self._checkexec:
247
247
248 def f(x):
248 def f(x):
249 if b'l' in fallback(x):
249 if b'l' in fallback(x):
250 return b'l'
250 return b'l'
251 if util.isexec(self._join(x)):
251 if util.isexec(self._join(x)):
252 return b'x'
252 return b'x'
253 return b''
253 return b''
254
254
255 return f
255 return f
256 else:
256 else:
257 return fallback
257 return fallback
258
258
259 @propertycache
259 @propertycache
260 def _cwd(self):
260 def _cwd(self):
261 # internal config: ui.forcecwd
261 # internal config: ui.forcecwd
262 forcecwd = self._ui.config(b'ui', b'forcecwd')
262 forcecwd = self._ui.config(b'ui', b'forcecwd')
263 if forcecwd:
263 if forcecwd:
264 return forcecwd
264 return forcecwd
265 return encoding.getcwd()
265 return encoding.getcwd()
266
266
267 def getcwd(self):
267 def getcwd(self):
268 """Return the path from which a canonical path is calculated.
268 """Return the path from which a canonical path is calculated.
269
269
270 This path should be used to resolve file patterns or to convert
270 This path should be used to resolve file patterns or to convert
271 canonical paths back to file paths for display. It shouldn't be
271 canonical paths back to file paths for display. It shouldn't be
272 used to get real file paths. Use vfs functions instead.
272 used to get real file paths. Use vfs functions instead.
273 """
273 """
274 cwd = self._cwd
274 cwd = self._cwd
275 if cwd == self._root:
275 if cwd == self._root:
276 return b''
276 return b''
277 # self._root ends with a path separator if self._root is '/' or 'C:\'
277 # self._root ends with a path separator if self._root is '/' or 'C:\'
278 rootsep = self._root
278 rootsep = self._root
279 if not util.endswithsep(rootsep):
279 if not util.endswithsep(rootsep):
280 rootsep += pycompat.ossep
280 rootsep += pycompat.ossep
281 if cwd.startswith(rootsep):
281 if cwd.startswith(rootsep):
282 return cwd[len(rootsep) :]
282 return cwd[len(rootsep) :]
283 else:
283 else:
284 # we're outside the repo. return an absolute path.
284 # we're outside the repo. return an absolute path.
285 return cwd
285 return cwd
286
286
287 def pathto(self, f, cwd=None):
287 def pathto(self, f, cwd=None):
288 if cwd is None:
288 if cwd is None:
289 cwd = self.getcwd()
289 cwd = self.getcwd()
290 path = util.pathto(self._root, cwd, f)
290 path = util.pathto(self._root, cwd, f)
291 if self._slash:
291 if self._slash:
292 return util.pconvert(path)
292 return util.pconvert(path)
293 return path
293 return path
294
294
295 def __getitem__(self, key):
295 def __getitem__(self, key):
296 """Return the current state of key (a filename) in the dirstate.
296 """Return the current state of key (a filename) in the dirstate.
297
297
298 States are:
298 States are:
299 n normal
299 n normal
300 m needs merging
300 m needs merging
301 r marked for removal
301 r marked for removal
302 a marked for addition
302 a marked for addition
303 ? not tracked
303 ? not tracked
304 """
304 """
305 return self._map.get(key, (b"?",))[0]
305 return self._map.get(key, (b"?",))[0]
306
306
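An illustrative lookup (hypothetical repo contents), matching the one-byte codes listed above; an unknown key falls back to the (b"?",) default tuple:

    state = ds[b'some/tracked/file']        # b'n' for a clean, tracked file
    assert ds[b'not/in/dirstate'] == b'?'   # untracked
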
307 def __contains__(self, key):
307 def __contains__(self, key):
308 return key in self._map
308 return key in self._map
309
309
310 def __iter__(self):
310 def __iter__(self):
311 return iter(sorted(self._map))
311 return iter(sorted(self._map))
312
312
313 def items(self):
313 def items(self):
314 return pycompat.iteritems(self._map)
314 return pycompat.iteritems(self._map)
315
315
316 iteritems = items
316 iteritems = items
317
317
318 def directories(self):
319 return self._map.directories()
320
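A minimal usage sketch for the new accessor (hypothetical `ds`; the element format is whatever the underlying dirstate map yields):

    for entry in ds.directories():   # delegates to self._map.directories()
        print(entry)
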
318 def parents(self):
321 def parents(self):
319 return [self._validate(p) for p in self._pl]
322 return [self._validate(p) for p in self._pl]
320
323
321 def p1(self):
324 def p1(self):
322 return self._validate(self._pl[0])
325 return self._validate(self._pl[0])
323
326
324 def p2(self):
327 def p2(self):
325 return self._validate(self._pl[1])
328 return self._validate(self._pl[1])
326
329
327 def branch(self):
330 def branch(self):
328 return encoding.tolocal(self._branch)
331 return encoding.tolocal(self._branch)
329
332
330 def setparents(self, p1, p2=None):
333 def setparents(self, p1, p2=None):
331 """Set dirstate parents to p1 and p2.
334 """Set dirstate parents to p1 and p2.
332
335
333 When moving from two parents to one, 'm' merged entries are
336 When moving from two parents to one, 'm' merged entries are
334 adjusted to normal and previous copy records are discarded and
337 adjusted to normal and previous copy records are discarded and
335 returned by the call.
338 returned by the call.
336
339
337 See localrepo.setparents()
340 See localrepo.setparents()
338 """
341 """
339 if p2 is None:
342 if p2 is None:
340 p2 = self._nodeconstants.nullid
343 p2 = self._nodeconstants.nullid
341 if self._parentwriters == 0:
344 if self._parentwriters == 0:
342 raise ValueError(
345 raise ValueError(
343 b"cannot set dirstate parent outside of "
346 b"cannot set dirstate parent outside of "
344 b"dirstate.parentchange context manager"
347 b"dirstate.parentchange context manager"
345 )
348 )
346
349
347 self._dirty = True
350 self._dirty = True
348 oldp2 = self._pl[1]
351 oldp2 = self._pl[1]
349 if self._origpl is None:
352 if self._origpl is None:
350 self._origpl = self._pl
353 self._origpl = self._pl
351 self._map.setparents(p1, p2)
354 self._map.setparents(p1, p2)
352 copies = {}
355 copies = {}
353 if (
356 if (
354 oldp2 != self._nodeconstants.nullid
357 oldp2 != self._nodeconstants.nullid
355 and p2 == self._nodeconstants.nullid
358 and p2 == self._nodeconstants.nullid
356 ):
359 ):
357 candidatefiles = self._map.non_normal_or_other_parent_paths()
360 candidatefiles = self._map.non_normal_or_other_parent_paths()
358
361
359 for f in candidatefiles:
362 for f in candidatefiles:
360 s = self._map.get(f)
363 s = self._map.get(f)
361 if s is None:
364 if s is None:
362 continue
365 continue
363
366
364 # Discard 'm' markers when moving away from a merge state
367 # Discard 'm' markers when moving away from a merge state
365 if s[0] == b'm':
368 if s[0] == b'm':
366 source = self._map.copymap.get(f)
369 source = self._map.copymap.get(f)
367 if source:
370 if source:
368 copies[f] = source
371 copies[f] = source
369 self.normallookup(f)
372 self.normallookup(f)
370 # Also fix up otherparent markers
373 # Also fix up otherparent markers
371 elif s[0] == b'n' and s[2] == -2:
374 elif s[0] == b'n' and s[2] == -2:
372 source = self._map.copymap.get(f)
375 source = self._map.copymap.get(f)
373 if source:
376 if source:
374 copies[f] = source
377 copies[f] = source
375 self.add(f)
378 self.add(f)
376 return copies
379 return copies
377
380
378 def setbranch(self, branch):
381 def setbranch(self, branch):
379 self.__class__._branch.set(self, encoding.fromlocal(branch))
382 self.__class__._branch.set(self, encoding.fromlocal(branch))
380 f = self._opener(b'branch', b'w', atomictemp=True, checkambig=True)
383 f = self._opener(b'branch', b'w', atomictemp=True, checkambig=True)
381 try:
384 try:
382 f.write(self._branch + b'\n')
385 f.write(self._branch + b'\n')
383 f.close()
386 f.close()
384
387
385 # make sure filecache has the correct stat info for _branch after
388 # make sure filecache has the correct stat info for _branch after
386 # replacing the underlying file
389 # replacing the underlying file
387 ce = self._filecache[b'_branch']
390 ce = self._filecache[b'_branch']
388 if ce:
391 if ce:
389 ce.refresh()
392 ce.refresh()
390 except: # re-raises
393 except: # re-raises
391 f.discard()
394 f.discard()
392 raise
395 raise
393
396
394 def invalidate(self):
397 def invalidate(self):
395 """Causes the next access to reread the dirstate.
398 """Causes the next access to reread the dirstate.
396
399
397 This is different from localrepo.invalidatedirstate() because it always
400 This is different from localrepo.invalidatedirstate() because it always
398 rereads the dirstate. Use localrepo.invalidatedirstate() if you want to
401 rereads the dirstate. Use localrepo.invalidatedirstate() if you want to
399 check whether the dirstate has changed before rereading it."""
402 check whether the dirstate has changed before rereading it."""
400
403
401 for a in ("_map", "_branch", "_ignore"):
404 for a in ("_map", "_branch", "_ignore"):
402 if a in self.__dict__:
405 if a in self.__dict__:
403 delattr(self, a)
406 delattr(self, a)
404 self._lastnormaltime = 0
407 self._lastnormaltime = 0
405 self._dirty = False
408 self._dirty = False
406 self._updatedfiles.clear()
409 self._updatedfiles.clear()
407 self._parentwriters = 0
410 self._parentwriters = 0
408 self._origpl = None
411 self._origpl = None
409
412
410 def copy(self, source, dest):
413 def copy(self, source, dest):
411 """Mark dest as a copy of source. Unmark dest if source is None."""
414 """Mark dest as a copy of source. Unmark dest if source is None."""
412 if source == dest:
415 if source == dest:
413 return
416 return
414 self._dirty = True
417 self._dirty = True
415 if source is not None:
418 if source is not None:
416 self._map.copymap[dest] = source
419 self._map.copymap[dest] = source
417 self._updatedfiles.add(source)
420 self._updatedfiles.add(source)
418 self._updatedfiles.add(dest)
421 self._updatedfiles.add(dest)
419 elif self._map.copymap.pop(dest, None):
422 elif self._map.copymap.pop(dest, None):
420 self._updatedfiles.add(dest)
423 self._updatedfiles.add(dest)
421
424
422 def copied(self, file):
425 def copied(self, file):
423 return self._map.copymap.get(file, None)
426 return self._map.copymap.get(file, None)
424
427
425 def copies(self):
428 def copies(self):
426 return self._map.copymap
429 return self._map.copymap
427
430
428 def _addpath(self, f, state, mode, size, mtime):
431 def _addpath(self, f, state, mode, size, mtime):
429 oldstate = self[f]
432 oldstate = self[f]
430 if state == b'a' or oldstate == b'r':
433 if state == b'a' or oldstate == b'r':
431 scmutil.checkfilename(f)
434 scmutil.checkfilename(f)
432 if self._map.hastrackeddir(f):
435 if self._map.hastrackeddir(f):
433 raise error.Abort(
436 raise error.Abort(
434 _(b'directory %r already in dirstate') % pycompat.bytestr(f)
437 _(b'directory %r already in dirstate') % pycompat.bytestr(f)
435 )
438 )
436 # shadows: a parent path of f must not already be tracked as a file
439 # shadows: a parent path of f must not already be tracked as a file
437 for d in pathutil.finddirs(f):
440 for d in pathutil.finddirs(f):
438 if self._map.hastrackeddir(d):
441 if self._map.hastrackeddir(d):
439 break
442 break
440 entry = self._map.get(d)
443 entry = self._map.get(d)
441 if entry is not None and entry[0] != b'r':
444 if entry is not None and entry[0] != b'r':
442 raise error.Abort(
445 raise error.Abort(
443 _(b'file %r in dirstate clashes with %r')
446 _(b'file %r in dirstate clashes with %r')
444 % (pycompat.bytestr(d), pycompat.bytestr(f))
447 % (pycompat.bytestr(d), pycompat.bytestr(f))
445 )
448 )
446 self._dirty = True
449 self._dirty = True
447 self._updatedfiles.add(f)
450 self._updatedfiles.add(f)
448 self._map.addfile(f, oldstate, state, mode, size, mtime)
451 self._map.addfile(f, oldstate, state, mode, size, mtime)
449
452
450 def normal(self, f, parentfiledata=None):
453 def normal(self, f, parentfiledata=None):
451 """Mark a file normal and clean.
454 """Mark a file normal and clean.
452
455
453 parentfiledata: (mode, size, mtime) of the clean file
456 parentfiledata: (mode, size, mtime) of the clean file
454
457
455 parentfiledata should be computed from memory (for mode,
458 parentfiledata should be computed from memory (for mode,
456 size), at, or as close as possible to, the point where we
459 size), at, or as close as possible to, the point where we
457 determined the file was clean, to limit the risk of the
460 determined the file was clean, to limit the risk of the
458 file having been changed by an external process between the
461 file having been changed by an external process between the
459 moment where the file was determined to be clean and now."""
462 moment where the file was determined to be clean and now."""
460 if parentfiledata:
463 if parentfiledata:
461 (mode, size, mtime) = parentfiledata
464 (mode, size, mtime) = parentfiledata
462 else:
465 else:
463 s = os.lstat(self._join(f))
466 s = os.lstat(self._join(f))
464 mode = s.st_mode
467 mode = s.st_mode
465 size = s.st_size
468 size = s.st_size
466 mtime = s[stat.ST_MTIME]
469 mtime = s[stat.ST_MTIME]
467 self._addpath(f, b'n', mode, size & _rangemask, mtime & _rangemask)
470 self._addpath(f, b'n', mode, size & _rangemask, mtime & _rangemask)
468 self._map.copymap.pop(f, None)
471 self._map.copymap.pop(f, None)
469 if f in self._map.nonnormalset:
472 if f in self._map.nonnormalset:
470 self._map.nonnormalset.remove(f)
473 self._map.nonnormalset.remove(f)
471 if mtime > self._lastnormaltime:
474 if mtime > self._lastnormaltime:
472 # Remember the most recent modification timeslot for status(),
475 # Remember the most recent modification timeslot for status(),
473 # to make sure we won't miss future size-preserving file content
476 # to make sure we won't miss future size-preserving file content
474 # modifications that happen within the same timeslot.
477 # modifications that happen within the same timeslot.
475 self._lastnormaltime = mtime
478 self._lastnormaltime = mtime
476
479
477 def normallookup(self, f):
480 def normallookup(self, f):
478 '''Mark a file normal, but possibly dirty.'''
481 '''Mark a file normal, but possibly dirty.'''
479 if self._pl[1] != self._nodeconstants.nullid:
482 if self._pl[1] != self._nodeconstants.nullid:
480 # if there is a merge going on and the file was either
483 # if there is a merge going on and the file was either
481 # in state 'm' (-1) or coming from other parent (-2) before
484 # in state 'm' (-1) or coming from other parent (-2) before
482 # being removed, restore that state.
485 # being removed, restore that state.
483 entry = self._map.get(f)
486 entry = self._map.get(f)
484 if entry is not None:
487 if entry is not None:
485 if entry[0] == b'r' and entry[2] in (-1, -2):
488 if entry[0] == b'r' and entry[2] in (-1, -2):
486 source = self._map.copymap.get(f)
489 source = self._map.copymap.get(f)
487 if entry[2] == -1:
490 if entry[2] == -1:
488 self.merge(f)
491 self.merge(f)
489 elif entry[2] == -2:
492 elif entry[2] == -2:
490 self.otherparent(f)
493 self.otherparent(f)
491 if source:
494 if source:
492 self.copy(source, f)
495 self.copy(source, f)
493 return
496 return
494 if entry[0] == b'm' or entry[0] == b'n' and entry[2] == -2:
497 if entry[0] == b'm' or entry[0] == b'n' and entry[2] == -2:
495 return
498 return
496 self._addpath(f, b'n', 0, -1, -1)
499 self._addpath(f, b'n', 0, -1, -1)
497 self._map.copymap.pop(f, None)
500 self._map.copymap.pop(f, None)
498
501
499 def otherparent(self, f):
502 def otherparent(self, f):
500 '''Mark as coming from the other parent, always dirty.'''
503 '''Mark as coming from the other parent, always dirty.'''
501 if self._pl[1] == self._nodeconstants.nullid:
504 if self._pl[1] == self._nodeconstants.nullid:
502 raise error.Abort(
505 raise error.Abort(
503 _(b"setting %r to other parent only allowed in merges") % f
506 _(b"setting %r to other parent only allowed in merges") % f
504 )
507 )
505 if f in self and self[f] == b'n':
508 if f in self and self[f] == b'n':
506 # merge-like
509 # merge-like
507 self._addpath(f, b'm', 0, -2, -1)
510 self._addpath(f, b'm', 0, -2, -1)
508 else:
511 else:
509 # add-like
512 # add-like
510 self._addpath(f, b'n', 0, -2, -1)
513 self._addpath(f, b'n', 0, -2, -1)
511 self._map.copymap.pop(f, None)
514 self._map.copymap.pop(f, None)
512
515
513 def add(self, f):
516 def add(self, f):
514 '''Mark a file added.'''
517 '''Mark a file added.'''
515 self._addpath(f, b'a', 0, -1, -1)
518 self._addpath(f, b'a', 0, -1, -1)
516 self._map.copymap.pop(f, None)
519 self._map.copymap.pop(f, None)
517
520
518 def remove(self, f):
521 def remove(self, f):
519 '''Mark a file removed.'''
522 '''Mark a file removed.'''
520 self._dirty = True
523 self._dirty = True
521 oldstate = self[f]
524 oldstate = self[f]
522 size = 0
525 size = 0
523 if self._pl[1] != self._nodeconstants.nullid:
526 if self._pl[1] != self._nodeconstants.nullid:
524 entry = self._map.get(f)
527 entry = self._map.get(f)
525 if entry is not None:
528 if entry is not None:
526 # backup the previous state
529 # backup the previous state
527 if entry[0] == b'm': # merge
530 if entry[0] == b'm': # merge
528 size = -1
531 size = -1
529 elif entry[0] == b'n' and entry[2] == -2: # other parent
532 elif entry[0] == b'n' and entry[2] == -2: # other parent
530 size = -2
533 size = -2
531 self._map.otherparentset.add(f)
534 self._map.otherparentset.add(f)
532 self._updatedfiles.add(f)
535 self._updatedfiles.add(f)
533 self._map.removefile(f, oldstate, size)
536 self._map.removefile(f, oldstate, size)
534 if size == 0:
537 if size == 0:
535 self._map.copymap.pop(f, None)
538 self._map.copymap.pop(f, None)
536
539
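For reference, a summary of the size markers written by remove() during a merge (a sketch derived from the branches above, not part of the module):

    REMOVED_SIZE_MARKERS = {
        0: "plain removal (any copy record is dropped)",
        -1: "entry was in state 'm' (merged) before removal",
        -2: "entry came from the other parent before removal",
    }
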
537 def merge(self, f):
540 def merge(self, f):
538 '''Mark a file merged.'''
541 '''Mark a file merged.'''
539 if self._pl[1] == self._nodeconstants.nullid:
542 if self._pl[1] == self._nodeconstants.nullid:
540 return self.normallookup(f)
543 return self.normallookup(f)
541 return self.otherparent(f)
544 return self.otherparent(f)
542
545
543 def drop(self, f):
546 def drop(self, f):
544 '''Drop a file from the dirstate'''
547 '''Drop a file from the dirstate'''
545 oldstate = self[f]
548 oldstate = self[f]
546 if self._map.dropfile(f, oldstate):
549 if self._map.dropfile(f, oldstate):
547 self._dirty = True
550 self._dirty = True
548 self._updatedfiles.add(f)
551 self._updatedfiles.add(f)
549 self._map.copymap.pop(f, None)
552 self._map.copymap.pop(f, None)
550
553
551 def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
554 def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
552 if exists is None:
555 if exists is None:
553 exists = os.path.lexists(os.path.join(self._root, path))
556 exists = os.path.lexists(os.path.join(self._root, path))
554 if not exists:
557 if not exists:
555 # Maybe a path component exists
558 # Maybe a path component exists
556 if not ignoremissing and b'/' in path:
559 if not ignoremissing and b'/' in path:
557 d, f = path.rsplit(b'/', 1)
560 d, f = path.rsplit(b'/', 1)
558 d = self._normalize(d, False, ignoremissing, None)
561 d = self._normalize(d, False, ignoremissing, None)
559 folded = d + b"/" + f
562 folded = d + b"/" + f
560 else:
563 else:
561 # No path components, preserve original case
564 # No path components, preserve original case
562 folded = path
565 folded = path
563 else:
566 else:
564 # recursively normalize leading directory components
567 # recursively normalize leading directory components
565 # against dirstate
568 # against dirstate
566 if b'/' in normed:
569 if b'/' in normed:
567 d, f = normed.rsplit(b'/', 1)
570 d, f = normed.rsplit(b'/', 1)
568 d = self._normalize(d, False, ignoremissing, True)
571 d = self._normalize(d, False, ignoremissing, True)
569 r = self._root + b"/" + d
572 r = self._root + b"/" + d
570 folded = d + b"/" + util.fspath(f, r)
573 folded = d + b"/" + util.fspath(f, r)
571 else:
574 else:
572 folded = util.fspath(normed, self._root)
575 folded = util.fspath(normed, self._root)
573 storemap[normed] = folded
576 storemap[normed] = folded
574
577
575 return folded
578 return folded
576
579
577 def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
580 def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
578 normed = util.normcase(path)
581 normed = util.normcase(path)
579 folded = self._map.filefoldmap.get(normed, None)
582 folded = self._map.filefoldmap.get(normed, None)
580 if folded is None:
583 if folded is None:
581 if isknown:
584 if isknown:
582 folded = path
585 folded = path
583 else:
586 else:
584 folded = self._discoverpath(
587 folded = self._discoverpath(
585 path, normed, ignoremissing, exists, self._map.filefoldmap
588 path, normed, ignoremissing, exists, self._map.filefoldmap
586 )
589 )
587 return folded
590 return folded
588
591
589 def _normalize(self, path, isknown, ignoremissing=False, exists=None):
592 def _normalize(self, path, isknown, ignoremissing=False, exists=None):
590 normed = util.normcase(path)
593 normed = util.normcase(path)
591 folded = self._map.filefoldmap.get(normed, None)
594 folded = self._map.filefoldmap.get(normed, None)
592 if folded is None:
595 if folded is None:
593 folded = self._map.dirfoldmap.get(normed, None)
596 folded = self._map.dirfoldmap.get(normed, None)
594 if folded is None:
597 if folded is None:
595 if isknown:
598 if isknown:
596 folded = path
599 folded = path
597 else:
600 else:
598 # store discovered result in dirfoldmap so that future
601 # store discovered result in dirfoldmap so that future
599 # normalizefile calls don't start matching directories
602 # normalizefile calls don't start matching directories
600 folded = self._discoverpath(
603 folded = self._discoverpath(
601 path, normed, ignoremissing, exists, self._map.dirfoldmap
604 path, normed, ignoremissing, exists, self._map.dirfoldmap
602 )
605 )
603 return folded
606 return folded
604
607
605 def normalize(self, path, isknown=False, ignoremissing=False):
608 def normalize(self, path, isknown=False, ignoremissing=False):
606 """
609 """
607 normalize the case of a pathname when on a casefolding filesystem
610 normalize the case of a pathname when on a casefolding filesystem
608
611
609 isknown specifies whether the filename came from walking the
612 isknown specifies whether the filename came from walking the
610 disk, to avoid extra filesystem access.
613 disk, to avoid extra filesystem access.
611
614
612 If ignoremissing is True, missing paths are returned
615 If ignoremissing is True, missing paths are returned
613 unchanged. Otherwise, we try harder to normalize possibly
616 unchanged. Otherwise, we try harder to normalize possibly
614 existing path components.
617 existing path components.
615
618
616 The normalized case is determined based on the following precedence:
619 The normalized case is determined based on the following precedence:
617
620
618 - version of name already stored in the dirstate
621 - version of name already stored in the dirstate
619 - version of name stored on disk
622 - version of name stored on disk
620 - version provided via command arguments
623 - version provided via command arguments
621 """
624 """
622
625
623 if self._checkcase:
626 if self._checkcase:
624 return self._normalize(path, isknown, ignoremissing)
627 return self._normalize(path, isknown, ignoremissing)
625 return path
628 return path
626
629
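A rough standalone analogue of the precedence above (assuming a case-insensitive filesystem; os.path.normcase stands in for the dirstate's foldmap lookups):

    import os

    def normalize_sketch(path, known_paths):
        # Prefer the spelling already stored for the case-folded key;
        # otherwise fall back to the caller-provided spelling.
        folded = {os.path.normcase(p): p for p in known_paths}
        return folded.get(os.path.normcase(path), path)

    # normalize_sketch('FOO.TXT', ['foo.txt']) -> 'foo.txt'
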
627 def clear(self):
630 def clear(self):
628 self._map.clear()
631 self._map.clear()
629 self._lastnormaltime = 0
632 self._lastnormaltime = 0
630 self._updatedfiles.clear()
633 self._updatedfiles.clear()
631 self._dirty = True
634 self._dirty = True
632
635
633 def rebuild(self, parent, allfiles, changedfiles=None):
636 def rebuild(self, parent, allfiles, changedfiles=None):
634 if changedfiles is None:
637 if changedfiles is None:
635 # Rebuild entire dirstate
638 # Rebuild entire dirstate
636 to_lookup = allfiles
639 to_lookup = allfiles
637 to_drop = []
640 to_drop = []
638 lastnormaltime = self._lastnormaltime
641 lastnormaltime = self._lastnormaltime
639 self.clear()
642 self.clear()
640 self._lastnormaltime = lastnormaltime
643 self._lastnormaltime = lastnormaltime
641 elif len(changedfiles) < 10:
644 elif len(changedfiles) < 10:
642 # Avoid turning allfiles into a set, which can be expensive if it's
645 # Avoid turning allfiles into a set, which can be expensive if it's
643 # large.
646 # large.
644 to_lookup = []
647 to_lookup = []
645 to_drop = []
648 to_drop = []
646 for f in changedfiles:
649 for f in changedfiles:
647 if f in allfiles:
650 if f in allfiles:
648 to_lookup.append(f)
651 to_lookup.append(f)
649 else:
652 else:
650 to_drop.append(f)
653 to_drop.append(f)
651 else:
654 else:
652 changedfilesset = set(changedfiles)
655 changedfilesset = set(changedfiles)
653 to_lookup = changedfilesset & set(allfiles)
656 to_lookup = changedfilesset & set(allfiles)
654 to_drop = changedfilesset - to_lookup
657 to_drop = changedfilesset - to_lookup
655
658
656 if self._origpl is None:
659 if self._origpl is None:
657 self._origpl = self._pl
660 self._origpl = self._pl
658 self._map.setparents(parent, self._nodeconstants.nullid)
661 self._map.setparents(parent, self._nodeconstants.nullid)
659
662
660 for f in to_lookup:
663 for f in to_lookup:
661 self.normallookup(f)
664 self.normallookup(f)
662 for f in to_drop:
665 for f in to_drop:
663 self.drop(f)
666 self.drop(f)
664
667
665 self._dirty = True
668 self._dirty = True
666
669
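The partitioning logic above, in isolation (a sketch; the < 10 cutoff mirrors the code's heuristic for not building a set() out of a potentially huge allfiles):

    def partition_changes(allfiles, changedfiles):
        if len(changedfiles) < 10:
            # cheap path: membership probes against allfiles as-is
            to_lookup = [f for f in changedfiles if f in allfiles]
            to_drop = [f for f in changedfiles if f not in allfiles]
        else:
            changed = set(changedfiles)
            to_lookup = changed & set(allfiles)
            to_drop = changed - to_lookup
        return to_lookup, to_drop
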
667 def identity(self):
670 def identity(self):
668 """Return identity of dirstate itself to detect changing in storage
671 """Return identity of dirstate itself to detect changing in storage
669
672
670 If the identity of the previous dirstate is equal to this one, writing
673 If the identity of the previous dirstate is equal to this one, writing
671 out changes based on the former dirstate can preserve consistency.
674 out changes based on the former dirstate can preserve consistency.
672 """
675 """
673 return self._map.identity
676 return self._map.identity
674
677
675 def write(self, tr):
678 def write(self, tr):
676 if not self._dirty:
679 if not self._dirty:
677 return
680 return
678
681
679 filename = self._filename
682 filename = self._filename
680 if tr:
683 if tr:
681 # 'dirstate.write()' is not only for writing in-memory
684 # 'dirstate.write()' is not only for writing in-memory
682 # changes out, but also for dropping ambiguous timestamps.
685 # changes out, but also for dropping ambiguous timestamps.
683 # delayed writing would re-raise the "ambiguous timestamp issue".
686 # delayed writing would re-raise the "ambiguous timestamp issue".
684 # See also the wiki page below for detail:
687 # See also the wiki page below for detail:
685 # https://www.mercurial-scm.org/wiki/DirstateTransactionPlan
688 # https://www.mercurial-scm.org/wiki/DirstateTransactionPlan
686
689
687 # emulate dropping timestamp in 'parsers.pack_dirstate'
690 # emulate dropping timestamp in 'parsers.pack_dirstate'
688 now = _getfsnow(self._opener)
691 now = _getfsnow(self._opener)
689 self._map.clearambiguoustimes(self._updatedfiles, now)
692 self._map.clearambiguoustimes(self._updatedfiles, now)
690
693
691 # emulate that all 'dirstate.normal' results are written out
694 # emulate that all 'dirstate.normal' results are written out
692 self._lastnormaltime = 0
695 self._lastnormaltime = 0
693 self._updatedfiles.clear()
696 self._updatedfiles.clear()
694
697
695 # delay writing in-memory changes out
698 # delay writing in-memory changes out
696 tr.addfilegenerator(
699 tr.addfilegenerator(
697 b'dirstate',
700 b'dirstate',
698 (self._filename,),
701 (self._filename,),
699 self._writedirstate,
702 self._writedirstate,
700 location=b'plain',
703 location=b'plain',
701 )
704 )
702 return
705 return
703
706
704 st = self._opener(filename, b"w", atomictemp=True, checkambig=True)
707 st = self._opener(filename, b"w", atomictemp=True, checkambig=True)
705 self._writedirstate(st)
708 self._writedirstate(st)
706
709
707 def addparentchangecallback(self, category, callback):
710 def addparentchangecallback(self, category, callback):
708 """add a callback to be called when the wd parents are changed
711 """add a callback to be called when the wd parents are changed
709
712
710 Callback will be called with the following arguments:
713 Callback will be called with the following arguments:
711 dirstate, (oldp1, oldp2), (newp1, newp2)
714 dirstate, (oldp1, oldp2), (newp1, newp2)
712
715
713 Category is a unique identifier to allow overwriting an old callback
716 Category is a unique identifier to allow overwriting an old callback
714 with a newer callback.
717 with a newer callback.
715 """
718 """
716 self._plchangecallbacks[category] = callback
719 self._plchangecallbacks[category] = callback
717
720
718 def _writedirstate(self, st):
721 def _writedirstate(self, st):
719 # notify callbacks about parents change
722 # notify callbacks about parents change
720 if self._origpl is not None and self._origpl != self._pl:
723 if self._origpl is not None and self._origpl != self._pl:
721 for c, callback in sorted(
724 for c, callback in sorted(
722 pycompat.iteritems(self._plchangecallbacks)
725 pycompat.iteritems(self._plchangecallbacks)
723 ):
726 ):
724 callback(self, self._origpl, self._pl)
727 callback(self, self._origpl, self._pl)
725 self._origpl = None
728 self._origpl = None
726 # use the modification time of the newly created temporary file as the
729 # use the modification time of the newly created temporary file as the
727 # filesystem's notion of 'now'
730 # filesystem's notion of 'now'
728 now = util.fstat(st)[stat.ST_MTIME] & _rangemask
731 now = util.fstat(st)[stat.ST_MTIME] & _rangemask
729
732
730 # a large enough 'delaywrite' prevents 'pack_dirstate' from dropping
733 # a large enough 'delaywrite' prevents 'pack_dirstate' from dropping
731 # the timestamp of each entry in the dirstate, because of 'now > mtime'
734 # the timestamp of each entry in the dirstate, because of 'now > mtime'
732 delaywrite = self._ui.configint(b'debug', b'dirstate.delaywrite')
735 delaywrite = self._ui.configint(b'debug', b'dirstate.delaywrite')
733 if delaywrite > 0:
736 if delaywrite > 0:
734 # do we have any files to delay for?
737 # do we have any files to delay for?
735 for f, e in pycompat.iteritems(self._map):
738 for f, e in pycompat.iteritems(self._map):
736 if e[0] == b'n' and e[3] == now:
739 if e[0] == b'n' and e[3] == now:
737 import time # delayed import to avoid a useless module-level import
740 import time # delayed import to avoid a useless module-level import
738
741
739 # rather than sleep n seconds, sleep until the next
742 # rather than sleep n seconds, sleep until the next
740 # multiple of n seconds
743 # multiple of n seconds
741 clock = time.time()
744 clock = time.time()
742 start = int(clock) - (int(clock) % delaywrite)
745 start = int(clock) - (int(clock) % delaywrite)
743 end = start + delaywrite
746 end = start + delaywrite
744 time.sleep(end - clock)
747 time.sleep(end - clock)
745 now = end # trust our estimate that the end is near now
748 now = end # trust our estimate that the end is near now
746 break
749 break
747
750
748 self._map.write(st, now)
751 self._map.write(st, now)
749 self._lastnormaltime = 0
752 self._lastnormaltime = 0
750 self._dirty = False
753 self._dirty = False
751
754
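A worked instance of the rounding arithmetic in the delaywrite branch (illustration only): with delaywrite = 2 and clock = 103.4,

    clock = 103.4
    delaywrite = 2
    start = int(clock) - (int(clock) % delaywrite)   # 102
    end = start + delaywrite                         # 104
    sleep_for = end - clock                          # 0.6 seconds
    # after sleeping, 'now' is taken to be 104, so now > mtime holds for
    # entries stamped in the current timeslot
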
752 def _dirignore(self, f):
755 def _dirignore(self, f):
753 if self._ignore(f):
756 if self._ignore(f):
754 return True
757 return True
755 for p in pathutil.finddirs(f):
758 for p in pathutil.finddirs(f):
756 if self._ignore(p):
759 if self._ignore(p):
757 return True
760 return True
758 return False
761 return False
759
762
760 def _ignorefiles(self):
763 def _ignorefiles(self):
761 files = []
764 files = []
762 if os.path.exists(self._join(b'.hgignore')):
765 if os.path.exists(self._join(b'.hgignore')):
763 files.append(self._join(b'.hgignore'))
766 files.append(self._join(b'.hgignore'))
764 for name, path in self._ui.configitems(b"ui"):
767 for name, path in self._ui.configitems(b"ui"):
765 if name == b'ignore' or name.startswith(b'ignore.'):
768 if name == b'ignore' or name.startswith(b'ignore.'):
766 # we need to use os.path.join here rather than self._join
769 # we need to use os.path.join here rather than self._join
767 # because path is arbitrary and user-specified
770 # because path is arbitrary and user-specified
768 files.append(os.path.join(self._rootdir, util.expandpath(path)))
771 files.append(os.path.join(self._rootdir, util.expandpath(path)))
769 return files
772 return files
770
773
771 def _ignorefileandline(self, f):
774 def _ignorefileandline(self, f):
772 files = collections.deque(self._ignorefiles())
775 files = collections.deque(self._ignorefiles())
773 visited = set()
776 visited = set()
774 while files:
777 while files:
775 i = files.popleft()
778 i = files.popleft()
776 patterns = matchmod.readpatternfile(
779 patterns = matchmod.readpatternfile(
777 i, self._ui.warn, sourceinfo=True
780 i, self._ui.warn, sourceinfo=True
778 )
781 )
779 for pattern, lineno, line in patterns:
782 for pattern, lineno, line in patterns:
780 kind, p = matchmod._patsplit(pattern, b'glob')
783 kind, p = matchmod._patsplit(pattern, b'glob')
781 if kind == b"subinclude":
784 if kind == b"subinclude":
782 if p not in visited:
785 if p not in visited:
783 files.append(p)
786 files.append(p)
784 continue
787 continue
785 m = matchmod.match(
788 m = matchmod.match(
786 self._root, b'', [], [pattern], warn=self._ui.warn
789 self._root, b'', [], [pattern], warn=self._ui.warn
787 )
790 )
788 if m(f):
791 if m(f):
789 return (i, lineno, line)
792 return (i, lineno, line)
790 visited.add(i)
793 visited.add(i)
791 return (None, -1, b"")
794 return (None, -1, b"")
792
795
793 def _walkexplicit(self, match, subrepos):
796 def _walkexplicit(self, match, subrepos):
794 """Get stat data about the files explicitly specified by match.
797 """Get stat data about the files explicitly specified by match.
795
798
796 Return a triple (results, dirsfound, dirsnotfound).
799 Return a triple (results, dirsfound, dirsnotfound).
797 - results is a mapping from filename to stat result. It also contains
800 - results is a mapping from filename to stat result. It also contains
798 listings mapping subrepos and .hg to None.
801 listings mapping subrepos and .hg to None.
799 - dirsfound is a list of files found to be directories.
802 - dirsfound is a list of files found to be directories.
800 - dirsnotfound is a list of files that the dirstate thinks are
803 - dirsnotfound is a list of files that the dirstate thinks are
801 directories and that were not found."""
804 directories and that were not found."""
802
805
803 def badtype(mode):
806 def badtype(mode):
804 kind = _(b'unknown')
807 kind = _(b'unknown')
805 if stat.S_ISCHR(mode):
808 if stat.S_ISCHR(mode):
806 kind = _(b'character device')
809 kind = _(b'character device')
807 elif stat.S_ISBLK(mode):
810 elif stat.S_ISBLK(mode):
808 kind = _(b'block device')
811 kind = _(b'block device')
809 elif stat.S_ISFIFO(mode):
812 elif stat.S_ISFIFO(mode):
810 kind = _(b'fifo')
813 kind = _(b'fifo')
811 elif stat.S_ISSOCK(mode):
814 elif stat.S_ISSOCK(mode):
812 kind = _(b'socket')
815 kind = _(b'socket')
813 elif stat.S_ISDIR(mode):
816 elif stat.S_ISDIR(mode):
814 kind = _(b'directory')
817 kind = _(b'directory')
815 return _(b'unsupported file type (type is %s)') % kind
818 return _(b'unsupported file type (type is %s)') % kind
816
819
817 badfn = match.bad
820 badfn = match.bad
818 dmap = self._map
821 dmap = self._map
819 lstat = os.lstat
822 lstat = os.lstat
820 getkind = stat.S_IFMT
823 getkind = stat.S_IFMT
821 dirkind = stat.S_IFDIR
824 dirkind = stat.S_IFDIR
822 regkind = stat.S_IFREG
825 regkind = stat.S_IFREG
823 lnkkind = stat.S_IFLNK
826 lnkkind = stat.S_IFLNK
824 join = self._join
827 join = self._join
825 dirsfound = []
828 dirsfound = []
826 foundadd = dirsfound.append
829 foundadd = dirsfound.append
827 dirsnotfound = []
830 dirsnotfound = []
828 notfoundadd = dirsnotfound.append
831 notfoundadd = dirsnotfound.append
829
832
830 if not match.isexact() and self._checkcase:
833 if not match.isexact() and self._checkcase:
831 normalize = self._normalize
834 normalize = self._normalize
832 else:
835 else:
833 normalize = None
836 normalize = None
834
837
835 files = sorted(match.files())
838 files = sorted(match.files())
836 subrepos.sort()
839 subrepos.sort()
837 i, j = 0, 0
840 i, j = 0, 0
838 while i < len(files) and j < len(subrepos):
841 while i < len(files) and j < len(subrepos):
839 subpath = subrepos[j] + b"/"
842 subpath = subrepos[j] + b"/"
840 if files[i] < subpath:
843 if files[i] < subpath:
841 i += 1
844 i += 1
842 continue
845 continue
843 while i < len(files) and files[i].startswith(subpath):
846 while i < len(files) and files[i].startswith(subpath):
844 del files[i]
847 del files[i]
845 j += 1
848 j += 1
846
849
847 if not files or b'' in files:
850 if not files or b'' in files:
848 files = [b'']
851 files = [b'']
849 # constructing the foldmap is expensive, so don't do it for the
852 # constructing the foldmap is expensive, so don't do it for the
850 # common case where files is ['']
853 # common case where files is ['']
851 normalize = None
854 normalize = None
852 results = dict.fromkeys(subrepos)
855 results = dict.fromkeys(subrepos)
853 results[b'.hg'] = None
856 results[b'.hg'] = None
854
857
855 for ff in files:
858 for ff in files:
856 if normalize:
859 if normalize:
857 nf = normalize(ff, False, True)
860 nf = normalize(ff, False, True)
858 else:
861 else:
859 nf = ff
862 nf = ff
860 if nf in results:
863 if nf in results:
861 continue
864 continue
862
865
863 try:
866 try:
864 st = lstat(join(nf))
867 st = lstat(join(nf))
865 kind = getkind(st.st_mode)
868 kind = getkind(st.st_mode)
866 if kind == dirkind:
869 if kind == dirkind:
867 if nf in dmap:
870 if nf in dmap:
868 # file replaced by dir on disk but still in dirstate
871 # file replaced by dir on disk but still in dirstate
869 results[nf] = None
872 results[nf] = None
870 foundadd((nf, ff))
873 foundadd((nf, ff))
871 elif kind == regkind or kind == lnkkind:
874 elif kind == regkind or kind == lnkkind:
872 results[nf] = st
875 results[nf] = st
873 else:
876 else:
874 badfn(ff, badtype(kind))
877 badfn(ff, badtype(kind))
875 if nf in dmap:
878 if nf in dmap:
876 results[nf] = None
879 results[nf] = None
877 except OSError as inst: # nf not found on disk - it is dirstate only
880 except OSError as inst: # nf not found on disk - it is dirstate only
878 if nf in dmap: # does it exactly match a missing file?
881 if nf in dmap: # does it exactly match a missing file?
879 results[nf] = None
882 results[nf] = None
880 else: # does it match a missing directory?
883 else: # does it match a missing directory?
881 if self._map.hasdir(nf):
884 if self._map.hasdir(nf):
882 notfoundadd(nf)
885 notfoundadd(nf)
883 else:
886 else:
884 badfn(ff, encoding.strtolocal(inst.strerror))
887 badfn(ff, encoding.strtolocal(inst.strerror))
885
888
886 # match.files() may contain explicitly-specified paths that shouldn't
889 # match.files() may contain explicitly-specified paths that shouldn't
887 # be taken; drop them from the list of files found. dirsfound/notfound
890 # be taken; drop them from the list of files found. dirsfound/notfound
888 # aren't filtered here because they will be tested later.
891 # aren't filtered here because they will be tested later.
889 if match.anypats():
892 if match.anypats():
890 for f in list(results):
893 for f in list(results):
891 if f == b'.hg' or f in subrepos:
894 if f == b'.hg' or f in subrepos:
892 # keep sentinel to disable further out-of-repo walks
895 # keep sentinel to disable further out-of-repo walks
893 continue
896 continue
894 if not match(f):
897 if not match(f):
895 del results[f]
898 del results[f]
896
899
897 # Case insensitive filesystems cannot rely on lstat() failing to detect
900 # Case insensitive filesystems cannot rely on lstat() failing to detect
898 # a case-only rename. Prune the stat object for any file that does not
901 # a case-only rename. Prune the stat object for any file that does not
899 # match the case in the filesystem, if there are multiple files that
902 # match the case in the filesystem, if there are multiple files that
900 # normalize to the same path.
903 # normalize to the same path.
901 if match.isexact() and self._checkcase:
904 if match.isexact() and self._checkcase:
902 normed = {}
905 normed = {}
903
906
904 for f, st in pycompat.iteritems(results):
907 for f, st in pycompat.iteritems(results):
905 if st is None:
908 if st is None:
906 continue
909 continue
907
910
908 nc = util.normcase(f)
911 nc = util.normcase(f)
909 paths = normed.get(nc)
912 paths = normed.get(nc)
910
913
911 if paths is None:
914 if paths is None:
912 paths = set()
915 paths = set()
913 normed[nc] = paths
916 normed[nc] = paths
914
917
915 paths.add(f)
918 paths.add(f)
916
919
917 for norm, paths in pycompat.iteritems(normed):
920 for norm, paths in pycompat.iteritems(normed):
918 if len(paths) > 1:
921 if len(paths) > 1:
919 for path in paths:
922 for path in paths:
920 folded = self._discoverpath(
923 folded = self._discoverpath(
921 path, norm, True, None, self._map.dirfoldmap
924 path, norm, True, None, self._map.dirfoldmap
922 )
925 )
923 if path != folded:
926 if path != folded:
924 results[path] = None
927 results[path] = None
925
928
926 return results, dirsfound, dirsnotfound
929 return results, dirsfound, dirsnotfound
927
930
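The sorted two-pointer scan near the top of this method (files vs. subrepos) can be read in isolation; a standalone sketch with hypothetical inputs:

    def drop_subrepo_files(files, subrepos):
        # Both lists sorted; remove files under any b'subrepo/' prefix.
        files = sorted(files)
        subrepos = sorted(subrepos)
        i, j = 0, 0
        while i < len(files) and j < len(subrepos):
            subpath = subrepos[j] + b"/"
            if files[i] < subpath:
                i += 1
                continue
            while i < len(files) and files[i].startswith(subpath):
                del files[i]
            j += 1
        return files

    # drop_subrepo_files([b'a.txt', b'sub/x', b'z.txt'], [b'sub'])
    #   -> [b'a.txt', b'z.txt']
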
928 def walk(self, match, subrepos, unknown, ignored, full=True):
931 def walk(self, match, subrepos, unknown, ignored, full=True):
929 """
932 """
930 Walk recursively through the directory tree, finding all files
933 Walk recursively through the directory tree, finding all files
931 matched by match.
934 matched by match.
932
935
933 If full is False, maybe skip some known-clean files.
        If full is False, maybe skip some known-clean files.

        Return a dict mapping filename to stat-like object (either
        mercurial.osutil.stat instance or return value of os.stat()).

        """
        # full is a flag that extensions that hook into walk can use -- this
        # implementation doesn't use it at all. This satisfies the contract
        # because we only guarantee a "maybe".

        if ignored:
            ignore = util.never
            dirignore = util.never
        elif unknown:
            ignore = self._ignore
            dirignore = self._dirignore
        else:
            # if not unknown and not ignored, drop dir recursion and step 2
            ignore = util.always
            dirignore = util.always

        matchfn = match.matchfn
        matchalways = match.always()
        matchtdir = match.traversedir
        dmap = self._map
        listdir = util.listdir
        lstat = os.lstat
        dirkind = stat.S_IFDIR
        regkind = stat.S_IFREG
        lnkkind = stat.S_IFLNK
        join = self._join

        exact = skipstep3 = False
        if match.isexact():  # match.exact
            exact = True
            dirignore = util.always  # skip step 2
        elif match.prefix():  # match.match, no patterns
            skipstep3 = True

        if not exact and self._checkcase:
            normalize = self._normalize
            normalizefile = self._normalizefile
            skipstep3 = False
        else:
            normalize = self._normalize
            normalizefile = None

        # step 1: find all explicit files
        results, work, dirsnotfound = self._walkexplicit(match, subrepos)
        if matchtdir:
            for d in work:
                matchtdir(d[0])
            for d in dirsnotfound:
                matchtdir(d)

        skipstep3 = skipstep3 and not (work or dirsnotfound)
        work = [d for d in work if not dirignore(d[0])]

        # step 2: visit subdirectories
        def traverse(work, alreadynormed):
            wadd = work.append
            while work:
                tracing.counter('dirstate.walk work', len(work))
                nd = work.pop()
                visitentries = match.visitchildrenset(nd)
                if not visitentries:
                    continue
                if visitentries == b'this' or visitentries == b'all':
                    visitentries = None
                skip = None
                if nd != b'':
                    skip = b'.hg'
                try:
                    with tracing.log('dirstate.walk.traverse listdir %s', nd):
                        entries = listdir(join(nd), stat=True, skip=skip)
                except OSError as inst:
                    if inst.errno in (errno.EACCES, errno.ENOENT):
                        match.bad(
                            self.pathto(nd), encoding.strtolocal(inst.strerror)
                        )
                        continue
                    raise
                for f, kind, st in entries:
                    # Some matchers may return files in the visitentries set,
                    # instead of 'this', if the matcher explicitly mentions them
                    # and is not an exactmatcher. This is acceptable; we do not
                    # make any hard assumptions about file-or-directory below
                    # based on the presence of `f` in visitentries. If
                    # visitchildrenset returned a set, we can always skip the
                    # entries *not* in the set it provided regardless of whether
                    # they're actually a file or a directory.
                    if visitentries and f not in visitentries:
                        continue
                    if normalizefile:
                        # even though f might be a directory, we're only
                        # interested in comparing it to files currently in the
                        # dmap -- therefore normalizefile is enough
                        nf = normalizefile(
                            nd and (nd + b"/" + f) or f, True, True
                        )
                    else:
                        nf = nd and (nd + b"/" + f) or f
                    if nf not in results:
                        if kind == dirkind:
                            if not ignore(nf):
                                if matchtdir:
                                    matchtdir(nf)
                                wadd(nf)
                            if nf in dmap and (matchalways or matchfn(nf)):
                                results[nf] = None
                        elif kind == regkind or kind == lnkkind:
                            if nf in dmap:
                                if matchalways or matchfn(nf):
                                    results[nf] = st
                            elif (matchalways or matchfn(nf)) and not ignore(
                                nf
                            ):
                                # unknown file -- normalize if necessary
                                if not alreadynormed:
                                    nf = normalize(nf, False, True)
                                results[nf] = st
                        elif nf in dmap and (matchalways or matchfn(nf)):
                            results[nf] = None

        for nd, d in work:
            # alreadynormed means that processwork doesn't have to do any
            # expensive directory normalization
            alreadynormed = not normalize or nd == d
            traverse([d], alreadynormed)

        for s in subrepos:
            del results[s]
        del results[b'.hg']

        # step 3: visit remaining files from dmap
        if not skipstep3 and not exact:
            # If a dmap file is not in results yet, it was either
            # a) not matching matchfn, b) ignored, c) missing, or d) under a
            # symlink directory.
            if not results and matchalways:
                visit = [f for f in dmap]
            else:
                visit = [f for f in dmap if f not in results and matchfn(f)]
            visit.sort()

            if unknown:
                # unknown == True means we walked all dirs under the roots
                # that weren't ignored, and everything that matched was
                # stat'ed and is already in results.
                # The rest must thus be ignored or under a symlink.
                audit_path = pathutil.pathauditor(self._root, cached=True)

                for nf in iter(visit):
                    # If a stat for the same file was already added with a
                    # different case, don't add one for this, since that would
                    # make it appear as if the file exists under both names
                    # on disk.
                    if (
                        normalizefile
                        and normalizefile(nf, True, True) in results
                    ):
                        results[nf] = None
                    # Report ignored items in the dmap as long as they are not
                    # under a symlink directory.
                    elif audit_path.check(nf):
                        try:
                            results[nf] = lstat(join(nf))
                            # file was just ignored, no links, and exists
                        except OSError:
                            # file doesn't exist
                            results[nf] = None
                    else:
                        # It's either missing or under a symlink directory
                        # which we in this case report as missing
                        results[nf] = None
            else:
                # We may not have walked the full directory tree above,
                # so stat and check everything we missed.
                iv = iter(visit)
                for st in util.statfiles([join(i) for i in visit]):
                    results[next(iv)] = st
        return results

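    # NOTE (editorial sketch, not part of the original change): a minimal way
    # to exercise the three-step walk above, assuming a `repo` object and the
    # standard matcher API:
    #
    #   from mercurial import match as matchmod
    #   m = matchmod.always()
    #   results = repo.dirstate.walk(m, subrepos=[], unknown=True,
    #                                ignored=False)
    #   for fn, st in results.items():
    #       # st is a stat-like object for files found on disk, or None for
    #       # dmap entries that were matched but not stat'ed here.
    #       pass
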
    def _rust_status(self, matcher, list_clean, list_ignored, list_unknown):
        # Force Rayon (Rust parallelism library) to respect the number of
        # workers. This is a temporary workaround until Rust code knows
        # how to read the config file.
        numcpus = self._ui.configint(b"worker", b"numcpus")
        if numcpus is not None:
            encoding.environ.setdefault(b'RAYON_NUM_THREADS', b'%d' % numcpus)

        workers_enabled = self._ui.configbool(b"worker", b"enabled", True)
        if not workers_enabled:
            encoding.environ[b"RAYON_NUM_THREADS"] = b"1"

        (
            lookup,
            modified,
            added,
            removed,
            deleted,
            clean,
            ignored,
            unknown,
            warnings,
            bad,
            traversed,
            dirty,
        ) = rustmod.status(
            self._map._rustmap,
            matcher,
            self._rootdir,
            self._ignorefiles(),
            self._checkexec,
            self._lastnormaltime,
            bool(list_clean),
            bool(list_ignored),
            bool(list_unknown),
            bool(matcher.traversedir),
        )

        self._dirty |= dirty

        if matcher.traversedir:
            for dir in traversed:
                matcher.traversedir(dir)

        if self._ui.warn:
            for item in warnings:
                if isinstance(item, tuple):
                    file_path, syntax = item
                    msg = _(b"%s: ignoring invalid syntax '%s'\n") % (
                        file_path,
                        syntax,
                    )
                    self._ui.warn(msg)
                else:
                    msg = _(b"skipping unreadable pattern file '%s': %s\n")
                    self._ui.warn(
                        msg
                        % (
                            pathutil.canonpath(
                                self._rootdir, self._rootdir, item
                            ),
                            b"No such file or directory",
                        )
                    )

        for (fn, message) in bad:
            matcher.bad(fn, encoding.strtolocal(message))

        status = scmutil.status(
            modified=modified,
            added=added,
            removed=removed,
            deleted=deleted,
            unknown=unknown,
            ignored=ignored,
            clean=clean,
        )
        return (lookup, status)

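    # NOTE (editorial sketch): the RAYON_NUM_THREADS workaround above mirrors
    # what one would do by hand. Rayon sizes its global thread pool from that
    # environment variable the first time the Rust extension needs it, so the
    # value must be in place before the first rustmod.status() call:
    #
    #   encoding.environ.setdefault(b'RAYON_NUM_THREADS', b'4')
    #   # ... the first call into rustmod.status() now uses at most 4 threads
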
    def status(self, match, subrepos, ignored, clean, unknown):
        """Determine the status of the working copy relative to the
        dirstate and return a pair of (unsure, status), where status is of type
        scmutil.status and:

          unsure:
            files that might have been modified since the dirstate was
            written, but need to be read to be sure (size is the same
            but mtime differs)
          status.modified:
            files that have definitely been modified since the dirstate
            was written (different size or mode)
          status.clean:
            files that have definitely not been modified since the
            dirstate was written
        """
        listignored, listclean, listunknown = ignored, clean, unknown
        lookup, modified, added, unknown, ignored = [], [], [], [], []
        removed, deleted, clean = [], [], []

        dmap = self._map
        dmap.preload()

        use_rust = True

        allowed_matchers = (
            matchmod.alwaysmatcher,
            matchmod.exactmatcher,
            matchmod.includematcher,
        )

        if rustmod is None:
            use_rust = False
        elif self._checkcase:
            # Case-insensitive filesystems are not handled yet
            use_rust = False
        elif subrepos:
            use_rust = False
        elif sparse.enabled:
            use_rust = False
        elif not isinstance(match, allowed_matchers):
            # Some matchers have yet to be implemented
            use_rust = False

        if use_rust:
            try:
                return self._rust_status(
                    match, listclean, listignored, listunknown
                )
            except rustmod.FallbackError:
                pass

        def noop(f):
            pass

        dcontains = dmap.__contains__
        dget = dmap.__getitem__
        ladd = lookup.append  # aka "unsure"
        madd = modified.append
        aadd = added.append
        uadd = unknown.append if listunknown else noop
        iadd = ignored.append if listignored else noop
        radd = removed.append
        dadd = deleted.append
        cadd = clean.append if listclean else noop
        mexact = match.exact
        dirignore = self._dirignore
        checkexec = self._checkexec
        copymap = self._map.copymap
        lastnormaltime = self._lastnormaltime

        # We need to do full walks when either
        # - we're listing all clean files, or
        # - match.traversedir does something, because match.traversedir should
        #   be called for every dir in the working dir
        full = listclean or match.traversedir is not None
        for fn, st in pycompat.iteritems(
            self.walk(match, subrepos, listunknown, listignored, full=full)
        ):
            if not dcontains(fn):
                if (listignored or mexact(fn)) and dirignore(fn):
                    if listignored:
                        iadd(fn)
                else:
                    uadd(fn)
                continue

            # This is equivalent to 'state, mode, size, time = dmap[fn]' but not
            # written like that for performance reasons. dmap[fn] is not a
            # Python tuple in compiled builds. The CPython UNPACK_SEQUENCE
            # opcode has fast paths when the value to be unpacked is a tuple or
            # a list, but falls back to creating a full-fledged iterator in
            # general. That is much slower than simply accessing and storing the
            # tuple members one by one.
            t = dget(fn)
            state = t[0]
            mode = t[1]
            size = t[2]
            time = t[3]

            if not st and state in b"nma":
                dadd(fn)
            elif state == b'n':
                if (
                    size >= 0
                    and (
                        (size != st.st_size and size != st.st_size & _rangemask)
                        or ((mode ^ st.st_mode) & 0o100 and checkexec)
                    )
                    or size == -2  # other parent
                    or fn in copymap
                ):
                    if stat.S_ISLNK(st.st_mode) and size != st.st_size:
                        # issue6456: Size returned may be longer due to
                        # encryption on EXT-4 fscrypt, undecided.
                        ladd(fn)
                    else:
                        madd(fn)
                elif (
                    time != st[stat.ST_MTIME]
                    and time != st[stat.ST_MTIME] & _rangemask
                ):
                    ladd(fn)
                elif st[stat.ST_MTIME] == lastnormaltime:
                    # fn may have just been marked as normal and it may have
                    # changed in the same second without changing its size.
                    # This can happen if we quickly do multiple commits.
                    # Force lookup, so we don't miss such a racy file change.
                    ladd(fn)
                elif listclean:
                    cadd(fn)
            elif state == b'm':
                madd(fn)
            elif state == b'a':
                aadd(fn)
            elif state == b'r':
                radd(fn)
        status = scmutil.status(
            modified, added, removed, deleted, unknown, ignored, clean
        )
        return (lookup, status)

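    # NOTE (editorial sketch): callers treat the first element of the returned
    # pair as "lookup" work -- for each unsure file they re-read the contents
    # and compare against the working copy's parent before deciding modified
    # vs clean. Roughly:
    #
    #   lookup, st = repo.dirstate.status(m, [], False, False, False)
    #   wctx, pctx = repo[None], repo[b'.']
    #   for fn in lookup:
    #       if fn in pctx and not wctx[fn].cmp(pctx[fn]):
    #           pass  # contents identical: actually clean
    #       else:
    #           pass  # really modified
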
    def matches(self, match):
        """
        return files in the dirstate (in whatever state) filtered by match
        """
        dmap = self._map
        if rustmod is not None:
            dmap = self._map._rustmap

        if match.always():
            return dmap.keys()
        files = match.files()
        if match.isexact():
            # fast path -- filter the other way around, since typically files is
            # much smaller than dmap
            return [f for f in files if f in dmap]
        if match.prefix() and all(fn in dmap for fn in files):
            # fast path -- all the values are known to be files, so just return
            # that
            return list(files)
        return [f for f in dmap if match(f)]

    def _actualfilename(self, tr):
        if tr:
            return self._pendingfilename
        else:
            return self._filename

    def savebackup(self, tr, backupname):
        '''Save current dirstate into backup file'''
        filename = self._actualfilename(tr)
        assert backupname != filename

        # use '_writedirstate' instead of 'write' to make sure changes are
        # written out, because the latter omits writing out if a transaction
        # is running. The output file will be used to create the backup of
        # the dirstate at this point.
        if self._dirty or not self._opener.exists(filename):
            self._writedirstate(
                self._opener(filename, b"w", atomictemp=True, checkambig=True)
            )

        if tr:
            # ensure that subsequent tr.writepending returns True for
            # changes written out above, even if dirstate is never
            # changed after this
            tr.addfilegenerator(
                b'dirstate',
                (self._filename,),
                self._writedirstate,
                location=b'plain',
            )

            # ensure that pending file written above is unlinked at
            # failure, even if tr.writepending isn't invoked until the
            # end of this transaction
            tr.registertmp(filename, location=b'plain')

        self._opener.tryunlink(backupname)
        # hardlink backup is okay because _writedirstate is always called
        # with an "atomictemp=True" file.
        util.copyfile(
            self._opener.join(filename),
            self._opener.join(backupname),
            hardlink=True,
        )

    def restorebackup(self, tr, backupname):
        '''Restore dirstate from backup file'''
        # this "invalidate()" prevents "wlock.release()" from writing
        # changes of dirstate out after restoring from backup file
        self.invalidate()
        filename = self._actualfilename(tr)
        o = self._opener
        if util.samefile(o.join(backupname), o.join(filename)):
            o.unlink(backupname)
        else:
            o.rename(backupname, filename, checkambig=True)

    def clearbackup(self, tr, backupname):
        '''Clear backup file'''
        self._opener.unlink(backupname)
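
    # NOTE (editorial sketch): the three backup helpers above are meant to be
    # used as a unit around a risky working-copy operation, e.g.:
    #
    #   repo.dirstate.savebackup(tr, b'dirstate.backup')
    #   try:
    #       ...  # mutate the working copy / dirstate
    #   except Exception:
    #       repo.dirstate.restorebackup(tr, b'dirstate.backup')
    #       raise
    #   else:
    #       repo.dirstate.clearbackup(tr, b'dirstate.backup')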


class dirstatemap(object):
    """Map encapsulating the dirstate's contents.

    The dirstate contains the following state:

    - `identity` is the identity of the dirstate file, which can be used to
      detect when changes have occurred to the dirstate file.

    - `parents` is a pair containing the parents of the working copy. The
      parents are updated by calling `setparents`.

    - the state map maps filenames to tuples of (state, mode, size, mtime),
      where state is a single character representing 'normal', 'added',
      'removed', or 'merged'. It is read by treating the dirstate as a
      dict. File state is updated by calling the `addfile`, `removefile` and
      `dropfile` methods.

    - `copymap` maps destination filenames to their source filename.

    The dirstate also provides the following views onto the state:

    - `nonnormalset` is a set of the filenames that have state other
      than 'normal', or are normal but have an mtime of -1 ('normallookup').

    - `otherparentset` is a set of the filenames that are marked as coming
      from the second parent when the dirstate is currently being merged.

    - `filefoldmap` is a dict mapping normalized filenames to the denormalized
      form that they appear as in the dirstate.

    - `dirfoldmap` is a dict mapping normalized directory names to the
      denormalized form that they appear as in the dirstate.
    """

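    # NOTE (editorial sketch): a concrete picture of the state map described
    # above -- a clean tracked file at mode 0o644, 12 bytes, with its mtime,
    # plus one recorded copy:
    #
    #   dmap[b'foo.txt'] = dirstatetuple(b'n', 0o644, 12, 1600000000)
    #   dmap.copymap[b'bar.txt'] = b'foo.txt'  # bar.txt was copied from foo.txt
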
    def __init__(self, ui, opener, root, nodeconstants, use_dirstate_v2):
        self._ui = ui
        self._opener = opener
        self._root = root
        self._filename = b'dirstate'
        self._nodelen = 20
        self._nodeconstants = nodeconstants
        assert (
            not use_dirstate_v2
        ), "should have detected unsupported requirement"

        self._parents = None
        self._dirtyparents = False

        # for consistent view between _pl() and _read() invocations
        self._pendingmode = None

    @propertycache
    def _map(self):
        self._map = {}
        self.read()
        return self._map

    @propertycache
    def copymap(self):
        self.copymap = {}
        self._map
        return self.copymap

    def directories(self):
        # Rust / dirstate-v2 only
        return []

    def clear(self):
        self._map.clear()
        self.copymap.clear()
        self.setparents(self._nodeconstants.nullid, self._nodeconstants.nullid)
        util.clearcachedproperty(self, b"_dirs")
        util.clearcachedproperty(self, b"_alldirs")
        util.clearcachedproperty(self, b"filefoldmap")
        util.clearcachedproperty(self, b"dirfoldmap")
        util.clearcachedproperty(self, b"nonnormalset")
        util.clearcachedproperty(self, b"otherparentset")

    def items(self):
        return pycompat.iteritems(self._map)

    # forward for python2,3 compat
    iteritems = items

    def __len__(self):
        return len(self._map)

    def __iter__(self):
        return iter(self._map)

    def get(self, key, default=None):
        return self._map.get(key, default)

    def __contains__(self, key):
        return key in self._map

    def __getitem__(self, key):
        return self._map[key]

    def keys(self):
        return self._map.keys()

    def preload(self):
        """Loads the underlying data, if it's not already loaded"""
        self._map

    def addfile(self, f, oldstate, state, mode, size, mtime):
        """Add a tracked file to the dirstate."""
        if oldstate in b"?r" and "_dirs" in self.__dict__:
            self._dirs.addpath(f)
        if oldstate == b"?" and "_alldirs" in self.__dict__:
            self._alldirs.addpath(f)
        self._map[f] = dirstatetuple(state, mode, size, mtime)
        if state != b'n' or mtime == -1:
            self.nonnormalset.add(f)
        if size == -2:
            self.otherparentset.add(f)

    def removefile(self, f, oldstate, size):
        """
        Mark a file as removed in the dirstate.

        The `size` parameter is used to store sentinel values that indicate
        the file's previous state. In the future, we should refactor this
        to be more explicit about what that state is.
        """
        if oldstate not in b"?r" and "_dirs" in self.__dict__:
            self._dirs.delpath(f)
        if oldstate == b"?" and "_alldirs" in self.__dict__:
            self._alldirs.addpath(f)
        if "filefoldmap" in self.__dict__:
            normed = util.normcase(f)
            self.filefoldmap.pop(normed, None)
        self._map[f] = dirstatetuple(b'r', 0, size, 0)
        self.nonnormalset.add(f)

    def dropfile(self, f, oldstate):
        """
        Remove a file from the dirstate. Returns True if the file was
        previously recorded.
        """
        exists = self._map.pop(f, None) is not None
        if exists:
            if oldstate != b"r" and "_dirs" in self.__dict__:
                self._dirs.delpath(f)
            if "_alldirs" in self.__dict__:
                self._alldirs.delpath(f)
        if "filefoldmap" in self.__dict__:
            normed = util.normcase(f)
            self.filefoldmap.pop(normed, None)
        self.nonnormalset.discard(f)
        return exists

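    # NOTE (editorial sketch): `addfile`/`removefile`/`dropfile` keep the
    # cached `_dirs`/`_alldirs` counters consistent, so directory queries stay
    # cheap. Roughly:
    #
    #   dmap.addfile(b'a/b', b'?', b'n', 0o644, 0, 0)
    #   dmap.hastrackeddir(b'a')     # -> True
    #   dmap.dropfile(b'a/b', b'n')  # -> True (it was recorded)
    #   dmap.hastrackeddir(b'a')     # -> False
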
    def clearambiguoustimes(self, files, now):
        for f in files:
            e = self.get(f)
            if e is not None and e[0] == b'n' and e[3] == now:
                self._map[f] = dirstatetuple(e[0], e[1], e[2], -1)
                self.nonnormalset.add(f)

    def nonnormalentries(self):
        '''Compute the nonnormal dirstate entries from the dmap'''
        try:
            return parsers.nonnormalotherparententries(self._map)
        except AttributeError:
            nonnorm = set()
            otherparent = set()
            for fname, e in pycompat.iteritems(self._map):
                if e[0] != b'n' or e[3] == -1:
                    nonnorm.add(fname)
                if e[0] == b'n' and e[2] == -2:
                    otherparent.add(fname)
            return nonnorm, otherparent

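    # NOTE (editorial sketch): the classification rule above in one line --
    # given an entry e = (state, mode, size, mtime):
    #
    #   nonnormal   = e[0] != b'n' or e[3] == -1   # not "normal", or mtime
    #                                              # deliberately invalidated
    #   otherparent = e[0] == b'n' and e[2] == -2  # size sentinel: "from p2"
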
    @propertycache
    def filefoldmap(self):
        """Returns a dictionary mapping normalized case paths to their
        non-normalized versions.
        """
        try:
            makefilefoldmap = parsers.make_file_foldmap
        except AttributeError:
            pass
        else:
            return makefilefoldmap(
                self._map, util.normcasespec, util.normcasefallback
            )

        f = {}
        normcase = util.normcase
        for name, s in pycompat.iteritems(self._map):
            if s[0] != b'r':
                f[normcase(name)] = name
        f[b'.'] = b'.'  # prevents useless util.fspath() invocation
        return f

    def hastrackeddir(self, d):
        """
        Returns True if the dirstate contains a tracked (not removed) file
        in this directory.
        """
        return d in self._dirs

    def hasdir(self, d):
        """
        Returns True if the dirstate contains a file (tracked or removed)
        in this directory.
        """
        return d in self._alldirs

    @propertycache
    def _dirs(self):
        return pathutil.dirs(self._map, b'r')

    @propertycache
    def _alldirs(self):
        return pathutil.dirs(self._map)

    def _opendirstatefile(self):
        fp, mode = txnutil.trypending(self._root, self._opener, self._filename)
        if self._pendingmode is not None and self._pendingmode != mode:
            fp.close()
            raise error.Abort(
                _(b'working directory state may be changed parallelly')
            )
        self._pendingmode = mode
        return fp

    def parents(self):
        if not self._parents:
            try:
                fp = self._opendirstatefile()
                st = fp.read(2 * self._nodelen)
                fp.close()
            except IOError as err:
                if err.errno != errno.ENOENT:
                    raise
                # File doesn't exist, so the current state is empty
                st = b''

            l = len(st)
            if l == self._nodelen * 2:
                self._parents = (
                    st[: self._nodelen],
                    st[self._nodelen : 2 * self._nodelen],
                )
            elif l == 0:
                self._parents = (
                    self._nodeconstants.nullid,
                    self._nodeconstants.nullid,
                )
            else:
                raise error.Abort(
                    _(b'working directory state appears damaged!')
                )

        return self._parents

    def setparents(self, p1, p2):
        self._parents = (p1, p2)
        self._dirtyparents = True

    def read(self):
        # ignore HG_PENDING because identity is used only for writing
        self.identity = util.filestat.frompath(
            self._opener.join(self._filename)
        )

        try:
            fp = self._opendirstatefile()
            try:
                st = fp.read()
            finally:
                fp.close()
        except IOError as err:
            if err.errno != errno.ENOENT:
                raise
            return
        if not st:
            return

        if util.safehasattr(parsers, b'dict_new_presized'):
            # Make an estimate of the number of files in the dirstate based on
            # its size. This trades wasting some memory for avoiding costly
            # resizes. Each entry has a prefix of 17 bytes followed by one or
            # two path names. Studies on various large-scale real-world
            # repositories found 54 bytes a reasonable upper limit for the
            # average path names. Copy entries are ignored for the sake of
            # this estimate.
            self._map = parsers.dict_new_presized(len(st) // 71)

        # Python's garbage collector triggers a GC each time a certain number
        # of container objects (the number being defined by
        # gc.get_threshold()) are allocated. parse_dirstate creates a tuple
        # for each file in the dirstate. The C version then immediately marks
        # them as not to be tracked by the collector. However, this has no
        # effect on when GCs are triggered, only on what objects the GC looks
        # into. This means that O(number of files) GCs are unavoidable.
        # Depending on when in the process's lifetime the dirstate is parsed,
        # this can get very expensive. As a workaround, disable GC while
        # parsing the dirstate.
        #
        # (we cannot decorate the function directly since it is in a C module)
        parse_dirstate = util.nogc(parsers.parse_dirstate)
        p = parse_dirstate(self._map, self.copymap, st)
        if not self._dirtyparents:
            self.setparents(*p)

        # Avoid excess attribute lookups by fast pathing certain checks
        self.__contains__ = self._map.__contains__
        self.__getitem__ = self._map.__getitem__
        self.get = self._map.get

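    # NOTE (editorial sketch): the presizing arithmetic above, spelled out --
    # 17 bytes of fixed per-entry header plus ~54 bytes of path data gives
    # roughly 71 bytes per entry, so:
    #
    #   estimated_entries = len(st) // 71
    #   # e.g. a 7.1 MB dirstate file presizes the dict for ~100,000 files
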
    def write(self, st, now):
        st.write(
            parsers.pack_dirstate(self._map, self.copymap, self.parents(), now)
        )
        st.close()
        self._dirtyparents = False
        self.nonnormalset, self.otherparentset = self.nonnormalentries()

    @propertycache
    def nonnormalset(self):
        nonnorm, otherparents = self.nonnormalentries()
        self.otherparentset = otherparents
        return nonnorm

    @propertycache
    def otherparentset(self):
        nonnorm, otherparents = self.nonnormalentries()
        self.nonnormalset = nonnorm
        return otherparents

    def non_normal_or_other_parent_paths(self):
        return self.nonnormalset.union(self.otherparentset)

    @propertycache
    def identity(self):
        self._map
        return self.identity

    @propertycache
    def dirfoldmap(self):
        f = {}
        normcase = util.normcase
        for name in self._dirs:
            f[normcase(name)] = name
        return f


1765
1772
1766 class dirstatemap(object):
1773 class dirstatemap(object):
1767 def __init__(self, ui, opener, root, nodeconstants, use_dirstate_v2):
1774 def __init__(self, ui, opener, root, nodeconstants, use_dirstate_v2):
1768 self._use_dirstate_v2 = use_dirstate_v2
1775 self._use_dirstate_v2 = use_dirstate_v2
1769 self._nodeconstants = nodeconstants
1776 self._nodeconstants = nodeconstants
1770 self._ui = ui
1777 self._ui = ui
1771 self._opener = opener
1778 self._opener = opener
1772 self._root = root
1779 self._root = root
1773 self._filename = b'dirstate'
1780 self._filename = b'dirstate'
1774 self._nodelen = 20 # Also update Rust code when changing this!
1781 self._nodelen = 20 # Also update Rust code when changing this!
1775 self._parents = None
1782 self._parents = None
1776 self._dirtyparents = False
1783 self._dirtyparents = False
1777
1784
1778 # for consistent view between _pl() and _read() invocations
1785 # for consistent view between _pl() and _read() invocations
1779 self._pendingmode = None
1786 self._pendingmode = None
1780
1787
1781 self._use_dirstate_tree = self._ui.configbool(
1788 self._use_dirstate_tree = self._ui.configbool(
1782 b"experimental",
1789 b"experimental",
1783 b"dirstate-tree.in-memory",
1790 b"dirstate-tree.in-memory",
1784 False,
1791 False,
1785 )
1792 )
1786
1793
1787 def addfile(self, *args, **kwargs):
1794 def addfile(self, *args, **kwargs):
1788 return self._rustmap.addfile(*args, **kwargs)
1795 return self._rustmap.addfile(*args, **kwargs)
1789
1796
1790 def removefile(self, *args, **kwargs):
1797 def removefile(self, *args, **kwargs):
1791 return self._rustmap.removefile(*args, **kwargs)
1798 return self._rustmap.removefile(*args, **kwargs)
1792
1799
1793 def dropfile(self, *args, **kwargs):
1800 def dropfile(self, *args, **kwargs):
1794 return self._rustmap.dropfile(*args, **kwargs)
1801 return self._rustmap.dropfile(*args, **kwargs)
1795
1802
1796 def clearambiguoustimes(self, *args, **kwargs):
1803 def clearambiguoustimes(self, *args, **kwargs):
1797 return self._rustmap.clearambiguoustimes(*args, **kwargs)
1804 return self._rustmap.clearambiguoustimes(*args, **kwargs)
1798
1805
1799 def nonnormalentries(self):
1806 def nonnormalentries(self):
1800 return self._rustmap.nonnormalentries()
1807 return self._rustmap.nonnormalentries()
1801
1808
1802 def get(self, *args, **kwargs):
1809 def get(self, *args, **kwargs):
1803 return self._rustmap.get(*args, **kwargs)
1810 return self._rustmap.get(*args, **kwargs)
1804
1811
1805 @property
1812 @property
1806 def copymap(self):
1813 def copymap(self):
1807 return self._rustmap.copymap()
1814 return self._rustmap.copymap()
1808
1815
1816 def directories(self):
1817 return self._rustmap.directories()
1818
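        # NOTE (editorial sketch): `directories()` is what dirstate-v2 tooling
        # can consume to inspect cached directory entries; the plain-Python
        # map earlier in this file always returns an empty list, so callers
        # can iterate it unconditionally, e.g.:
        #
        #   for entry in repo.dirstate._map.directories():
        #       ...  # only ever non-empty with the Rust dirstate-v2 map
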
        def preload(self):
            self._rustmap

        def clear(self):
            self._rustmap.clear()
            self.setparents(
                self._nodeconstants.nullid, self._nodeconstants.nullid
            )
            util.clearcachedproperty(self, b"_dirs")
            util.clearcachedproperty(self, b"_alldirs")
            util.clearcachedproperty(self, b"dirfoldmap")

        def items(self):
            return self._rustmap.items()

        def keys(self):
            return iter(self._rustmap)

        def __contains__(self, key):
            return key in self._rustmap

        def __getitem__(self, item):
            return self._rustmap[item]

        def __len__(self):
            return len(self._rustmap)

        def __iter__(self):
            return iter(self._rustmap)

        # forward for python2,3 compat
        iteritems = items

        def _opendirstatefile(self):
            fp, mode = txnutil.trypending(
                self._root, self._opener, self._filename
            )
            if self._pendingmode is not None and self._pendingmode != mode:
                fp.close()
                raise error.Abort(
                    _(b'working directory state may be changed parallelly')
                )
            self._pendingmode = mode
            return fp

        def setparents(self, p1, p2):
            self._parents = (p1, p2)
            self._dirtyparents = True

        def parents(self):
            if not self._parents:
                if self._use_dirstate_v2:
                    offset = len(rustmod.V2_FORMAT_MARKER)
                else:
                    offset = 0
                read_len = offset + self._nodelen * 2
                try:
                    fp = self._opendirstatefile()
                    st = fp.read(read_len)
                    fp.close()
                except IOError as err:
                    if err.errno != errno.ENOENT:
                        raise
                    # File doesn't exist, so the current state is empty
                    st = b''

                l = len(st)
                if l == read_len:
                    st = st[offset:]
                    self._parents = (
                        st[: self._nodelen],
                        st[self._nodelen : 2 * self._nodelen],
                    )
                elif l == 0:
                    self._parents = (
                        self._nodeconstants.nullid,
                        self._nodeconstants.nullid,
                    )
                else:
                    raise error.Abort(
                        _(b'working directory state appears damaged!')
                    )

            return self._parents

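        # NOTE (editorial sketch): with dirstate-v2 the parent nodes are not
        # at offset 0 -- the file starts with a format marker, so the read
        # above amounts to:
        #
        #   offset = len(rustmod.V2_FORMAT_MARKER)
        #   p1 = data[offset : offset + 20]
        #   p2 = data[offset + 20 : offset + 40]
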
1894 @propertycache
1904 @propertycache
1895 def _rustmap(self):
1905 def _rustmap(self):
1896 """
1906 """
1897 Fills the Dirstatemap when called.
1907 Fills the Dirstatemap when called.
1898 """
1908 """
1899 # ignore HG_PENDING because identity is used only for writing
1909 # ignore HG_PENDING because identity is used only for writing
1900 self.identity = util.filestat.frompath(
1910 self.identity = util.filestat.frompath(
1901 self._opener.join(self._filename)
1911 self._opener.join(self._filename)
1902 )
1912 )
1903
1913
1904 try:
1914 try:
1905 fp = self._opendirstatefile()
1915 fp = self._opendirstatefile()
1906 try:
1916 try:
1907 st = fp.read()
1917 st = fp.read()
1908 finally:
1918 finally:
1909 fp.close()
1919 fp.close()
1910 except IOError as err:
1920 except IOError as err:
1911 if err.errno != errno.ENOENT:
1921 if err.errno != errno.ENOENT:
1912 raise
1922 raise
1913 st = b''
1923 st = b''
1914
1924
1915 self._rustmap, parents = rustmod.DirstateMap.new(
1925 self._rustmap, parents = rustmod.DirstateMap.new(
1916 self._use_dirstate_tree, self._use_dirstate_v2, st
1926 self._use_dirstate_tree, self._use_dirstate_v2, st
1917 )
1927 )
1918
1928
1919 if parents and not self._dirtyparents:
1929 if parents and not self._dirtyparents:
1920 self.setparents(*parents)
1930 self.setparents(*parents)
1921
1931
1922 self.__contains__ = self._rustmap.__contains__
1932 self.__contains__ = self._rustmap.__contains__
1923 self.__getitem__ = self._rustmap.__getitem__
1933 self.__getitem__ = self._rustmap.__getitem__
1924 self.get = self._rustmap.get
1934 self.get = self._rustmap.get
1925 return self._rustmap
1935 return self._rustmap
1926
1936
1927 def write(self, st, now):
1937 def write(self, st, now):
1928 parents = self.parents()
1938 parents = self.parents()
1929 packed = self._rustmap.write(
1939 packed = self._rustmap.write(
1930 self._use_dirstate_v2, parents[0], parents[1], now
1940 self._use_dirstate_v2, parents[0], parents[1], now
1931 )
1941 )
1932 st.write(packed)
1942 st.write(packed)
1933 st.close()
1943 st.close()
1934 self._dirtyparents = False
1944 self._dirtyparents = False
1935
1945
1936 @propertycache
1946 @propertycache
1937 def filefoldmap(self):
1947 def filefoldmap(self):
1938 """Returns a dictionary mapping normalized case paths to their
1948 """Returns a dictionary mapping normalized case paths to their
1939 non-normalized versions.
1949 non-normalized versions.
1940 """
1950 """
1941 return self._rustmap.filefoldmapasdict()
1951 return self._rustmap.filefoldmapasdict()
1942
1952
1943 def hastrackeddir(self, d):
1953 def hastrackeddir(self, d):
1944 self._dirs # Trigger Python's propertycache
1954 self._dirs # Trigger Python's propertycache
1945 return self._rustmap.hastrackeddir(d)
1955 return self._rustmap.hastrackeddir(d)
1946
1956
1947 def hasdir(self, d):
1957 def hasdir(self, d):
1948 self._dirs # Trigger Python's propertycache
1958 self._dirs # Trigger Python's propertycache
1949 return self._rustmap.hasdir(d)
1959 return self._rustmap.hasdir(d)
1950
1960
1951 @propertycache
1961 @propertycache
1952 def _dirs(self):
1962 def _dirs(self):
1953 return self._rustmap.getdirs()
1963 return self._rustmap.getdirs()
1954
1964
1955 @propertycache
1965 @propertycache
1956 def _alldirs(self):
1966 def _alldirs(self):
1957 return self._rustmap.getalldirs()
1967 return self._rustmap.getalldirs()
1958
1968
1959 @propertycache
1969 @propertycache
1960 def identity(self):
1970 def identity(self):
1961 self._rustmap # accessing _rustmap sets self.identity as a side effect
1971 self._rustmap # accessing _rustmap sets self.identity as a side effect
1962 return self.identity
1972 return self.identity
1963
1973
1964 @property
1974 @property
1965 def nonnormalset(self):
1975 def nonnormalset(self):
1966 nonnorm = self._rustmap.non_normal_entries()
1976 nonnorm = self._rustmap.non_normal_entries()
1967 return nonnorm
1977 return nonnorm
1968
1978
1969 @propertycache
1979 @propertycache
1970 def otherparentset(self):
1980 def otherparentset(self):
1971 otherparents = self._rustmap.other_parent_entries()
1981 otherparents = self._rustmap.other_parent_entries()
1972 return otherparents
1982 return otherparents
1973
1983
1974 def non_normal_or_other_parent_paths(self):
1984 def non_normal_or_other_parent_paths(self):
1975 return self._rustmap.non_normal_or_other_parent_paths()
1985 return self._rustmap.non_normal_or_other_parent_paths()
1976
1986
1977 @propertycache
1987 @propertycache
1978 def dirfoldmap(self):
1988 def dirfoldmap(self):
1979 f = {}
1989 f = {}
1980 normcase = util.normcase
1990 normcase = util.normcase
1981 for name in self._dirs:
1991 for name in self._dirs:
1982 f[normcase(name)] = name
1992 f[normcase(name)] = name
1983 return f
1993 return f
@@ -1,489 +1,489 b''
1 // Copyright 2019 Raphaël Gomès <rgomes@octobus.net>
1 // Copyright 2019 Raphaël Gomès <rgomes@octobus.net>
2 //
2 //
3 // This software may be used and distributed according to the terms of the
3 // This software may be used and distributed according to the terms of the
4 // GNU General Public License version 2 or any later version.
4 // GNU General Public License version 2 or any later version.
5
5
6 use crate::errors::HgError;
6 use crate::errors::HgError;
7 use crate::utils::hg_path::HgPath;
7 use crate::utils::hg_path::HgPath;
8 use crate::{
8 use crate::{
9 dirstate::{CopyMap, EntryState, RawEntry, StateMap},
9 dirstate::{CopyMap, EntryState, RawEntry, StateMap},
10 DirstateEntry, DirstateParents,
10 DirstateEntry, DirstateParents,
11 };
11 };
12 use byteorder::{BigEndian, WriteBytesExt};
12 use byteorder::{BigEndian, WriteBytesExt};
13 use bytes_cast::BytesCast;
13 use bytes_cast::BytesCast;
14 use micro_timer::timed;
14 use micro_timer::timed;
15 use std::convert::{TryFrom, TryInto};
15 use std::convert::{TryFrom, TryInto};
16
16
17 /// Parents are stored in the dirstate as byte hashes.
17 /// Parents are stored in the dirstate as byte hashes.
18 pub const PARENT_SIZE: usize = 20;
18 pub const PARENT_SIZE: usize = 20;
19 /// Dirstate entries have a static part of 8 + 32 + 32 + 32 + 32 bits.
19 /// Dirstate entries have a static part of 8 + 32 + 32 + 32 + 32 bits.
20 const MIN_ENTRY_SIZE: usize = 17;
20 const MIN_ENTRY_SIZE: usize = 17;
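
To make MIN_ENTRY_SIZE concrete: `pack_entry` below writes one state byte followed by four big-endian i32 fields (mode, size, mtime, length), i.e. 1 + 4 × 4 = 17 bytes of fixed header, then `length` bytes of path data. A standalone arithmetic sketch (not part of the patch; the helper names are mine):

    // v1 entry: state(1) + mode(4) + size(4) + mtime(4) + length(4) = 17
    // bytes of fixed header, then `length` bytes of filename, optionally
    // followed by b"\0" and a copy source.
    const HEADER: usize = 1 + 4 + 4 + 4 + 4; // == MIN_ENTRY_SIZE

    fn entry_size(filename: &[u8], copy_source: Option<&[u8]>) -> usize {
        HEADER + filename.len() + copy_source.map_or(0, |s| 1 + s.len())
    }

    fn main() {
        assert_eq!(entry_size(b"f1", None), 19);
        // Matches the length field (2 + 1 + 8 = 11) in the tests below.
        assert_eq!(entry_size(b"f1", Some(&b"copyname"[..])), 17 + 11);
    }
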
21
21
22 type ParseResult<'a> = (
22 type ParseResult<'a> = (
23 &'a DirstateParents,
23 &'a DirstateParents,
24 Vec<(&'a HgPath, DirstateEntry)>,
24 Vec<(&'a HgPath, DirstateEntry)>,
25 Vec<(&'a HgPath, &'a HgPath)>,
25 Vec<(&'a HgPath, &'a HgPath)>,
26 );
26 );
27
27
28 pub fn parse_dirstate_parents(
28 pub fn parse_dirstate_parents(
29 contents: &[u8],
29 contents: &[u8],
30 ) -> Result<&DirstateParents, HgError> {
30 ) -> Result<&DirstateParents, HgError> {
31 let (parents, _rest) = DirstateParents::from_bytes(contents)
31 let (parents, _rest) = DirstateParents::from_bytes(contents)
32 .map_err(|_| HgError::corrupted("Too little data for dirstate."))?;
32 .map_err(|_| HgError::corrupted("Too little data for dirstate."))?;
33 Ok(parents)
33 Ok(parents)
34 }
34 }
35
35
36 #[timed]
36 #[timed]
37 pub fn parse_dirstate(contents: &[u8]) -> Result<ParseResult, HgError> {
37 pub fn parse_dirstate(contents: &[u8]) -> Result<ParseResult, HgError> {
38 let mut copies = Vec::new();
38 let mut copies = Vec::new();
39 let mut entries = Vec::new();
39 let mut entries = Vec::new();
40 let parents =
40 let parents =
41 parse_dirstate_entries(contents, |path, entry, copy_source| {
41 parse_dirstate_entries(contents, |path, entry, copy_source| {
42 if let Some(source) = copy_source {
42 if let Some(source) = copy_source {
43 copies.push((path, source));
43 copies.push((path, source));
44 }
44 }
45 entries.push((path, *entry));
45 entries.push((path, *entry));
46 Ok(())
46 Ok(())
47 })?;
47 })?;
48 Ok((parents, entries, copies))
48 Ok((parents, entries, copies))
49 }
49 }
50
50
51 pub fn parse_dirstate_entries<'a>(
51 pub fn parse_dirstate_entries<'a>(
52 mut contents: &'a [u8],
52 mut contents: &'a [u8],
53 mut each_entry: impl FnMut(
53 mut each_entry: impl FnMut(
54 &'a HgPath,
54 &'a HgPath,
55 &DirstateEntry,
55 &DirstateEntry,
56 Option<&'a HgPath>,
56 Option<&'a HgPath>,
57 ) -> Result<(), HgError>,
57 ) -> Result<(), HgError>,
58 ) -> Result<&'a DirstateParents, HgError> {
58 ) -> Result<&'a DirstateParents, HgError> {
59 let (parents, rest) = DirstateParents::from_bytes(contents)
59 let (parents, rest) = DirstateParents::from_bytes(contents)
60 .map_err(|_| HgError::corrupted("Too little data for dirstate."))?;
60 .map_err(|_| HgError::corrupted("Too little data for dirstate."))?;
61 contents = rest;
61 contents = rest;
62 while !contents.is_empty() {
62 while !contents.is_empty() {
63 let (raw_entry, rest) = RawEntry::from_bytes(contents)
63 let (raw_entry, rest) = RawEntry::from_bytes(contents)
64 .map_err(|_| HgError::corrupted("Overflow in dirstate."))?;
64 .map_err(|_| HgError::corrupted("Overflow in dirstate."))?;
65
65
66 let entry = DirstateEntry {
66 let entry = DirstateEntry {
67 state: EntryState::try_from(raw_entry.state)?,
67 state: EntryState::try_from(raw_entry.state)?,
68 mode: raw_entry.mode.get(),
68 mode: raw_entry.mode.get(),
69 mtime: raw_entry.mtime.get(),
69 mtime: raw_entry.mtime.get(),
70 size: raw_entry.size.get(),
70 size: raw_entry.size.get(),
71 };
71 };
72 let (paths, rest) =
72 let (paths, rest) =
73 u8::slice_from_bytes(rest, raw_entry.length.get() as usize)
73 u8::slice_from_bytes(rest, raw_entry.length.get() as usize)
74 .map_err(|_| HgError::corrupted("Overflow in dirstate."))?;
74 .map_err(|_| HgError::corrupted("Overflow in dirstate."))?;
75
75
76 // `paths` is either a single path, or two paths separated by a NULL
76 // `paths` is either a single path, or two paths separated by a NULL
77 // byte
77 // byte
78 let mut iter = paths.splitn(2, |&byte| byte == b'\0');
78 let mut iter = paths.splitn(2, |&byte| byte == b'\0');
79 let path = HgPath::new(
79 let path = HgPath::new(
80 iter.next().expect("splitn always yields at least one item"),
80 iter.next().expect("splitn always yields at least one item"),
81 );
81 );
82 let copy_source = iter.next().map(HgPath::new);
82 let copy_source = iter.next().map(HgPath::new);
83 each_entry(path, &entry, copy_source)?;
83 each_entry(path, &entry, copy_source)?;
84
84
85 contents = rest;
85 contents = rest;
86 }
86 }
87 Ok(parents)
87 Ok(parents)
88 }
88 }
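
A hedged usage sketch of this callback API (the import paths and the `is_tracked` predicate match code elsewhere in this patch; `count_tracked` itself is mine):

    use crate::dirstate::parsers::parse_dirstate_entries;
    use crate::errors::HgError;

    /// Count tracked entries in a v1 dirstate without building a full map.
    fn count_tracked(contents: &[u8]) -> Result<usize, HgError> {
        let mut tracked = 0;
        let _parents = parse_dirstate_entries(contents, |_path, entry, _copy| {
            if entry.state.is_tracked() {
                tracked += 1;
            }
            Ok(())
        })?;
        Ok(tracked)
    }

The closure runs once per entry in file order; copy sources arrive through the third parameter, exactly as the `parse_dirstate` wrapper above collects them.
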
89
89
90 fn packed_filename_and_copy_source_size(
90 fn packed_filename_and_copy_source_size(
91 filename: &HgPath,
91 filename: &HgPath,
92 copy_source: Option<&HgPath>,
92 copy_source: Option<&HgPath>,
93 ) -> usize {
93 ) -> usize {
94 filename.len()
94 filename.len()
95 + if let Some(source) = copy_source {
95 + if let Some(source) = copy_source {
96 b"\0".len() + source.len()
96 b"\0".len() + source.len()
97 } else {
97 } else {
98 0
98 0
99 }
99 }
100 }
100 }
101
101
102 pub fn packed_entry_size(
102 pub fn packed_entry_size(
103 filename: &HgPath,
103 filename: &HgPath,
104 copy_source: Option<&HgPath>,
104 copy_source: Option<&HgPath>,
105 ) -> usize {
105 ) -> usize {
106 MIN_ENTRY_SIZE
106 MIN_ENTRY_SIZE
107 + packed_filename_and_copy_source_size(filename, copy_source)
107 + packed_filename_and_copy_source_size(filename, copy_source)
108 }
108 }
109
109
110 pub fn pack_entry(
110 pub fn pack_entry(
111 filename: &HgPath,
111 filename: &HgPath,
112 entry: &DirstateEntry,
112 entry: &DirstateEntry,
113 copy_source: Option<&HgPath>,
113 copy_source: Option<&HgPath>,
114 packed: &mut Vec<u8>,
114 packed: &mut Vec<u8>,
115 ) {
115 ) {
116 let length = packed_filename_and_copy_source_size(filename, copy_source);
116 let length = packed_filename_and_copy_source_size(filename, copy_source);
117
117
118 // Unwrapping because `impl std::io::Write for Vec<u8>` never errors
118 // Unwrapping because `impl std::io::Write for Vec<u8>` never errors
119 packed.write_u8(entry.state.into()).unwrap();
119 packed.write_u8(entry.state.into()).unwrap();
120 packed.write_i32::<BigEndian>(entry.mode).unwrap();
120 packed.write_i32::<BigEndian>(entry.mode).unwrap();
121 packed.write_i32::<BigEndian>(entry.size).unwrap();
121 packed.write_i32::<BigEndian>(entry.size).unwrap();
122 packed.write_i32::<BigEndian>(entry.mtime).unwrap();
122 packed.write_i32::<BigEndian>(entry.mtime).unwrap();
123 packed.write_i32::<BigEndian>(length as i32).unwrap();
123 packed.write_i32::<BigEndian>(length as i32).unwrap();
124 packed.extend(filename.as_bytes());
124 packed.extend(filename.as_bytes());
125 if let Some(source) = copy_source {
125 if let Some(source) = copy_source {
126 packed.push(b'\0');
126 packed.push(b'\0');
127 packed.extend(source.as_bytes());
127 packed.extend(source.as_bytes());
128 }
128 }
129 }
129 }
130
130
131 /// Seconds since the Unix epoch
131 /// Seconds since the Unix epoch
132 pub struct Timestamp(pub u64);
132 pub struct Timestamp(pub i64);
133
133
134 impl DirstateEntry {
134 impl DirstateEntry {
135 pub fn mtime_is_ambiguous(&self, now: i32) -> bool {
135 pub fn mtime_is_ambiguous(&self, now: i32) -> bool {
136 self.state == EntryState::Normal && self.mtime == now
136 self.state == EntryState::Normal && self.mtime == now
137 }
137 }
138
138
139 pub fn clear_ambiguous_mtime(&mut self, now: i32) -> bool {
139 pub fn clear_ambiguous_mtime(&mut self, now: i32) -> bool {
140 let ambiguous = self.mtime_is_ambiguous(now);
140 let ambiguous = self.mtime_is_ambiguous(now);
141 if ambiguous {
141 if ambiguous {
142 // The file was last modified "simultaneously" with the current
142 // The file was last modified "simultaneously" with the current
143 // write to dirstate (i.e. within the same second for file-
143 // write to dirstate (i.e. within the same second for file-
144 // systems with a granularity of 1 sec). This commonly happens
144 // systems with a granularity of 1 sec). This commonly happens
145 // for at least a couple of files on 'update'.
145 // for at least a couple of files on 'update'.
146 // The user could change the file without changing its size
146 // The user could change the file without changing its size
147 // within the same second. Invalidate the file's mtime in
147 // within the same second. Invalidate the file's mtime in
148 // dirstate, forcing future 'status' calls to compare the
148 // dirstate, forcing future 'status' calls to compare the
149 // contents of the file if the size is the same. This prevents
149 // contents of the file if the size is the same. This prevents
150 // mistakenly treating such files as clean.
150 // mistakenly treating such files as clean.
151 self.clear_mtime()
151 self.clear_mtime()
152 }
152 }
153 ambiguous
153 ambiguous
154 }
154 }
155
155
156 pub fn clear_mtime(&mut self) {
156 pub fn clear_mtime(&mut self) {
157 self.mtime = -1;
157 self.mtime = -1;
158 }
158 }
159 }
159 }
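
A minimal illustration of the ambiguity rule above, in the style of the tests at the end of this file (same crate-internal imports assumed):

    #[test]
    fn mtime_ambiguity_sketch() {
        let mut entry = DirstateEntry {
            state: EntryState::Normal,
            mode: 0o644,
            size: 42,
            mtime: 1_000_000,
        };
        // Written within the same second as `now`: the mtime can't be trusted.
        assert!(entry.clear_ambiguous_mtime(1_000_000));
        assert_eq!(entry.mtime, -1); // a later `status` re-compares contents
        // A second call is a no-op: -1 never equals a real `now`.
        assert!(!entry.clear_ambiguous_mtime(1_000_000));
    }
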
160
160
161 pub fn pack_dirstate(
161 pub fn pack_dirstate(
162 state_map: &mut StateMap,
162 state_map: &mut StateMap,
163 copy_map: &CopyMap,
163 copy_map: &CopyMap,
164 parents: DirstateParents,
164 parents: DirstateParents,
165 now: Timestamp,
165 now: Timestamp,
166 ) -> Result<Vec<u8>, HgError> {
166 ) -> Result<Vec<u8>, HgError> {
167 // TODO move away from i32 before 2038.
167 // TODO move away from i32 before 2038.
168 let now: i32 = now.0.try_into().expect("time overflow");
168 let now: i32 = now.0.try_into().expect("time overflow");
169
169
170 let expected_size: usize = state_map
170 let expected_size: usize = state_map
171 .iter()
171 .iter()
172 .map(|(filename, _)| {
172 .map(|(filename, _)| {
173 packed_entry_size(filename, copy_map.get(filename).map(|p| &**p))
173 packed_entry_size(filename, copy_map.get(filename).map(|p| &**p))
174 })
174 })
175 .sum();
175 .sum();
176 let expected_size = expected_size + PARENT_SIZE * 2;
176 let expected_size = expected_size + PARENT_SIZE * 2;
177
177
178 let mut packed = Vec::with_capacity(expected_size);
178 let mut packed = Vec::with_capacity(expected_size);
179
179
180 packed.extend(parents.p1.as_bytes());
180 packed.extend(parents.p1.as_bytes());
181 packed.extend(parents.p2.as_bytes());
181 packed.extend(parents.p2.as_bytes());
182
182
183 for (filename, entry) in state_map.iter_mut() {
183 for (filename, entry) in state_map.iter_mut() {
184 entry.clear_ambiguous_mtime(now);
184 entry.clear_ambiguous_mtime(now);
185 pack_entry(
185 pack_entry(
186 filename,
186 filename,
187 entry,
187 entry,
188 copy_map.get(filename).map(|p| &**p),
188 copy_map.get(filename).map(|p| &**p),
189 &mut packed,
189 &mut packed,
190 )
190 )
191 }
191 }
192
192
193 if packed.len() != expected_size {
193 if packed.len() != expected_size {
194 return Err(HgError::CorruptedRepository(format!(
194 return Err(HgError::CorruptedRepository(format!(
195 "bad dirstate size: {} != {}",
195 "bad dirstate size: {} != {}",
196 expected_size,
196 expected_size,
197 packed.len()
197 packed.len()
198 )));
198 )));
199 }
199 }
200
200
201 Ok(packed)
201 Ok(packed)
202 }
202 }
203
203
204 #[cfg(test)]
204 #[cfg(test)]
205 mod tests {
205 mod tests {
206 use super::*;
206 use super::*;
207 use crate::{utils::hg_path::HgPathBuf, FastHashMap};
207 use crate::{utils::hg_path::HgPathBuf, FastHashMap};
208 use pretty_assertions::assert_eq;
208 use pretty_assertions::assert_eq;
209
209
210 #[test]
210 #[test]
211 fn test_pack_dirstate_empty() {
211 fn test_pack_dirstate_empty() {
212 let mut state_map = StateMap::default();
212 let mut state_map = StateMap::default();
213 let copymap = FastHashMap::default();
213 let copymap = FastHashMap::default();
214 let parents = DirstateParents {
214 let parents = DirstateParents {
215 p1: b"12345678910111213141".into(),
215 p1: b"12345678910111213141".into(),
216 p2: b"00000000000000000000".into(),
216 p2: b"00000000000000000000".into(),
217 };
217 };
218 let now = Timestamp(15000000);
218 let now = Timestamp(15000000);
219 let expected = b"1234567891011121314100000000000000000000".to_vec();
219 let expected = b"1234567891011121314100000000000000000000".to_vec();
220
220
221 assert_eq!(
221 assert_eq!(
222 expected,
222 expected,
223 pack_dirstate(&mut state_map, &copymap, parents, now).unwrap()
223 pack_dirstate(&mut state_map, &copymap, parents, now).unwrap()
224 );
224 );
225
225
226 assert!(state_map.is_empty())
226 assert!(state_map.is_empty())
227 }
227 }
228 #[test]
228 #[test]
229 fn test_pack_dirstate_one_entry() {
229 fn test_pack_dirstate_one_entry() {
230 let expected_state_map: StateMap = [(
230 let expected_state_map: StateMap = [(
231 HgPathBuf::from_bytes(b"f1"),
231 HgPathBuf::from_bytes(b"f1"),
232 DirstateEntry {
232 DirstateEntry {
233 state: EntryState::Normal,
233 state: EntryState::Normal,
234 mode: 0o644,
234 mode: 0o644,
235 size: 0,
235 size: 0,
236 mtime: 791231220,
236 mtime: 791231220,
237 },
237 },
238 )]
238 )]
239 .iter()
239 .iter()
240 .cloned()
240 .cloned()
241 .collect();
241 .collect();
242 let mut state_map = expected_state_map.clone();
242 let mut state_map = expected_state_map.clone();
243
243
244 let copymap = FastHashMap::default();
244 let copymap = FastHashMap::default();
245 let parents = DirstateParents {
245 let parents = DirstateParents {
246 p1: b"12345678910111213141".into(),
246 p1: b"12345678910111213141".into(),
247 p2: b"00000000000000000000".into(),
247 p2: b"00000000000000000000".into(),
248 };
248 };
249 let now = Timestamp(15000000);
249 let now = Timestamp(15000000);
250 let expected = [
250 let expected = [
251 49, 50, 51, 52, 53, 54, 55, 56, 57, 49, 48, 49, 49, 49, 50, 49,
251 49, 50, 51, 52, 53, 54, 55, 56, 57, 49, 48, 49, 49, 49, 50, 49,
252 51, 49, 52, 49, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48,
252 51, 49, 52, 49, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48,
253 48, 48, 48, 48, 48, 48, 48, 48, 110, 0, 0, 1, 164, 0, 0, 0, 0, 47,
253 48, 48, 48, 48, 48, 48, 48, 48, 110, 0, 0, 1, 164, 0, 0, 0, 0, 47,
254 41, 58, 244, 0, 0, 0, 2, 102, 49,
254 41, 58, 244, 0, 0, 0, 2, 102, 49,
255 ]
255 ]
256 .to_vec();
256 .to_vec();
257
257
258 assert_eq!(
258 assert_eq!(
259 expected,
259 expected,
260 pack_dirstate(&mut state_map, &copymap, parents, now).unwrap()
260 pack_dirstate(&mut state_map, &copymap, parents, now).unwrap()
261 );
261 );
262
262
263 assert_eq!(expected_state_map, state_map);
263 assert_eq!(expected_state_map, state_map);
264 }
264 }
265 #[test]
265 #[test]
266 fn test_pack_dirstate_one_entry_with_copy() {
266 fn test_pack_dirstate_one_entry_with_copy() {
267 let expected_state_map: StateMap = [(
267 let expected_state_map: StateMap = [(
268 HgPathBuf::from_bytes(b"f1"),
268 HgPathBuf::from_bytes(b"f1"),
269 DirstateEntry {
269 DirstateEntry {
270 state: EntryState::Normal,
270 state: EntryState::Normal,
271 mode: 0o644,
271 mode: 0o644,
272 size: 0,
272 size: 0,
273 mtime: 791231220,
273 mtime: 791231220,
274 },
274 },
275 )]
275 )]
276 .iter()
276 .iter()
277 .cloned()
277 .cloned()
278 .collect();
278 .collect();
279 let mut state_map = expected_state_map.clone();
279 let mut state_map = expected_state_map.clone();
280 let mut copymap = FastHashMap::default();
280 let mut copymap = FastHashMap::default();
281 copymap.insert(
281 copymap.insert(
282 HgPathBuf::from_bytes(b"f1"),
282 HgPathBuf::from_bytes(b"f1"),
283 HgPathBuf::from_bytes(b"copyname"),
283 HgPathBuf::from_bytes(b"copyname"),
284 );
284 );
285 let parents = DirstateParents {
285 let parents = DirstateParents {
286 p1: b"12345678910111213141".into(),
286 p1: b"12345678910111213141".into(),
287 p2: b"00000000000000000000".into(),
287 p2: b"00000000000000000000".into(),
288 };
288 };
289 let now = Timestamp(15000000);
289 let now = Timestamp(15000000);
290 let expected = [
290 let expected = [
291 49, 50, 51, 52, 53, 54, 55, 56, 57, 49, 48, 49, 49, 49, 50, 49,
291 49, 50, 51, 52, 53, 54, 55, 56, 57, 49, 48, 49, 49, 49, 50, 49,
292 51, 49, 52, 49, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48,
292 51, 49, 52, 49, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48,
293 48, 48, 48, 48, 48, 48, 48, 48, 110, 0, 0, 1, 164, 0, 0, 0, 0, 47,
293 48, 48, 48, 48, 48, 48, 48, 48, 110, 0, 0, 1, 164, 0, 0, 0, 0, 47,
294 41, 58, 244, 0, 0, 0, 11, 102, 49, 0, 99, 111, 112, 121, 110, 97,
294 41, 58, 244, 0, 0, 0, 11, 102, 49, 0, 99, 111, 112, 121, 110, 97,
295 109, 101,
295 109, 101,
296 ]
296 ]
297 .to_vec();
297 .to_vec();
298
298
299 assert_eq!(
299 assert_eq!(
300 expected,
300 expected,
301 pack_dirstate(&mut state_map, &copymap, parents, now).unwrap()
301 pack_dirstate(&mut state_map, &copymap, parents, now).unwrap()
302 );
302 );
303 assert_eq!(expected_state_map, state_map);
303 assert_eq!(expected_state_map, state_map);
304 }
304 }
305
305
306 #[test]
306 #[test]
307 fn test_parse_pack_one_entry_with_copy() {
307 fn test_parse_pack_one_entry_with_copy() {
308 let mut state_map: StateMap = [(
308 let mut state_map: StateMap = [(
309 HgPathBuf::from_bytes(b"f1"),
309 HgPathBuf::from_bytes(b"f1"),
310 DirstateEntry {
310 DirstateEntry {
311 state: EntryState::Normal,
311 state: EntryState::Normal,
312 mode: 0o644,
312 mode: 0o644,
313 size: 0,
313 size: 0,
314 mtime: 791231220,
314 mtime: 791231220,
315 },
315 },
316 )]
316 )]
317 .iter()
317 .iter()
318 .cloned()
318 .cloned()
319 .collect();
319 .collect();
320 let mut copymap = FastHashMap::default();
320 let mut copymap = FastHashMap::default();
321 copymap.insert(
321 copymap.insert(
322 HgPathBuf::from_bytes(b"f1"),
322 HgPathBuf::from_bytes(b"f1"),
323 HgPathBuf::from_bytes(b"copyname"),
323 HgPathBuf::from_bytes(b"copyname"),
324 );
324 );
325 let parents = DirstateParents {
325 let parents = DirstateParents {
326 p1: b"12345678910111213141".into(),
326 p1: b"12345678910111213141".into(),
327 p2: b"00000000000000000000".into(),
327 p2: b"00000000000000000000".into(),
328 };
328 };
329 let now = Timestamp(15000000);
329 let now = Timestamp(15000000);
330 let result =
330 let result =
331 pack_dirstate(&mut state_map, &copymap, parents.clone(), now)
331 pack_dirstate(&mut state_map, &copymap, parents.clone(), now)
332 .unwrap();
332 .unwrap();
333
333
334 let (new_parents, entries, copies) =
334 let (new_parents, entries, copies) =
335 parse_dirstate(result.as_slice()).unwrap();
335 parse_dirstate(result.as_slice()).unwrap();
336 let new_state_map: StateMap = entries
336 let new_state_map: StateMap = entries
337 .into_iter()
337 .into_iter()
338 .map(|(path, entry)| (path.to_owned(), entry))
338 .map(|(path, entry)| (path.to_owned(), entry))
339 .collect();
339 .collect();
340 let new_copy_map: CopyMap = copies
340 let new_copy_map: CopyMap = copies
341 .into_iter()
341 .into_iter()
342 .map(|(path, copy)| (path.to_owned(), copy.to_owned()))
342 .map(|(path, copy)| (path.to_owned(), copy.to_owned()))
343 .collect();
343 .collect();
344
344
345 assert_eq!(
345 assert_eq!(
346 (&parents, state_map, copymap),
346 (&parents, state_map, copymap),
347 (new_parents, new_state_map, new_copy_map)
347 (new_parents, new_state_map, new_copy_map)
348 )
348 )
349 }
349 }
350
350
351 #[test]
351 #[test]
352 fn test_parse_pack_multiple_entries_with_copy() {
352 fn test_parse_pack_multiple_entries_with_copy() {
353 let mut state_map: StateMap = [
353 let mut state_map: StateMap = [
354 (
354 (
355 HgPathBuf::from_bytes(b"f1"),
355 HgPathBuf::from_bytes(b"f1"),
356 DirstateEntry {
356 DirstateEntry {
357 state: EntryState::Normal,
357 state: EntryState::Normal,
358 mode: 0o644,
358 mode: 0o644,
359 size: 0,
359 size: 0,
360 mtime: 791231220,
360 mtime: 791231220,
361 },
361 },
362 ),
362 ),
363 (
363 (
364 HgPathBuf::from_bytes(b"f2"),
364 HgPathBuf::from_bytes(b"f2"),
365 DirstateEntry {
365 DirstateEntry {
366 state: EntryState::Merged,
366 state: EntryState::Merged,
367 mode: 0o777,
367 mode: 0o777,
368 size: 1000,
368 size: 1000,
369 mtime: 791231220,
369 mtime: 791231220,
370 },
370 },
371 ),
371 ),
372 (
372 (
373 HgPathBuf::from_bytes(b"f3"),
373 HgPathBuf::from_bytes(b"f3"),
374 DirstateEntry {
374 DirstateEntry {
375 state: EntryState::Removed,
375 state: EntryState::Removed,
376 mode: 0o644,
376 mode: 0o644,
377 size: 234553,
377 size: 234553,
378 mtime: 791231220,
378 mtime: 791231220,
379 },
379 },
380 ),
380 ),
381 (
381 (
382 HgPathBuf::from_bytes(b"f4\xF6"),
382 HgPathBuf::from_bytes(b"f4\xF6"),
383 DirstateEntry {
383 DirstateEntry {
384 state: EntryState::Added,
384 state: EntryState::Added,
385 mode: 0o644,
385 mode: 0o644,
386 size: -1,
386 size: -1,
387 mtime: -1,
387 mtime: -1,
388 },
388 },
389 ),
389 ),
390 ]
390 ]
391 .iter()
391 .iter()
392 .cloned()
392 .cloned()
393 .collect();
393 .collect();
394 let mut copymap = FastHashMap::default();
394 let mut copymap = FastHashMap::default();
395 copymap.insert(
395 copymap.insert(
396 HgPathBuf::from_bytes(b"f1"),
396 HgPathBuf::from_bytes(b"f1"),
397 HgPathBuf::from_bytes(b"copyname"),
397 HgPathBuf::from_bytes(b"copyname"),
398 );
398 );
399 copymap.insert(
399 copymap.insert(
400 HgPathBuf::from_bytes(b"f4\xF6"),
400 HgPathBuf::from_bytes(b"f4\xF6"),
401 HgPathBuf::from_bytes(b"copyname2"),
401 HgPathBuf::from_bytes(b"copyname2"),
402 );
402 );
403 let parents = DirstateParents {
403 let parents = DirstateParents {
404 p1: b"12345678910111213141".into(),
404 p1: b"12345678910111213141".into(),
405 p2: b"00000000000000000000".into(),
405 p2: b"00000000000000000000".into(),
406 };
406 };
407 let now = Timestamp(15000000);
407 let now = Timestamp(15000000);
408 let result =
408 let result =
409 pack_dirstate(&mut state_map, &copymap, parents.clone(), now)
409 pack_dirstate(&mut state_map, &copymap, parents.clone(), now)
410 .unwrap();
410 .unwrap();
411
411
412 let (new_parents, entries, copies) =
412 let (new_parents, entries, copies) =
413 parse_dirstate(result.as_slice()).unwrap();
413 parse_dirstate(result.as_slice()).unwrap();
414 let new_state_map: StateMap = entries
414 let new_state_map: StateMap = entries
415 .into_iter()
415 .into_iter()
416 .map(|(path, entry)| (path.to_owned(), entry))
416 .map(|(path, entry)| (path.to_owned(), entry))
417 .collect();
417 .collect();
418 let new_copy_map: CopyMap = copies
418 let new_copy_map: CopyMap = copies
419 .into_iter()
419 .into_iter()
420 .map(|(path, copy)| (path.to_owned(), copy.to_owned()))
420 .map(|(path, copy)| (path.to_owned(), copy.to_owned()))
421 .collect();
421 .collect();
422
422
423 assert_eq!(
423 assert_eq!(
424 (&parents, state_map, copymap),
424 (&parents, state_map, copymap),
425 (new_parents, new_state_map, new_copy_map)
425 (new_parents, new_state_map, new_copy_map)
426 )
426 )
427 }
427 }
428
428
429 #[test]
429 #[test]
430 /// https://www.mercurial-scm.org/repo/hg/rev/af3f26b6bba4
430 /// https://www.mercurial-scm.org/repo/hg/rev/af3f26b6bba4
431 fn test_parse_pack_one_entry_with_copy_and_time_conflict() {
431 fn test_parse_pack_one_entry_with_copy_and_time_conflict() {
432 let mut state_map: StateMap = [(
432 let mut state_map: StateMap = [(
433 HgPathBuf::from_bytes(b"f1"),
433 HgPathBuf::from_bytes(b"f1"),
434 DirstateEntry {
434 DirstateEntry {
435 state: EntryState::Normal,
435 state: EntryState::Normal,
436 mode: 0o644,
436 mode: 0o644,
437 size: 0,
437 size: 0,
438 mtime: 15000000,
438 mtime: 15000000,
439 },
439 },
440 )]
440 )]
441 .iter()
441 .iter()
442 .cloned()
442 .cloned()
443 .collect();
443 .collect();
444 let mut copymap = FastHashMap::default();
444 let mut copymap = FastHashMap::default();
445 copymap.insert(
445 copymap.insert(
446 HgPathBuf::from_bytes(b"f1"),
446 HgPathBuf::from_bytes(b"f1"),
447 HgPathBuf::from_bytes(b"copyname"),
447 HgPathBuf::from_bytes(b"copyname"),
448 );
448 );
449 let parents = DirstateParents {
449 let parents = DirstateParents {
450 p1: b"12345678910111213141".into(),
450 p1: b"12345678910111213141".into(),
451 p2: b"00000000000000000000".into(),
451 p2: b"00000000000000000000".into(),
452 };
452 };
453 let now = Timestamp(15000000);
453 let now = Timestamp(15000000);
454 let result =
454 let result =
455 pack_dirstate(&mut state_map, &copymap, parents.clone(), now)
455 pack_dirstate(&mut state_map, &copymap, parents.clone(), now)
456 .unwrap();
456 .unwrap();
457
457
458 let (new_parents, entries, copies) =
458 let (new_parents, entries, copies) =
459 parse_dirstate(result.as_slice()).unwrap();
459 parse_dirstate(result.as_slice()).unwrap();
460 let new_state_map: StateMap = entries
460 let new_state_map: StateMap = entries
461 .into_iter()
461 .into_iter()
462 .map(|(path, entry)| (path.to_owned(), entry))
462 .map(|(path, entry)| (path.to_owned(), entry))
463 .collect();
463 .collect();
464 let new_copy_map: CopyMap = copies
464 let new_copy_map: CopyMap = copies
465 .into_iter()
465 .into_iter()
466 .map(|(path, copy)| (path.to_owned(), copy.to_owned()))
466 .map(|(path, copy)| (path.to_owned(), copy.to_owned()))
467 .collect();
467 .collect();
468
468
469 assert_eq!(
469 assert_eq!(
470 (
470 (
471 &parents,
471 &parents,
472 [(
472 [(
473 HgPathBuf::from_bytes(b"f1"),
473 HgPathBuf::from_bytes(b"f1"),
474 DirstateEntry {
474 DirstateEntry {
475 state: EntryState::Normal,
475 state: EntryState::Normal,
476 mode: 0o644,
476 mode: 0o644,
477 size: 0,
477 size: 0,
478 mtime: -1
478 mtime: -1
479 }
479 }
480 )]
480 )]
481 .iter()
481 .iter()
482 .cloned()
482 .cloned()
483 .collect::<StateMap>(),
483 .collect::<StateMap>(),
484 copymap,
484 copymap,
485 ),
485 ),
486 (new_parents, new_state_map, new_copy_map)
486 (new_parents, new_state_map, new_copy_map)
487 )
487 )
488 }
488 }
489 }
489 }
@@ -1,1071 +1,1095 b''
1 use bytes_cast::BytesCast;
1 use bytes_cast::BytesCast;
2 use micro_timer::timed;
2 use micro_timer::timed;
3 use std::borrow::Cow;
3 use std::borrow::Cow;
4 use std::convert::TryInto;
4 use std::convert::TryInto;
5 use std::path::PathBuf;
5 use std::path::PathBuf;
6
6
7 use super::on_disk;
7 use super::on_disk;
8 use super::on_disk::DirstateV2ParseError;
8 use super::on_disk::DirstateV2ParseError;
9 use super::path_with_basename::WithBasename;
9 use super::path_with_basename::WithBasename;
10 use crate::dirstate::parsers::pack_entry;
10 use crate::dirstate::parsers::pack_entry;
11 use crate::dirstate::parsers::packed_entry_size;
11 use crate::dirstate::parsers::packed_entry_size;
12 use crate::dirstate::parsers::parse_dirstate_entries;
12 use crate::dirstate::parsers::parse_dirstate_entries;
13 use crate::dirstate::parsers::Timestamp;
13 use crate::dirstate::parsers::Timestamp;
14 use crate::matchers::Matcher;
14 use crate::matchers::Matcher;
15 use crate::utils::hg_path::{HgPath, HgPathBuf};
15 use crate::utils::hg_path::{HgPath, HgPathBuf};
16 use crate::CopyMapIter;
16 use crate::CopyMapIter;
17 use crate::DirstateEntry;
17 use crate::DirstateEntry;
18 use crate::DirstateError;
18 use crate::DirstateError;
19 use crate::DirstateParents;
19 use crate::DirstateParents;
20 use crate::DirstateStatus;
20 use crate::DirstateStatus;
21 use crate::EntryState;
21 use crate::EntryState;
22 use crate::FastHashMap;
22 use crate::FastHashMap;
23 use crate::PatternFileWarning;
23 use crate::PatternFileWarning;
24 use crate::StateMapIter;
24 use crate::StateMapIter;
25 use crate::StatusError;
25 use crate::StatusError;
26 use crate::StatusOptions;
26 use crate::StatusOptions;
27
27
28 pub struct DirstateMap<'on_disk> {
28 pub struct DirstateMap<'on_disk> {
29 /// Contents of the `.hg/dirstate` file
29 /// Contents of the `.hg/dirstate` file
30 pub(super) on_disk: &'on_disk [u8],
30 pub(super) on_disk: &'on_disk [u8],
31
31
32 pub(super) root: ChildNodes<'on_disk>,
32 pub(super) root: ChildNodes<'on_disk>,
33
33
34 /// Number of nodes anywhere in the tree that have `.entry.is_some()`.
34 /// Number of nodes anywhere in the tree that have `.entry.is_some()`.
35 pub(super) nodes_with_entry_count: u32,
35 pub(super) nodes_with_entry_count: u32,
36
36
37 /// Number of nodes anywhere in the tree that have
37 /// Number of nodes anywhere in the tree that have
38 /// `.copy_source.is_some()`.
38 /// `.copy_source.is_some()`.
39 pub(super) nodes_with_copy_source_count: u32,
39 pub(super) nodes_with_copy_source_count: u32,
40 }
40 }
41
41
42 /// Using a plain `HgPathBuf` of the full path from the repository root as a
42 /// Using a plain `HgPathBuf` of the full path from the repository root as a
43 /// map key would also work: all paths in a given map have the same parent
43 /// map key would also work: all paths in a given map have the same parent
44 /// path, so comparing full paths gives the same result as comparing base
44 /// path, so comparing full paths gives the same result as comparing base
45 /// names. However `HashMap` would waste time always re-hashing the same
45 /// names. However `HashMap` would waste time always re-hashing the same
46 /// string prefix.
46 /// string prefix.
47 pub(super) type NodeKey<'on_disk> = WithBasename<Cow<'on_disk, HgPath>>;
47 pub(super) type NodeKey<'on_disk> = WithBasename<Cow<'on_disk, HgPath>>;
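
To see why this works: all keys within one child map share the same parent prefix, so ordering and hashing by the final component alone is equivalent to using the full path while touching fewer bytes. A self-contained sketch (the `base_name` helper is hypothetical; the real code keeps both views via `WithBasename`):

    fn base_name(path: &[u8]) -> &[u8] {
        // Final component after the last '/', or the whole path at the root.
        match path.iter().rposition(|&b| b == b'/') {
            Some(i) => &path[i + 1..],
            None => path,
        }
    }

    fn main() {
        // Siblings share the "dir/" prefix, so base-name order equals
        // full-path order.
        let a: &[u8] = b"dir/apple";
        let b: &[u8] = b"dir/banana";
        assert_eq!(base_name(a).cmp(base_name(b)), a.cmp(b));
    }
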
48
48
49 /// Similar to `&'tree Cow<'on_disk, HgPath>`, but can also be returned
49 /// Similar to `&'tree Cow<'on_disk, HgPath>`, but can also be returned
50 /// for on-disk nodes that don’t actually have a `Cow` to borrow.
50 /// for on-disk nodes that don’t actually have a `Cow` to borrow.
51 pub(super) enum BorrowedPath<'tree, 'on_disk> {
51 pub(super) enum BorrowedPath<'tree, 'on_disk> {
52 InMemory(&'tree HgPathBuf),
52 InMemory(&'tree HgPathBuf),
53 OnDisk(&'on_disk HgPath),
53 OnDisk(&'on_disk HgPath),
54 }
54 }
55
55
56 pub(super) enum ChildNodes<'on_disk> {
56 pub(super) enum ChildNodes<'on_disk> {
57 InMemory(FastHashMap<NodeKey<'on_disk>, Node<'on_disk>>),
57 InMemory(FastHashMap<NodeKey<'on_disk>, Node<'on_disk>>),
58 OnDisk(&'on_disk [on_disk::Node]),
58 OnDisk(&'on_disk [on_disk::Node]),
59 }
59 }
60
60
61 pub(super) enum ChildNodesRef<'tree, 'on_disk> {
61 pub(super) enum ChildNodesRef<'tree, 'on_disk> {
62 InMemory(&'tree FastHashMap<NodeKey<'on_disk>, Node<'on_disk>>),
62 InMemory(&'tree FastHashMap<NodeKey<'on_disk>, Node<'on_disk>>),
63 OnDisk(&'on_disk [on_disk::Node]),
63 OnDisk(&'on_disk [on_disk::Node]),
64 }
64 }
65
65
66 pub(super) enum NodeRef<'tree, 'on_disk> {
66 pub(super) enum NodeRef<'tree, 'on_disk> {
67 InMemory(&'tree NodeKey<'on_disk>, &'tree Node<'on_disk>),
67 InMemory(&'tree NodeKey<'on_disk>, &'tree Node<'on_disk>),
68 OnDisk(&'on_disk on_disk::Node),
68 OnDisk(&'on_disk on_disk::Node),
69 }
69 }
70
70
71 impl<'tree, 'on_disk> BorrowedPath<'tree, 'on_disk> {
71 impl<'tree, 'on_disk> BorrowedPath<'tree, 'on_disk> {
72 pub fn detach_from_tree(&self) -> Cow<'on_disk, HgPath> {
72 pub fn detach_from_tree(&self) -> Cow<'on_disk, HgPath> {
73 match *self {
73 match *self {
74 BorrowedPath::InMemory(in_memory) => Cow::Owned(in_memory.clone()),
74 BorrowedPath::InMemory(in_memory) => Cow::Owned(in_memory.clone()),
75 BorrowedPath::OnDisk(on_disk) => Cow::Borrowed(on_disk),
75 BorrowedPath::OnDisk(on_disk) => Cow::Borrowed(on_disk),
76 }
76 }
77 }
77 }
78 }
78 }
79
79
80 impl<'tree, 'on_disk> std::ops::Deref for BorrowedPath<'tree, 'on_disk> {
80 impl<'tree, 'on_disk> std::ops::Deref for BorrowedPath<'tree, 'on_disk> {
81 type Target = HgPath;
81 type Target = HgPath;
82
82
83 fn deref(&self) -> &HgPath {
83 fn deref(&self) -> &HgPath {
84 match *self {
84 match *self {
85 BorrowedPath::InMemory(in_memory) => in_memory,
85 BorrowedPath::InMemory(in_memory) => in_memory,
86 BorrowedPath::OnDisk(on_disk) => on_disk,
86 BorrowedPath::OnDisk(on_disk) => on_disk,
87 }
87 }
88 }
88 }
89 }
89 }
90
90
91 impl Default for ChildNodes<'_> {
91 impl Default for ChildNodes<'_> {
92 fn default() -> Self {
92 fn default() -> Self {
93 ChildNodes::InMemory(Default::default())
93 ChildNodes::InMemory(Default::default())
94 }
94 }
95 }
95 }
96
96
97 impl<'on_disk> ChildNodes<'on_disk> {
97 impl<'on_disk> ChildNodes<'on_disk> {
98 pub(super) fn as_ref<'tree>(
98 pub(super) fn as_ref<'tree>(
99 &'tree self,
99 &'tree self,
100 ) -> ChildNodesRef<'tree, 'on_disk> {
100 ) -> ChildNodesRef<'tree, 'on_disk> {
101 match self {
101 match self {
102 ChildNodes::InMemory(nodes) => ChildNodesRef::InMemory(nodes),
102 ChildNodes::InMemory(nodes) => ChildNodesRef::InMemory(nodes),
103 ChildNodes::OnDisk(nodes) => ChildNodesRef::OnDisk(nodes),
103 ChildNodes::OnDisk(nodes) => ChildNodesRef::OnDisk(nodes),
104 }
104 }
105 }
105 }
106
106
107 pub(super) fn is_empty(&self) -> bool {
107 pub(super) fn is_empty(&self) -> bool {
108 match self {
108 match self {
109 ChildNodes::InMemory(nodes) => nodes.is_empty(),
109 ChildNodes::InMemory(nodes) => nodes.is_empty(),
110 ChildNodes::OnDisk(nodes) => nodes.is_empty(),
110 ChildNodes::OnDisk(nodes) => nodes.is_empty(),
111 }
111 }
112 }
112 }
113
113
114 pub(super) fn make_mut(
114 pub(super) fn make_mut(
115 &mut self,
115 &mut self,
116 on_disk: &'on_disk [u8],
116 on_disk: &'on_disk [u8],
117 ) -> Result<
117 ) -> Result<
118 &mut FastHashMap<NodeKey<'on_disk>, Node<'on_disk>>,
118 &mut FastHashMap<NodeKey<'on_disk>, Node<'on_disk>>,
119 DirstateV2ParseError,
119 DirstateV2ParseError,
120 > {
120 > {
121 match self {
121 match self {
122 ChildNodes::InMemory(nodes) => Ok(nodes),
122 ChildNodes::InMemory(nodes) => Ok(nodes),
123 ChildNodes::OnDisk(nodes) => {
123 ChildNodes::OnDisk(nodes) => {
124 let nodes = nodes
124 let nodes = nodes
125 .iter()
125 .iter()
126 .map(|node| {
126 .map(|node| {
127 Ok((
127 Ok((
128 node.path(on_disk)?,
128 node.path(on_disk)?,
129 node.to_in_memory_node(on_disk)?,
129 node.to_in_memory_node(on_disk)?,
130 ))
130 ))
131 })
131 })
132 .collect::<Result<_, _>>()?;
132 .collect::<Result<_, _>>()?;
133 *self = ChildNodes::InMemory(nodes);
133 *self = ChildNodes::InMemory(nodes);
134 match self {
134 match self {
135 ChildNodes::InMemory(nodes) => Ok(nodes),
135 ChildNodes::InMemory(nodes) => Ok(nodes),
136 ChildNodes::OnDisk(_) => unreachable!(),
136 ChildNodes::OnDisk(_) => unreachable!(),
137 }
137 }
138 }
138 }
139 }
139 }
140 }
140 }
141 }
141 }
142
142
143 impl<'tree, 'on_disk> ChildNodesRef<'tree, 'on_disk> {
143 impl<'tree, 'on_disk> ChildNodesRef<'tree, 'on_disk> {
144 pub(super) fn get(
144 pub(super) fn get(
145 &self,
145 &self,
146 base_name: &HgPath,
146 base_name: &HgPath,
147 on_disk: &'on_disk [u8],
147 on_disk: &'on_disk [u8],
148 ) -> Result<Option<NodeRef<'tree, 'on_disk>>, DirstateV2ParseError> {
148 ) -> Result<Option<NodeRef<'tree, 'on_disk>>, DirstateV2ParseError> {
149 match self {
149 match self {
150 ChildNodesRef::InMemory(nodes) => Ok(nodes
150 ChildNodesRef::InMemory(nodes) => Ok(nodes
151 .get_key_value(base_name)
151 .get_key_value(base_name)
152 .map(|(k, v)| NodeRef::InMemory(k, v))),
152 .map(|(k, v)| NodeRef::InMemory(k, v))),
153 ChildNodesRef::OnDisk(nodes) => {
153 ChildNodesRef::OnDisk(nodes) => {
154 let mut parse_result = Ok(());
154 let mut parse_result = Ok(());
155 let search_result = nodes.binary_search_by(|node| {
155 let search_result = nodes.binary_search_by(|node| {
156 match node.base_name(on_disk) {
156 match node.base_name(on_disk) {
157 Ok(node_base_name) => node_base_name.cmp(base_name),
157 Ok(node_base_name) => node_base_name.cmp(base_name),
158 Err(e) => {
158 Err(e) => {
159 parse_result = Err(e);
159 parse_result = Err(e);
160 // Dummy comparison result, `search_result` won’t
160 // Dummy comparison result, `search_result` won’t
161 // be used since `parse_result` is an error
161 // be used since `parse_result` is an error
162 std::cmp::Ordering::Equal
162 std::cmp::Ordering::Equal
163 }
163 }
164 }
164 }
165 });
165 });
166 parse_result.map(|()| {
166 parse_result.map(|()| {
167 search_result.ok().map(|i| NodeRef::OnDisk(&nodes[i]))
167 search_result.ok().map(|i| NodeRef::OnDisk(&nodes[i]))
168 })
168 })
169 }
169 }
170 }
170 }
171 }
171 }
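
The comparator above cannot return an error out of `binary_search_by`, so it parks the first failure in the captured `parse_result` and returns a dummy ordering. A generic, self-contained sketch of the same pattern (the names are mine, not from the patch):

    use std::cmp::Ordering;

    /// Binary search whose comparator may fail; the first error wins.
    fn binary_search_fallible<T, E>(
        items: &[T],
        mut cmp: impl FnMut(&T) -> Result<Ordering, E>,
    ) -> Result<Option<usize>, E> {
        let mut first_err = None;
        let search = items.binary_search_by(|item| match cmp(item) {
            Ok(ordering) => ordering,
            Err(e) => {
                if first_err.is_none() {
                    first_err = Some(e);
                }
                Ordering::Equal // dummy; discarded once an error is parked
            }
        });
        match first_err {
            Some(e) => Err(e),
            None => Ok(search.ok()),
        }
    }

    fn main() {
        let items = [1, 3, 5, 7];
        let found: Result<Option<usize>, ()> =
            binary_search_fallible(&items, |&n| Ok(n.cmp(&5)));
        assert_eq!(found, Ok(Some(2)));
    }
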
172
172
173 /// Iterate in undefined order
173 /// Iterate in undefined order
174 pub(super) fn iter(
174 pub(super) fn iter(
175 &self,
175 &self,
176 ) -> impl Iterator<Item = NodeRef<'tree, 'on_disk>> {
176 ) -> impl Iterator<Item = NodeRef<'tree, 'on_disk>> {
177 match self {
177 match self {
178 ChildNodesRef::InMemory(nodes) => itertools::Either::Left(
178 ChildNodesRef::InMemory(nodes) => itertools::Either::Left(
179 nodes.iter().map(|(k, v)| NodeRef::InMemory(k, v)),
179 nodes.iter().map(|(k, v)| NodeRef::InMemory(k, v)),
180 ),
180 ),
181 ChildNodesRef::OnDisk(nodes) => {
181 ChildNodesRef::OnDisk(nodes) => {
182 itertools::Either::Right(nodes.iter().map(NodeRef::OnDisk))
182 itertools::Either::Right(nodes.iter().map(NodeRef::OnDisk))
183 }
183 }
184 }
184 }
185 }
185 }
186
186
187 /// Iterate in parallel in undefined order
187 /// Iterate in parallel in undefined order
188 pub(super) fn par_iter(
188 pub(super) fn par_iter(
189 &self,
189 &self,
190 ) -> impl rayon::iter::ParallelIterator<Item = NodeRef<'tree, 'on_disk>>
190 ) -> impl rayon::iter::ParallelIterator<Item = NodeRef<'tree, 'on_disk>>
191 {
191 {
192 use rayon::prelude::*;
192 use rayon::prelude::*;
193 match self {
193 match self {
194 ChildNodesRef::InMemory(nodes) => rayon::iter::Either::Left(
194 ChildNodesRef::InMemory(nodes) => rayon::iter::Either::Left(
195 nodes.par_iter().map(|(k, v)| NodeRef::InMemory(k, v)),
195 nodes.par_iter().map(|(k, v)| NodeRef::InMemory(k, v)),
196 ),
196 ),
197 ChildNodesRef::OnDisk(nodes) => rayon::iter::Either::Right(
197 ChildNodesRef::OnDisk(nodes) => rayon::iter::Either::Right(
198 nodes.par_iter().map(NodeRef::OnDisk),
198 nodes.par_iter().map(NodeRef::OnDisk),
199 ),
199 ),
200 }
200 }
201 }
201 }
202
202
203 pub(super) fn sorted(&self) -> Vec<NodeRef<'tree, 'on_disk>> {
203 pub(super) fn sorted(&self) -> Vec<NodeRef<'tree, 'on_disk>> {
204 match self {
204 match self {
205 ChildNodesRef::InMemory(nodes) => {
205 ChildNodesRef::InMemory(nodes) => {
206 let mut vec: Vec<_> = nodes
206 let mut vec: Vec<_> = nodes
207 .iter()
207 .iter()
208 .map(|(k, v)| NodeRef::InMemory(k, v))
208 .map(|(k, v)| NodeRef::InMemory(k, v))
209 .collect();
209 .collect();
210 fn sort_key<'a>(node: &'a NodeRef) -> &'a HgPath {
210 fn sort_key<'a>(node: &'a NodeRef) -> &'a HgPath {
211 match node {
211 match node {
212 NodeRef::InMemory(path, _node) => path.base_name(),
212 NodeRef::InMemory(path, _node) => path.base_name(),
213 NodeRef::OnDisk(_) => unreachable!(),
213 NodeRef::OnDisk(_) => unreachable!(),
214 }
214 }
215 }
215 }
216 // `sort_unstable_by_key` doesn’t allow keys borrowing from the
216 // `sort_unstable_by_key` doesn’t allow keys borrowing from the
217 // value: https://github.com/rust-lang/rust/issues/34162
217 // value: https://github.com/rust-lang/rust/issues/34162
218 vec.sort_unstable_by(|a, b| sort_key(a).cmp(sort_key(b)));
218 vec.sort_unstable_by(|a, b| sort_key(a).cmp(sort_key(b)));
219 vec
219 vec
220 }
220 }
221 ChildNodesRef::OnDisk(nodes) => {
221 ChildNodesRef::OnDisk(nodes) => {
222 // Nodes on disk are already sorted
222 // Nodes on disk are already sorted
223 nodes.iter().map(NodeRef::OnDisk).collect()
223 nodes.iter().map(NodeRef::OnDisk).collect()
224 }
224 }
225 }
225 }
226 }
226 }
227 }
227 }
228
228
229 impl<'tree, 'on_disk> NodeRef<'tree, 'on_disk> {
229 impl<'tree, 'on_disk> NodeRef<'tree, 'on_disk> {
230 pub(super) fn full_path(
230 pub(super) fn full_path(
231 &self,
231 &self,
232 on_disk: &'on_disk [u8],
232 on_disk: &'on_disk [u8],
233 ) -> Result<&'tree HgPath, DirstateV2ParseError> {
233 ) -> Result<&'tree HgPath, DirstateV2ParseError> {
234 match self {
234 match self {
235 NodeRef::InMemory(path, _node) => Ok(path.full_path()),
235 NodeRef::InMemory(path, _node) => Ok(path.full_path()),
236 NodeRef::OnDisk(node) => node.full_path(on_disk),
236 NodeRef::OnDisk(node) => node.full_path(on_disk),
237 }
237 }
238 }
238 }
239
239
240 /// Returns a `BorrowedPath`, which can be turned into a `Cow<'on_disk,
240 /// Returns a `BorrowedPath`, which can be turned into a `Cow<'on_disk,
241 /// HgPath>` detached from `'tree`
241 /// HgPath>` detached from `'tree`
242 pub(super) fn full_path_borrowed(
242 pub(super) fn full_path_borrowed(
243 &self,
243 &self,
244 on_disk: &'on_disk [u8],
244 on_disk: &'on_disk [u8],
245 ) -> Result<BorrowedPath<'tree, 'on_disk>, DirstateV2ParseError> {
245 ) -> Result<BorrowedPath<'tree, 'on_disk>, DirstateV2ParseError> {
246 match self {
246 match self {
247 NodeRef::InMemory(path, _node) => match path.full_path() {
247 NodeRef::InMemory(path, _node) => match path.full_path() {
248 Cow::Borrowed(on_disk) => Ok(BorrowedPath::OnDisk(on_disk)),
248 Cow::Borrowed(on_disk) => Ok(BorrowedPath::OnDisk(on_disk)),
249 Cow::Owned(in_memory) => Ok(BorrowedPath::InMemory(in_memory)),
249 Cow::Owned(in_memory) => Ok(BorrowedPath::InMemory(in_memory)),
250 },
250 },
251 NodeRef::OnDisk(node) => {
251 NodeRef::OnDisk(node) => {
252 Ok(BorrowedPath::OnDisk(node.full_path(on_disk)?))
252 Ok(BorrowedPath::OnDisk(node.full_path(on_disk)?))
253 }
253 }
254 }
254 }
255 }
255 }
256
256
257 pub(super) fn base_name(
257 pub(super) fn base_name(
258 &self,
258 &self,
259 on_disk: &'on_disk [u8],
259 on_disk: &'on_disk [u8],
260 ) -> Result<&'tree HgPath, DirstateV2ParseError> {
260 ) -> Result<&'tree HgPath, DirstateV2ParseError> {
261 match self {
261 match self {
262 NodeRef::InMemory(path, _node) => Ok(path.base_name()),
262 NodeRef::InMemory(path, _node) => Ok(path.base_name()),
263 NodeRef::OnDisk(node) => node.base_name(on_disk),
263 NodeRef::OnDisk(node) => node.base_name(on_disk),
264 }
264 }
265 }
265 }
266
266
267 pub(super) fn children(
267 pub(super) fn children(
268 &self,
268 &self,
269 on_disk: &'on_disk [u8],
269 on_disk: &'on_disk [u8],
270 ) -> Result<ChildNodesRef<'tree, 'on_disk>, DirstateV2ParseError> {
270 ) -> Result<ChildNodesRef<'tree, 'on_disk>, DirstateV2ParseError> {
271 match self {
271 match self {
272 NodeRef::InMemory(_path, node) => Ok(node.children.as_ref()),
272 NodeRef::InMemory(_path, node) => Ok(node.children.as_ref()),
273 NodeRef::OnDisk(node) => {
273 NodeRef::OnDisk(node) => {
274 Ok(ChildNodesRef::OnDisk(node.children(on_disk)?))
274 Ok(ChildNodesRef::OnDisk(node.children(on_disk)?))
275 }
275 }
276 }
276 }
277 }
277 }
278
278
279 pub(super) fn has_copy_source(&self) -> bool {
279 pub(super) fn has_copy_source(&self) -> bool {
280 match self {
280 match self {
281 NodeRef::InMemory(_path, node) => node.copy_source.is_some(),
281 NodeRef::InMemory(_path, node) => node.copy_source.is_some(),
282 NodeRef::OnDisk(node) => node.has_copy_source(),
282 NodeRef::OnDisk(node) => node.has_copy_source(),
283 }
283 }
284 }
284 }
285
285
286 pub(super) fn copy_source(
286 pub(super) fn copy_source(
287 &self,
287 &self,
288 on_disk: &'on_disk [u8],
288 on_disk: &'on_disk [u8],
289 ) -> Result<Option<&'tree HgPath>, DirstateV2ParseError> {
289 ) -> Result<Option<&'tree HgPath>, DirstateV2ParseError> {
290 match self {
290 match self {
291 NodeRef::InMemory(_path, node) => {
291 NodeRef::InMemory(_path, node) => {
292 Ok(node.copy_source.as_ref().map(|s| &**s))
292 Ok(node.copy_source.as_ref().map(|s| &**s))
293 }
293 }
294 NodeRef::OnDisk(node) => node.copy_source(on_disk),
294 NodeRef::OnDisk(node) => node.copy_source(on_disk),
295 }
295 }
296 }
296 }
297
297
298 pub(super) fn entry(
298 pub(super) fn entry(
299 &self,
299 &self,
300 ) -> Result<Option<DirstateEntry>, DirstateV2ParseError> {
300 ) -> Result<Option<DirstateEntry>, DirstateV2ParseError> {
301 match self {
301 match self {
302 NodeRef::InMemory(_path, node) => {
302 NodeRef::InMemory(_path, node) => {
303 Ok(node.data.as_entry().copied())
303 Ok(node.data.as_entry().copied())
304 }
304 }
305 NodeRef::OnDisk(node) => node.entry(),
305 NodeRef::OnDisk(node) => node.entry(),
306 }
306 }
307 }
307 }
308
308
309 pub(super) fn state(
309 pub(super) fn state(
310 &self,
310 &self,
311 ) -> Result<Option<EntryState>, DirstateV2ParseError> {
311 ) -> Result<Option<EntryState>, DirstateV2ParseError> {
312 match self {
312 match self {
313 NodeRef::InMemory(_path, node) => {
313 NodeRef::InMemory(_path, node) => {
314 Ok(node.data.as_entry().map(|entry| entry.state))
314 Ok(node.data.as_entry().map(|entry| entry.state))
315 }
315 }
316 NodeRef::OnDisk(node) => node.state(),
316 NodeRef::OnDisk(node) => node.state(),
317 }
317 }
318 }
318 }
319
319
320 pub(super) fn cached_directory_mtime(
320 pub(super) fn cached_directory_mtime(
321 &self,
321 &self,
322 ) -> Option<&on_disk::Timestamp> {
322 ) -> Option<&'tree on_disk::Timestamp> {
323 match self {
323 match self {
324 NodeRef::InMemory(_path, node) => match &node.data {
324 NodeRef::InMemory(_path, node) => match &node.data {
325 NodeData::CachedDirectory { mtime } => Some(mtime),
325 NodeData::CachedDirectory { mtime } => Some(mtime),
326 _ => None,
326 _ => None,
327 },
327 },
328 NodeRef::OnDisk(node) => node.cached_directory_mtime(),
328 NodeRef::OnDisk(node) => node.cached_directory_mtime(),
329 }
329 }
330 }
330 }
331
331
332 pub(super) fn tracked_descendants_count(&self) -> u32 {
332 pub(super) fn tracked_descendants_count(&self) -> u32 {
333 match self {
333 match self {
334 NodeRef::InMemory(_path, node) => node.tracked_descendants_count,
334 NodeRef::InMemory(_path, node) => node.tracked_descendants_count,
335 NodeRef::OnDisk(node) => node.tracked_descendants_count.get(),
335 NodeRef::OnDisk(node) => node.tracked_descendants_count.get(),
336 }
336 }
337 }
337 }
338 }
338 }
339
339
340 /// Represents a file or a directory
340 /// Represents a file or a directory
341 #[derive(Default)]
341 #[derive(Default)]
342 pub(super) struct Node<'on_disk> {
342 pub(super) struct Node<'on_disk> {
343 pub(super) data: NodeData,
343 pub(super) data: NodeData,
344
344
345 pub(super) copy_source: Option<Cow<'on_disk, HgPath>>,
345 pub(super) copy_source: Option<Cow<'on_disk, HgPath>>,
346
346
347 pub(super) children: ChildNodes<'on_disk>,
347 pub(super) children: ChildNodes<'on_disk>,
348
348
349 /// How many descendants of this node (excluding itself) are tracked files
349 /// How many descendants of this node (excluding itself) are tracked files
350 pub(super) tracked_descendants_count: u32,
350 pub(super) tracked_descendants_count: u32,
351 }
351 }
352
352
353 pub(super) enum NodeData {
353 pub(super) enum NodeData {
354 Entry(DirstateEntry),
354 Entry(DirstateEntry),
355 CachedDirectory { mtime: on_disk::Timestamp },
355 CachedDirectory { mtime: on_disk::Timestamp },
356 None,
356 None,
357 }
357 }
358
358
359 impl Default for NodeData {
359 impl Default for NodeData {
360 fn default() -> Self {
360 fn default() -> Self {
361 NodeData::None
361 NodeData::None
362 }
362 }
363 }
363 }
364
364
365 impl NodeData {
365 impl NodeData {
366 fn has_entry(&self) -> bool {
366 fn has_entry(&self) -> bool {
367 match self {
367 match self {
368 NodeData::Entry(_) => true,
368 NodeData::Entry(_) => true,
369 _ => false,
369 _ => false,
370 }
370 }
371 }
371 }
372
372
373 fn as_entry(&self) -> Option<&DirstateEntry> {
373 fn as_entry(&self) -> Option<&DirstateEntry> {
374 match self {
374 match self {
375 NodeData::Entry(entry) => Some(entry),
375 NodeData::Entry(entry) => Some(entry),
376 _ => None,
376 _ => None,
377 }
377 }
378 }
378 }
379 }
379 }
380
380
381 impl<'on_disk> DirstateMap<'on_disk> {
381 impl<'on_disk> DirstateMap<'on_disk> {
382 pub(super) fn empty(on_disk: &'on_disk [u8]) -> Self {
382 pub(super) fn empty(on_disk: &'on_disk [u8]) -> Self {
383 Self {
383 Self {
384 on_disk,
384 on_disk,
385 root: ChildNodes::default(),
385 root: ChildNodes::default(),
386 nodes_with_entry_count: 0,
386 nodes_with_entry_count: 0,
387 nodes_with_copy_source_count: 0,
387 nodes_with_copy_source_count: 0,
388 }
388 }
389 }
389 }
390
390
391 #[timed]
391 #[timed]
392 pub fn new_v2(
392 pub fn new_v2(
393 on_disk: &'on_disk [u8],
393 on_disk: &'on_disk [u8],
394 ) -> Result<(Self, Option<DirstateParents>), DirstateError> {
394 ) -> Result<(Self, Option<DirstateParents>), DirstateError> {
395 Ok(on_disk::read(on_disk)?)
395 Ok(on_disk::read(on_disk)?)
396 }
396 }
397
397
398 #[timed]
398 #[timed]
399 pub fn new_v1(
399 pub fn new_v1(
400 on_disk: &'on_disk [u8],
400 on_disk: &'on_disk [u8],
401 ) -> Result<(Self, Option<DirstateParents>), DirstateError> {
401 ) -> Result<(Self, Option<DirstateParents>), DirstateError> {
402 let mut map = Self::empty(on_disk);
402 let mut map = Self::empty(on_disk);
403 if map.on_disk.is_empty() {
403 if map.on_disk.is_empty() {
404 return Ok((map, None));
404 return Ok((map, None));
405 }
405 }
406
406
407 let parents = parse_dirstate_entries(
407 let parents = parse_dirstate_entries(
408 map.on_disk,
408 map.on_disk,
409 |path, entry, copy_source| {
409 |path, entry, copy_source| {
410 let tracked = entry.state.is_tracked();
410 let tracked = entry.state.is_tracked();
411 let node = Self::get_or_insert_node(
411 let node = Self::get_or_insert_node(
412 map.on_disk,
412 map.on_disk,
413 &mut map.root,
413 &mut map.root,
414 path,
414 path,
415 WithBasename::to_cow_borrowed,
415 WithBasename::to_cow_borrowed,
416 |ancestor| {
416 |ancestor| {
417 if tracked {
417 if tracked {
418 ancestor.tracked_descendants_count += 1
418 ancestor.tracked_descendants_count += 1
419 }
419 }
420 },
420 },
421 )?;
421 )?;
422 assert!(
422 assert!(
423 !node.data.has_entry(),
423 !node.data.has_entry(),
424 "duplicate dirstate entry in read"
424 "duplicate dirstate entry in read"
425 );
425 );
426 assert!(
426 assert!(
427 node.copy_source.is_none(),
427 node.copy_source.is_none(),
428 "duplicate dirstate entry in read"
428 "duplicate dirstate entry in read"
429 );
429 );
430 node.data = NodeData::Entry(*entry);
430 node.data = NodeData::Entry(*entry);
431 node.copy_source = copy_source.map(Cow::Borrowed);
431 node.copy_source = copy_source.map(Cow::Borrowed);
432 map.nodes_with_entry_count += 1;
432 map.nodes_with_entry_count += 1;
433 if copy_source.is_some() {
433 if copy_source.is_some() {
434 map.nodes_with_copy_source_count += 1
434 map.nodes_with_copy_source_count += 1
435 }
435 }
436 Ok(())
436 Ok(())
437 },
437 },
438 )?;
438 )?;
439 let parents = Some(parents.clone());
439 let parents = Some(parents.clone());
440
440
441 Ok((map, parents))
441 Ok((map, parents))
442 }
442 }
443
443
444 fn get_node<'tree>(
444 fn get_node<'tree>(
445 &'tree self,
445 &'tree self,
446 path: &HgPath,
446 path: &HgPath,
447 ) -> Result<Option<NodeRef<'tree, 'on_disk>>, DirstateV2ParseError> {
447 ) -> Result<Option<NodeRef<'tree, 'on_disk>>, DirstateV2ParseError> {
448 let mut children = self.root.as_ref();
448 let mut children = self.root.as_ref();
449 let mut components = path.components();
449 let mut components = path.components();
450 let mut component =
450 let mut component =
451 components.next().expect("expected at least one component");
451 components.next().expect("expected at least one component");
452 loop {
452 loop {
453 if let Some(child) = children.get(component, self.on_disk)? {
453 if let Some(child) = children.get(component, self.on_disk)? {
454 if let Some(next_component) = components.next() {
454 if let Some(next_component) = components.next() {
455 component = next_component;
455 component = next_component;
456 children = child.children(self.on_disk)?;
456 children = child.children(self.on_disk)?;
457 } else {
457 } else {
458 return Ok(Some(child));
458 return Ok(Some(child));
459 }
459 }
460 } else {
460 } else {
461 return Ok(None);
461 return Ok(None);
462 }
462 }
463 }
463 }
464 }
464 }
465
465
466 /// Returns a mutable reference to the node at `path` if it exists
466 /// Returns a mutable reference to the node at `path` if it exists
467 ///
467 ///
468 /// This takes `root` instead of `&mut self` so that callers can mutate
468 /// This takes `root` instead of `&mut self` so that callers can mutate
469 /// other fields while the returned borrow is still valid
469 /// other fields while the returned borrow is still valid
470 fn get_node_mut<'tree>(
470 fn get_node_mut<'tree>(
471 on_disk: &'on_disk [u8],
471 on_disk: &'on_disk [u8],
472 root: &'tree mut ChildNodes<'on_disk>,
472 root: &'tree mut ChildNodes<'on_disk>,
473 path: &HgPath,
473 path: &HgPath,
474 ) -> Result<Option<&'tree mut Node<'on_disk>>, DirstateV2ParseError> {
474 ) -> Result<Option<&'tree mut Node<'on_disk>>, DirstateV2ParseError> {
475 let mut children = root;
475 let mut children = root;
476 let mut components = path.components();
476 let mut components = path.components();
477 let mut component =
477 let mut component =
478 components.next().expect("expected at least one component");
478 components.next().expect("expected at least one component");
479 loop {
479 loop {
480 if let Some(child) = children.make_mut(on_disk)?.get_mut(component)
480 if let Some(child) = children.make_mut(on_disk)?.get_mut(component)
481 {
481 {
482 if let Some(next_component) = components.next() {
482 if let Some(next_component) = components.next() {
483 component = next_component;
483 component = next_component;
484 children = &mut child.children;
484 children = &mut child.children;
485 } else {
485 } else {
486 return Ok(Some(child));
486 return Ok(Some(child));
487 }
487 }
488 } else {
488 } else {
489 return Ok(None);
489 return Ok(None);
490 }
490 }
491 }
491 }
492 }
492 }
493
493
    pub(super) fn get_or_insert_node<'tree, 'path>(
        on_disk: &'on_disk [u8],
        root: &'tree mut ChildNodes<'on_disk>,
        path: &'path HgPath,
        to_cow: impl Fn(
            WithBasename<&'path HgPath>,
        ) -> WithBasename<Cow<'on_disk, HgPath>>,
        mut each_ancestor: impl FnMut(&mut Node),
    ) -> Result<&'tree mut Node<'on_disk>, DirstateV2ParseError> {
        let mut child_nodes = root;
        let mut inclusive_ancestor_paths =
            WithBasename::inclusive_ancestors_of(path);
        let mut ancestor_path = inclusive_ancestor_paths
            .next()
            .expect("expected at least one inclusive ancestor");
        loop {
            // TODO: can we avoid allocating an owned key in cases where the
            // map already contains that key, without introducing double
            // lookup?
            let child_node = child_nodes
                .make_mut(on_disk)?
                .entry(to_cow(ancestor_path))
                .or_default();
            if let Some(next) = inclusive_ancestor_paths.next() {
                each_ancestor(child_node);
                ancestor_path = next;
                child_nodes = &mut child_node.children;
            } else {
                return Ok(child_node);
            }
        }
    }
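    // For instance, inserting "a/b/c" into an empty tree creates nodes for
    // "a" and "a/b" (without entries) on the way down and returns the new
    // "a/b/c" node; `each_ancestor` is called for "a" and "a/b" but not for
    // the returned node itself.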

    fn add_or_remove_file(
        &mut self,
        path: &HgPath,
        old_state: EntryState,
        new_entry: DirstateEntry,
    ) -> Result<(), DirstateV2ParseError> {
        let tracked_count_increment =
            match (old_state.is_tracked(), new_entry.state.is_tracked()) {
                (false, true) => 1,
                (true, false) => -1,
                _ => 0,
            };

        let node = Self::get_or_insert_node(
            self.on_disk,
            &mut self.root,
            path,
            WithBasename::to_cow_owned,
            |ancestor| {
                // We can’t use `+= increment` because the counter is unsigned,
                // and we want debug builds to detect accidental underflow
                // through zero
                match tracked_count_increment {
                    1 => ancestor.tracked_descendants_count += 1,
                    -1 => ancestor.tracked_descendants_count -= 1,
                    _ => {}
                }
            },
        )?;
        if !node.data.has_entry() {
            self.nodes_with_entry_count += 1
        }
        node.data = NodeData::Entry(new_entry);
        Ok(())
    }

    fn iter_nodes<'tree>(
        &'tree self,
    ) -> impl Iterator<
        Item = Result<NodeRef<'tree, 'on_disk>, DirstateV2ParseError>,
    > + 'tree {
        // Depth first tree traversal.
        //
        // If we could afford internal iteration and recursion,
        // this would look like:
        //
        // ```
        // fn traverse_children(
        //     children: &ChildNodes,
        //     each: &mut impl FnMut(&Node),
        // ) {
        //     for child in children.values() {
        //         traverse_children(&child.children, each);
        //         each(child);
        //     }
        // }
        // ```
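        //
        // (Either way, the traversal is post-order: a node is yielded only
        // after all of its descendants, e.g. "a/b" before "a".)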
        //
        // However we want an external iterator and therefore can’t use the
        // call stack. Use an explicit stack instead:
        let mut stack = Vec::new();
        let mut iter = self.root.as_ref().iter();
        std::iter::from_fn(move || {
            while let Some(child_node) = iter.next() {
                let children = match child_node.children(self.on_disk) {
                    Ok(children) => children,
                    Err(error) => return Some(Err(error)),
                };
                // Pseudo-recursion
                let new_iter = children.iter();
                let old_iter = std::mem::replace(&mut iter, new_iter);
                stack.push((child_node, old_iter));
            }
            // Found the end of a `children.iter()` iterator.
            if let Some((child_node, next_iter)) = stack.pop() {
                // "Return" from pseudo-recursion by restoring state from the
                // explicit stack
                iter = next_iter;

                Some(Ok(child_node))
            } else {
                // Reached the bottom of the stack, we’re done
                None
            }
        })
    }

    fn clear_known_ambiguous_mtimes(
        &mut self,
        paths: &[impl AsRef<HgPath>],
    ) -> Result<(), DirstateV2ParseError> {
        for path in paths {
            if let Some(node) = Self::get_node_mut(
                self.on_disk,
                &mut self.root,
                path.as_ref(),
            )? {
                if let NodeData::Entry(entry) = &mut node.data {
                    entry.clear_mtime();
                }
            }
        }
        Ok(())
    }

    /// Return a fallible iterator of full paths of nodes that have an
    /// `entry` for which the given `predicate` returns true.
    ///
    /// Fallibility means that each iterator item is a `Result`, which may
    /// indicate a parse error of the on-disk dirstate-v2 format. Such errors
    /// should only happen if Mercurial is buggy or a repository is corrupted.
    fn filter_full_paths<'tree>(
        &'tree self,
        predicate: impl Fn(&DirstateEntry) -> bool + 'tree,
    ) -> impl Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + 'tree
    {
        filter_map_results(self.iter_nodes(), move |node| {
            if let Some(entry) = node.entry()? {
                if predicate(&entry) {
                    return Ok(Some(node.full_path(self.on_disk)?));
                }
            }
            Ok(None)
        })
    }
}

/// Like `Iterator::filter_map`, but over a fallible iterator of `Result`s.
///
/// The callback is only called for incoming `Ok` values. Errors are passed
/// through as-is. In order to let it use the `?` operator the callback is
/// expected to return a `Result` of `Option`, instead of an `Option` of
/// `Result`.
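///
/// A hypothetical usage sketch (values invented for illustration): even
/// numbers are doubled, odd ones are filtered out, and errors pass through:
///
/// ```
/// let results = vec![Ok::<i32, ()>(1), Ok(2), Err(())];
/// let doubled_evens: Vec<_> =
///     filter_map_results(results.into_iter(), |n| {
///         Ok(if n % 2 == 0 { Some(n * 2) } else { None })
///     })
///     .collect();
/// assert_eq!(doubled_evens, vec![Ok(4), Err(())]);
/// ```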
fn filter_map_results<'a, I, F, A, B, E>(
    iter: I,
    f: F,
) -> impl Iterator<Item = Result<B, E>> + 'a
where
    I: Iterator<Item = Result<A, E>> + 'a,
    F: Fn(A) -> Result<Option<B>, E> + 'a,
{
    iter.filter_map(move |result| match result {
        Ok(node) => f(node).transpose(),
        Err(e) => Some(Err(e)),
    })
}

impl<'on_disk> super::dispatch::DirstateMapMethods for DirstateMap<'on_disk> {
    fn clear(&mut self) {
        self.root = Default::default();
        self.nodes_with_entry_count = 0;
        self.nodes_with_copy_source_count = 0;
    }

    fn add_file(
        &mut self,
        filename: &HgPath,
        old_state: EntryState,
        entry: DirstateEntry,
    ) -> Result<(), DirstateError> {
        Ok(self.add_or_remove_file(filename, old_state, entry)?)
    }

    fn remove_file(
        &mut self,
        filename: &HgPath,
        old_state: EntryState,
        size: i32,
    ) -> Result<(), DirstateError> {
        let entry = DirstateEntry {
            state: EntryState::Removed,
            mode: 0,
            size,
            mtime: 0,
        };
        Ok(self.add_or_remove_file(filename, old_state, entry)?)
    }

    fn drop_file(
        &mut self,
        filename: &HgPath,
        old_state: EntryState,
    ) -> Result<bool, DirstateError> {
        struct Dropped {
            was_tracked: bool,
            had_entry: bool,
            had_copy_source: bool,
        }
        fn recur<'on_disk>(
            on_disk: &'on_disk [u8],
            nodes: &mut ChildNodes<'on_disk>,
            path: &HgPath,
        ) -> Result<Option<Dropped>, DirstateV2ParseError> {
            let (first_path_component, rest_of_path) =
                path.split_first_component();
            let node = if let Some(node) =
                nodes.make_mut(on_disk)?.get_mut(first_path_component)
            {
                node
            } else {
                return Ok(None);
            };
            let dropped;
            if let Some(rest) = rest_of_path {
                if let Some(d) = recur(on_disk, &mut node.children, rest)? {
                    dropped = d;
                    if dropped.was_tracked {
                        node.tracked_descendants_count -= 1;
                    }
                } else {
                    return Ok(None);
                }
            } else {
                let had_entry = node.data.has_entry();
                // Capture this *before* clearing `node.data`: reading it
                // afterwards would always yield `false`.
                let was_tracked = node
                    .data
                    .as_entry()
                    .map_or(false, |entry| entry.state.is_tracked());
                if had_entry {
                    node.data = NodeData::None
                }
                dropped = Dropped {
                    was_tracked,
                    had_entry,
                    had_copy_source: node.copy_source.take().is_some(),
                };
            }
            // After recursion, for both leaf (rest_of_path is None) nodes and
            // parent nodes, remove a node if it just became empty.
            if !node.data.has_entry()
                && node.copy_source.is_none()
                && node.children.is_empty()
            {
                nodes.make_mut(on_disk)?.remove(first_path_component);
            }
            Ok(Some(dropped))
        }
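        // For example, dropping "a/b" when "a" has no entry, no copy source
        // and no other children removes both the "a/b" node and the
        // now-empty "a" node.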

        if let Some(dropped) = recur(self.on_disk, &mut self.root, filename)? {
            if dropped.had_entry {
                self.nodes_with_entry_count -= 1
            }
            if dropped.had_copy_source {
                self.nodes_with_copy_source_count -= 1
            }
            Ok(dropped.had_entry)
        } else {
            debug_assert!(!old_state.is_tracked());
            Ok(false)
        }
    }

    fn clear_ambiguous_times(
        &mut self,
        filenames: Vec<HgPathBuf>,
        now: i32,
    ) -> Result<(), DirstateV2ParseError> {
        for filename in filenames {
            if let Some(node) =
                Self::get_node_mut(self.on_disk, &mut self.root, &filename)?
            {
                if let NodeData::Entry(entry) = &mut node.data {
                    entry.clear_ambiguous_mtime(now);
                }
            }
        }
        Ok(())
    }

    fn non_normal_entries_contains(
        &mut self,
        key: &HgPath,
    ) -> Result<bool, DirstateV2ParseError> {
        Ok(if let Some(node) = self.get_node(key)? {
            node.entry()?.map_or(false, |entry| entry.is_non_normal())
        } else {
            false
        })
    }

    fn non_normal_entries_remove(&mut self, _key: &HgPath) {
        // Do nothing, this `DirstateMap` does not have a separate "non normal
        // entries" set that needs to be kept up to date
    }

    fn non_normal_or_other_parent_paths(
        &mut self,
    ) -> Box<dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + '_>
    {
        Box::new(self.filter_full_paths(|entry| {
            entry.is_non_normal() || entry.is_from_other_parent()
        }))
    }

    fn set_non_normal_other_parent_entries(&mut self, _force: bool) {
        // Do nothing, this `DirstateMap` does not have separate "non normal
        // entries" and "from other parent" sets that need to be recomputed
    }

    fn iter_non_normal_paths(
        &mut self,
    ) -> Box<
        dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + Send + '_,
    > {
        self.iter_non_normal_paths_panic()
    }

    fn iter_non_normal_paths_panic(
        &self,
    ) -> Box<
        dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + Send + '_,
    > {
        Box::new(self.filter_full_paths(|entry| entry.is_non_normal()))
    }

    fn iter_other_parent_paths(
        &mut self,
    ) -> Box<
        dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + Send + '_,
    > {
        Box::new(self.filter_full_paths(|entry| entry.is_from_other_parent()))
    }

    fn has_tracked_dir(
        &mut self,
        directory: &HgPath,
    ) -> Result<bool, DirstateError> {
        if let Some(node) = self.get_node(directory)? {
            // A node without a `DirstateEntry` was created to hold child
            // nodes, and is therefore a directory.
            let state = node.state()?;
            Ok(state.is_none() && node.tracked_descendants_count() > 0)
        } else {
            Ok(false)
        }
    }

    fn has_dir(&mut self, directory: &HgPath) -> Result<bool, DirstateError> {
        if let Some(node) = self.get_node(directory)? {
            // A node without a `DirstateEntry` was created to hold child
            // nodes, and is therefore a directory.
            Ok(node.state()?.is_none())
        } else {
            Ok(false)
        }
    }
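
    // For example, with a single tracked file "a/b" in the map, both
    // `has_tracked_dir("a")` and `has_dir("a")` return true (the "a" node
    // exists only to hold children), while both return false for "a/b"
    // itself since that node has an entry and is therefore a file.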

    #[timed]
    fn pack_v1(
        &mut self,
        parents: DirstateParents,
        now: Timestamp,
    ) -> Result<Vec<u8>, DirstateError> {
        let now: i32 = now.0.try_into().expect("time overflow");
        let mut ambiguous_mtimes = Vec::new();
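        // An mtime equal to `now` is "ambiguous": the file could still be
        // modified within the same second without its mtime changing, so
        // such entries have their mtime cleared before being written out and
        // get re-compared on the next status run.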
        // Optimization (to be measured?): pre-compute size to avoid `Vec`
        // reallocations
        let mut size = parents.as_bytes().len();
        for node in self.iter_nodes() {
            let node = node?;
            if let Some(entry) = node.entry()? {
                size += packed_entry_size(
                    node.full_path(self.on_disk)?,
                    node.copy_source(self.on_disk)?,
                );
                if entry.mtime_is_ambiguous(now) {
                    ambiguous_mtimes.push(
                        node.full_path_borrowed(self.on_disk)?
                            .detach_from_tree(),
                    )
                }
            }
        }
        self.clear_known_ambiguous_mtimes(&ambiguous_mtimes)?;

        let mut packed = Vec::with_capacity(size);
        packed.extend(parents.as_bytes());

        for node in self.iter_nodes() {
            let node = node?;
            if let Some(entry) = node.entry()? {
                pack_entry(
                    node.full_path(self.on_disk)?,
                    &entry,
                    node.copy_source(self.on_disk)?,
                    &mut packed,
                );
            }
        }
        Ok(packed)
    }

    #[timed]
    fn pack_v2(
        &mut self,
        parents: DirstateParents,
        now: Timestamp,
    ) -> Result<Vec<u8>, DirstateError> {
        // TODO: how do we want to handle this in 2038?
        let now: i32 = now.0.try_into().expect("time overflow");
        let mut paths = Vec::new();
        for node in self.iter_nodes() {
            let node = node?;
            if let Some(entry) = node.entry()? {
                if entry.mtime_is_ambiguous(now) {
                    paths.push(
                        node.full_path_borrowed(self.on_disk)?
                            .detach_from_tree(),
                    )
                }
            }
        }
        // Borrow of `self` ends here since we collect cloned paths

        self.clear_known_ambiguous_mtimes(&paths)?;

        on_disk::write(self, parents)
    }

    fn set_all_dirs(&mut self) -> Result<(), DirstateError> {
        // Do nothing, this `DirstateMap` does not have a separate `all_dirs`
        // that needs to be recomputed
        Ok(())
    }

    fn set_dirs(&mut self) -> Result<(), DirstateError> {
        // Do nothing, this `DirstateMap` does not have a separate `dirs` that
        // needs to be recomputed
        Ok(())
    }

    fn status<'a>(
        &'a mut self,
        matcher: &'a (dyn Matcher + Sync),
        root_dir: PathBuf,
        ignore_files: Vec<PathBuf>,
        options: StatusOptions,
    ) -> Result<(DirstateStatus<'a>, Vec<PatternFileWarning>), StatusError>
    {
        super::status::status(self, matcher, root_dir, ignore_files, options)
    }

    fn copy_map_len(&self) -> usize {
        self.nodes_with_copy_source_count as usize
    }

    fn copy_map_iter(&self) -> CopyMapIter<'_> {
        Box::new(filter_map_results(self.iter_nodes(), move |node| {
            Ok(if let Some(source) = node.copy_source(self.on_disk)? {
                Some((node.full_path(self.on_disk)?, source))
            } else {
                None
            })
        }))
    }

    fn copy_map_contains_key(
        &self,
        key: &HgPath,
    ) -> Result<bool, DirstateV2ParseError> {
        Ok(if let Some(node) = self.get_node(key)? {
            node.has_copy_source()
        } else {
            false
        })
    }

    fn copy_map_get(
        &self,
        key: &HgPath,
    ) -> Result<Option<&HgPath>, DirstateV2ParseError> {
        if let Some(node) = self.get_node(key)? {
            if let Some(source) = node.copy_source(self.on_disk)? {
                return Ok(Some(source));
            }
        }
        Ok(None)
    }

    fn copy_map_remove(
        &mut self,
        key: &HgPath,
    ) -> Result<Option<HgPathBuf>, DirstateV2ParseError> {
        let count = &mut self.nodes_with_copy_source_count;
        Ok(
            Self::get_node_mut(self.on_disk, &mut self.root, key)?.and_then(
                |node| {
                    if node.copy_source.is_some() {
                        *count -= 1
                    }
                    node.copy_source.take().map(Cow::into_owned)
                },
            ),
        )
    }

    fn copy_map_insert(
        &mut self,
        key: HgPathBuf,
        value: HgPathBuf,
    ) -> Result<Option<HgPathBuf>, DirstateV2ParseError> {
        let node = Self::get_or_insert_node(
            self.on_disk,
            &mut self.root,
            &key,
            WithBasename::to_cow_owned,
            |_ancestor| {},
        )?;
        if node.copy_source.is_none() {
            self.nodes_with_copy_source_count += 1
        }
        Ok(node.copy_source.replace(value.into()).map(Cow::into_owned))
    }

    fn len(&self) -> usize {
        self.nodes_with_entry_count as usize
    }

    fn contains_key(
        &self,
        key: &HgPath,
    ) -> Result<bool, DirstateV2ParseError> {
        Ok(self.get(key)?.is_some())
    }

    fn get(
        &self,
        key: &HgPath,
    ) -> Result<Option<DirstateEntry>, DirstateV2ParseError> {
        Ok(if let Some(node) = self.get_node(key)? {
            node.entry()?
        } else {
            None
        })
    }

    fn iter(&self) -> StateMapIter<'_> {
        Box::new(filter_map_results(self.iter_nodes(), move |node| {
            Ok(if let Some(entry) = node.entry()? {
                Some((node.full_path(self.on_disk)?, entry))
            } else {
                None
            })
        }))
    }

    fn iter_directories(
        &self,
    ) -> Box<
        dyn Iterator<
                Item = Result<
                    (&HgPath, Option<Timestamp>),
                    DirstateV2ParseError,
                >,
            > + Send
            + '_,
    > {
        Box::new(filter_map_results(self.iter_nodes(), move |node| {
            Ok(if node.state()?.is_none() {
                Some((
                    node.full_path(self.on_disk)?,
                    node.cached_directory_mtime()
                        .map(|mtime| Timestamp(mtime.seconds())),
                ))
            } else {
                None
            })
        }))
    }
}
@@ -1,353 +1,379 @@
use std::path::PathBuf;

use crate::dirstate::parsers::Timestamp;
use crate::dirstate_tree::on_disk::DirstateV2ParseError;
use crate::matchers::Matcher;
use crate::utils::hg_path::{HgPath, HgPathBuf};
use crate::CopyMapIter;
use crate::DirstateEntry;
use crate::DirstateError;
use crate::DirstateMap;
use crate::DirstateParents;
use crate::DirstateStatus;
use crate::EntryState;
use crate::PatternFileWarning;
use crate::StateMapIter;
use crate::StatusError;
use crate::StatusOptions;

pub trait DirstateMapMethods {
    fn clear(&mut self);

    fn add_file(
        &mut self,
        filename: &HgPath,
        old_state: EntryState,
        entry: DirstateEntry,
    ) -> Result<(), DirstateError>;

    fn remove_file(
        &mut self,
        filename: &HgPath,
        old_state: EntryState,
        size: i32,
    ) -> Result<(), DirstateError>;

    fn drop_file(
        &mut self,
        filename: &HgPath,
        old_state: EntryState,
    ) -> Result<bool, DirstateError>;

    fn clear_ambiguous_times(
        &mut self,
        filenames: Vec<HgPathBuf>,
        now: i32,
    ) -> Result<(), DirstateV2ParseError>;

    fn non_normal_entries_contains(
        &mut self,
        key: &HgPath,
    ) -> Result<bool, DirstateV2ParseError>;

    fn non_normal_entries_remove(&mut self, key: &HgPath);

    fn non_normal_or_other_parent_paths(
        &mut self,
    ) -> Box<dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + '_>;

    fn set_non_normal_other_parent_entries(&mut self, force: bool);

    fn iter_non_normal_paths(
        &mut self,
    ) -> Box<
        dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + Send + '_,
    >;

    fn iter_non_normal_paths_panic(
        &self,
    ) -> Box<
        dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + Send + '_,
    >;

    fn iter_other_parent_paths(
        &mut self,
    ) -> Box<
        dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + Send + '_,
    >;

    fn has_tracked_dir(
        &mut self,
        directory: &HgPath,
    ) -> Result<bool, DirstateError>;

    fn has_dir(&mut self, directory: &HgPath) -> Result<bool, DirstateError>;

    fn pack_v1(
        &mut self,
        parents: DirstateParents,
        now: Timestamp,
    ) -> Result<Vec<u8>, DirstateError>;

    fn pack_v2(
        &mut self,
        parents: DirstateParents,
        now: Timestamp,
    ) -> Result<Vec<u8>, DirstateError>;

    fn set_all_dirs(&mut self) -> Result<(), DirstateError>;

    fn set_dirs(&mut self) -> Result<(), DirstateError>;

    fn status<'a>(
        &'a mut self,
        matcher: &'a (dyn Matcher + Sync),
        root_dir: PathBuf,
        ignore_files: Vec<PathBuf>,
        options: StatusOptions,
    ) -> Result<(DirstateStatus<'a>, Vec<PatternFileWarning>), StatusError>;

    fn copy_map_len(&self) -> usize;

    fn copy_map_iter(&self) -> CopyMapIter<'_>;

    fn copy_map_contains_key(
        &self,
        key: &HgPath,
    ) -> Result<bool, DirstateV2ParseError>;

    fn copy_map_get(
        &self,
        key: &HgPath,
    ) -> Result<Option<&HgPath>, DirstateV2ParseError>;

    fn copy_map_remove(
        &mut self,
        key: &HgPath,
    ) -> Result<Option<HgPathBuf>, DirstateV2ParseError>;

    fn copy_map_insert(
        &mut self,
        key: HgPathBuf,
        value: HgPathBuf,
    ) -> Result<Option<HgPathBuf>, DirstateV2ParseError>;

    fn len(&self) -> usize;

    fn contains_key(&self, key: &HgPath)
        -> Result<bool, DirstateV2ParseError>;

    fn get(
        &self,
        key: &HgPath,
    ) -> Result<Option<DirstateEntry>, DirstateV2ParseError>;

    fn iter(&self) -> StateMapIter<'_>;

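    /// Iterate over nodes that represent directories with a cached
    /// modification time. Only the dirstate-v2 tree implementation caches
    /// these; the flat v1 map below returns an empty iterator.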
    fn iter_directories(
        &self,
    ) -> Box<
        dyn Iterator<
                Item = Result<
                    (&HgPath, Option<Timestamp>),
                    DirstateV2ParseError,
                >,
            > + Send
            + '_,
    >;
}
159
148 impl DirstateMapMethods for DirstateMap {
160 impl DirstateMapMethods for DirstateMap {
149 fn clear(&mut self) {
161 fn clear(&mut self) {
150 self.clear()
162 self.clear()
151 }
163 }
152
164
153 fn add_file(
165 fn add_file(
154 &mut self,
166 &mut self,
155 filename: &HgPath,
167 filename: &HgPath,
156 old_state: EntryState,
168 old_state: EntryState,
157 entry: DirstateEntry,
169 entry: DirstateEntry,
158 ) -> Result<(), DirstateError> {
170 ) -> Result<(), DirstateError> {
159 self.add_file(filename, old_state, entry)
171 self.add_file(filename, old_state, entry)
160 }
172 }
161
173
162 fn remove_file(
174 fn remove_file(
163 &mut self,
175 &mut self,
164 filename: &HgPath,
176 filename: &HgPath,
165 old_state: EntryState,
177 old_state: EntryState,
166 size: i32,
178 size: i32,
167 ) -> Result<(), DirstateError> {
179 ) -> Result<(), DirstateError> {
168 self.remove_file(filename, old_state, size)
180 self.remove_file(filename, old_state, size)
169 }
181 }
170
182
171 fn drop_file(
183 fn drop_file(
172 &mut self,
184 &mut self,
173 filename: &HgPath,
185 filename: &HgPath,
174 old_state: EntryState,
186 old_state: EntryState,
175 ) -> Result<bool, DirstateError> {
187 ) -> Result<bool, DirstateError> {
176 self.drop_file(filename, old_state)
188 self.drop_file(filename, old_state)
177 }
189 }
178
190
179 fn clear_ambiguous_times(
191 fn clear_ambiguous_times(
180 &mut self,
192 &mut self,
181 filenames: Vec<HgPathBuf>,
193 filenames: Vec<HgPathBuf>,
182 now: i32,
194 now: i32,
183 ) -> Result<(), DirstateV2ParseError> {
195 ) -> Result<(), DirstateV2ParseError> {
184 Ok(self.clear_ambiguous_times(filenames, now))
196 Ok(self.clear_ambiguous_times(filenames, now))
185 }
197 }
186
198
187 fn non_normal_entries_contains(
199 fn non_normal_entries_contains(
188 &mut self,
200 &mut self,
189 key: &HgPath,
201 key: &HgPath,
190 ) -> Result<bool, DirstateV2ParseError> {
202 ) -> Result<bool, DirstateV2ParseError> {
191 let (non_normal, _other_parent) =
203 let (non_normal, _other_parent) =
192 self.get_non_normal_other_parent_entries();
204 self.get_non_normal_other_parent_entries();
193 Ok(non_normal.contains(key))
205 Ok(non_normal.contains(key))
194 }
206 }
195
207
196 fn non_normal_entries_remove(&mut self, key: &HgPath) {
208 fn non_normal_entries_remove(&mut self, key: &HgPath) {
197 self.non_normal_entries_remove(key)
209 self.non_normal_entries_remove(key)
198 }
210 }
199
211
200 fn non_normal_or_other_parent_paths(
212 fn non_normal_or_other_parent_paths(
201 &mut self,
213 &mut self,
202 ) -> Box<dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + '_>
214 ) -> Box<dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + '_>
203 {
215 {
204 let (non_normal, other_parent) =
216 let (non_normal, other_parent) =
205 self.get_non_normal_other_parent_entries();
217 self.get_non_normal_other_parent_entries();
206 Box::new(non_normal.union(other_parent).map(|p| Ok(&**p)))
218 Box::new(non_normal.union(other_parent).map(|p| Ok(&**p)))
207 }
219 }
208
220
209 fn set_non_normal_other_parent_entries(&mut self, force: bool) {
221 fn set_non_normal_other_parent_entries(&mut self, force: bool) {
210 self.set_non_normal_other_parent_entries(force)
222 self.set_non_normal_other_parent_entries(force)
211 }
223 }
212
224
213 fn iter_non_normal_paths(
225 fn iter_non_normal_paths(
214 &mut self,
226 &mut self,
215 ) -> Box<
227 ) -> Box<
216 dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + Send + '_,
228 dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + Send + '_,
217 > {
229 > {
218 let (non_normal, _other_parent) =
230 let (non_normal, _other_parent) =
219 self.get_non_normal_other_parent_entries();
231 self.get_non_normal_other_parent_entries();
220 Box::new(non_normal.iter().map(|p| Ok(&**p)))
232 Box::new(non_normal.iter().map(|p| Ok(&**p)))
221 }
233 }
222
234
223 fn iter_non_normal_paths_panic(
235 fn iter_non_normal_paths_panic(
224 &self,
236 &self,
225 ) -> Box<
237 ) -> Box<
226 dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + Send + '_,
238 dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + Send + '_,
227 > {
239 > {
228 let (non_normal, _other_parent) =
240 let (non_normal, _other_parent) =
229 self.get_non_normal_other_parent_entries_panic();
241 self.get_non_normal_other_parent_entries_panic();
230 Box::new(non_normal.iter().map(|p| Ok(&**p)))
242 Box::new(non_normal.iter().map(|p| Ok(&**p)))
231 }
243 }
232
244
233 fn iter_other_parent_paths(
245 fn iter_other_parent_paths(
234 &mut self,
246 &mut self,
235 ) -> Box<
247 ) -> Box<
236 dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + Send + '_,
248 dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + Send + '_,
237 > {
249 > {
238 let (_non_normal, other_parent) =
250 let (_non_normal, other_parent) =
239 self.get_non_normal_other_parent_entries();
251 self.get_non_normal_other_parent_entries();
240 Box::new(other_parent.iter().map(|p| Ok(&**p)))
252 Box::new(other_parent.iter().map(|p| Ok(&**p)))
241 }
253 }
242
254
243 fn has_tracked_dir(
255 fn has_tracked_dir(
244 &mut self,
256 &mut self,
245 directory: &HgPath,
257 directory: &HgPath,
246 ) -> Result<bool, DirstateError> {
258 ) -> Result<bool, DirstateError> {
247 self.has_tracked_dir(directory)
259 self.has_tracked_dir(directory)
248 }
260 }
249
261
250 fn has_dir(&mut self, directory: &HgPath) -> Result<bool, DirstateError> {
262 fn has_dir(&mut self, directory: &HgPath) -> Result<bool, DirstateError> {
251 self.has_dir(directory)
263 self.has_dir(directory)
252 }
264 }
253
265
254 fn pack_v1(
266 fn pack_v1(
255 &mut self,
267 &mut self,
256 parents: DirstateParents,
268 parents: DirstateParents,
257 now: Timestamp,
269 now: Timestamp,
258 ) -> Result<Vec<u8>, DirstateError> {
270 ) -> Result<Vec<u8>, DirstateError> {
259 self.pack(parents, now)
271 self.pack(parents, now)
260 }
272 }
261
273
262 fn pack_v2(
274 fn pack_v2(
263 &mut self,
275 &mut self,
264 _parents: DirstateParents,
276 _parents: DirstateParents,
265 _now: Timestamp,
277 _now: Timestamp,
266 ) -> Result<Vec<u8>, DirstateError> {
278 ) -> Result<Vec<u8>, DirstateError> {
267 panic!(
279 panic!(
268 "should have used dirstate_tree::DirstateMap to use the v2 format"
280 "should have used dirstate_tree::DirstateMap to use the v2 format"
269 )
281 )
270 }
282 }
271
283
272 fn set_all_dirs(&mut self) -> Result<(), DirstateError> {
284 fn set_all_dirs(&mut self) -> Result<(), DirstateError> {
273 self.set_all_dirs()
285 self.set_all_dirs()
274 }
286 }
275
287
276 fn set_dirs(&mut self) -> Result<(), DirstateError> {
288 fn set_dirs(&mut self) -> Result<(), DirstateError> {
277 self.set_dirs()
289 self.set_dirs()
278 }
290 }
279
291
280 fn status<'a>(
292 fn status<'a>(
281 &'a mut self,
293 &'a mut self,
282 matcher: &'a (dyn Matcher + Sync),
294 matcher: &'a (dyn Matcher + Sync),
283 root_dir: PathBuf,
295 root_dir: PathBuf,
284 ignore_files: Vec<PathBuf>,
296 ignore_files: Vec<PathBuf>,
285 options: StatusOptions,
297 options: StatusOptions,
286 ) -> Result<(DirstateStatus<'a>, Vec<PatternFileWarning>), StatusError>
298 ) -> Result<(DirstateStatus<'a>, Vec<PatternFileWarning>), StatusError>
287 {
299 {
288 crate::status(self, matcher, root_dir, ignore_files, options)
300 crate::status(self, matcher, root_dir, ignore_files, options)
289 }
301 }
290
302
291 fn copy_map_len(&self) -> usize {
303 fn copy_map_len(&self) -> usize {
292 self.copy_map.len()
304 self.copy_map.len()
293 }
305 }
294
306
295 fn copy_map_iter(&self) -> CopyMapIter<'_> {
307 fn copy_map_iter(&self) -> CopyMapIter<'_> {
296 Box::new(
308 Box::new(
297 self.copy_map
309 self.copy_map
298 .iter()
310 .iter()
299 .map(|(key, value)| Ok((&**key, &**value))),
311 .map(|(key, value)| Ok((&**key, &**value))),
300 )
312 )
301 }
313 }
302
314
303 fn copy_map_contains_key(
315 fn copy_map_contains_key(
304 &self,
316 &self,
305 key: &HgPath,
317 key: &HgPath,
306 ) -> Result<bool, DirstateV2ParseError> {
318 ) -> Result<bool, DirstateV2ParseError> {
307 Ok(self.copy_map.contains_key(key))
319 Ok(self.copy_map.contains_key(key))
308 }
320 }
309
321
310 fn copy_map_get(
322 fn copy_map_get(
311 &self,
323 &self,
312 key: &HgPath,
324 key: &HgPath,
313 ) -> Result<Option<&HgPath>, DirstateV2ParseError> {
325 ) -> Result<Option<&HgPath>, DirstateV2ParseError> {
314 Ok(self.copy_map.get(key).map(|p| &**p))
326 Ok(self.copy_map.get(key).map(|p| &**p))
315 }
327 }
316
328
317 fn copy_map_remove(
329 fn copy_map_remove(
318 &mut self,
330 &mut self,
319 key: &HgPath,
331 key: &HgPath,
320 ) -> Result<Option<HgPathBuf>, DirstateV2ParseError> {
332 ) -> Result<Option<HgPathBuf>, DirstateV2ParseError> {
321 Ok(self.copy_map.remove(key))
333 Ok(self.copy_map.remove(key))
322 }
334 }
323
335
324 fn copy_map_insert(
336 fn copy_map_insert(
325 &mut self,
337 &mut self,
326 key: HgPathBuf,
338 key: HgPathBuf,
327 value: HgPathBuf,
339 value: HgPathBuf,
328 ) -> Result<Option<HgPathBuf>, DirstateV2ParseError> {
340 ) -> Result<Option<HgPathBuf>, DirstateV2ParseError> {
329 Ok(self.copy_map.insert(key, value))
341 Ok(self.copy_map.insert(key, value))
330 }
342 }
331
343
332 fn len(&self) -> usize {
344 fn len(&self) -> usize {
333 (&**self).len()
345 (&**self).len()
334 }
346 }
335
347
336 fn contains_key(
348 fn contains_key(
337 &self,
349 &self,
338 key: &HgPath,
350 key: &HgPath,
339 ) -> Result<bool, DirstateV2ParseError> {
351 ) -> Result<bool, DirstateV2ParseError> {
340 Ok((&**self).contains_key(key))
352 Ok((&**self).contains_key(key))
341 }
353 }
342
354
343 fn get(
355 fn get(
344 &self,
356 &self,
345 key: &HgPath,
357 key: &HgPath,
346 ) -> Result<Option<DirstateEntry>, DirstateV2ParseError> {
358 ) -> Result<Option<DirstateEntry>, DirstateV2ParseError> {
347 Ok((&**self).get(key).cloned())
359 Ok((&**self).get(key).cloned())
348 }
360 }
349
361
350 fn iter(&self) -> StateMapIter<'_> {
362 fn iter(&self) -> StateMapIter<'_> {
351 Box::new((&**self).iter().map(|(key, value)| Ok((&**key, *value))))
363 Box::new((&**self).iter().map(|(key, value)| Ok((&**key, *value))))
352 }
364 }
365
366 fn iter_directories(
367 &self,
368 ) -> Box<
369 dyn Iterator<
370 Item = Result<
371 (&HgPath, Option<Timestamp>),
372 DirstateV2ParseError,
373 >,
374 > + Send
375 + '_,
376 > {
377 Box::new(std::iter::empty())
378 }
353 }
379 }
@@ -1,537 +1,543 @@
//! The "version 2" disk representation of the dirstate
//!
//! # File format
//!
//! The file starts with a fixed-sized header, whose layout is defined by the
//! `Header` struct. Its `root` field contains the slice (offset and length) to
//! the nodes representing the files and directories at the root of the
//! repository. Each node is also fixed-size, defined by the `Node` struct.
//! Nodes in turn contain slices to variable-size paths, and to their own child
//! nodes (if any) for nested files and directories.
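//!
//! One possible layout, shown schematically (slices may point anywhere in
//! the file):
//!
//! ```text
//! Header { marker, parents, root: Slice, ... }
//! [Node, Node, ...]   <- root nodes, pointed to by `root`
//! [Node, Node, ...]   <- child nodes, pointed to by some `children` slice
//! b"..."              <- variable-size path bytes, pointed to by path slices
//! ```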

use crate::dirstate_tree::dirstate_map::{self, DirstateMap, NodeRef};
use crate::dirstate_tree::path_with_basename::WithBasename;
use crate::errors::HgError;
use crate::utils::hg_path::HgPath;
use crate::DirstateEntry;
use crate::DirstateError;
use crate::DirstateParents;
use crate::EntryState;
use bytes_cast::unaligned::{I32Be, I64Be, U32Be, U64Be};
use bytes_cast::BytesCast;
use std::borrow::Cow;
use std::convert::TryFrom;
use std::time::{Duration, SystemTime, UNIX_EPOCH};

/// Added at the start of `.hg/dirstate` when the "v2" format is used.
/// This is a redundant sanity check more than an actual "magic number", since
/// `.hg/requires` already governs which format should be used.
pub const V2_FORMAT_MARKER: &[u8; 12] = b"dirstate-v2\n";

#[derive(BytesCast)]
#[repr(C)]
struct Header {
    marker: [u8; V2_FORMAT_MARKER.len()],

    /// `dirstatemap.parents()` in `mercurial/dirstate.py` relies on this
    /// `parents` field being at this offset, immediately after `marker`.
    parents: DirstateParents,

    root: ChildNodes,
    nodes_with_entry_count: Size,
    nodes_with_copy_source_count: Size,
}

#[derive(BytesCast)]
#[repr(C)]
pub(super) struct Node {
    full_path: PathSlice,

    /// In bytes from `self.full_path.start`
    base_name_start: Size,

    copy_source: OptPathSlice,
    children: ChildNodes,
    pub(super) tracked_descendants_count: Size,

    /// Depending on the value of `state`:
    ///
    /// * A null byte: `data` is not used.
    ///
    /// * A `n`, `a`, `r`, or `m` ASCII byte: `state` and `data` together
    ///   represent a dirstate entry like in the v1 format.
    ///
    /// * A `d` ASCII byte: the bytes of `data` should instead be interpreted
    ///   as the `Timestamp` for the mtime of a cached directory.
    ///
    ///   The presence of this state means that at some point, this path in
    ///   the working directory was observed:
    ///
    ///   - To be a directory
    ///   - With the modification time as given by `Timestamp`
    ///   - That timestamp was already strictly in the past when observed,
    ///     meaning that later changes cannot happen in the same clock tick
    ///     and must cause a different modification time (unless the system
    ///     clock jumps back and we get unlucky, which is not impossible but
    ///     deemed unlikely enough).
    ///   - The directory did not contain any child entry that did not have a
    ///     corresponding dirstate node.
    ///
    ///   This means that if `std::fs::symlink_metadata` later reports the
    ///   same modification time, we don’t need to call `std::fs::read_dir`
    ///   again for this directory and can iterate child dirstate nodes
    ///   instead.
    state: u8,
    data: Entry,
}

#[derive(BytesCast, Copy, Clone)]
#[repr(C)]
struct Entry {
    mode: I32Be,
    mtime: I32Be,
    size: I32Be,
}

/// Duration since the Unix epoch
#[derive(BytesCast, Copy, Clone, PartialEq)]
#[repr(C)]
pub(super) struct Timestamp {
    seconds: I64Be,

    /// In `0 .. 1_000_000_000`.
    ///
    /// This timestamp is later or earlier than `(seconds, 0)` by this many
    /// nanoseconds, if `seconds` is non-negative or negative, respectively.
106 nanoseconds: U32Be,
106 nanoseconds: U32Be,
107 }
107 }
108
108
109 /// Counted in bytes from the start of the file
109 /// Counted in bytes from the start of the file
110 ///
110 ///
111 /// NOTE: If we decide to never support `.hg/dirstate` files larger than 4 GiB
111 /// NOTE: If we decide to never support `.hg/dirstate` files larger than 4 GiB
112 /// we could save space by using `U32Be` instead.
112 /// we could save space by using `U32Be` instead.
113 type Offset = U64Be;
113 type Offset = U64Be;
114
114
115 /// Counted in number of items
115 /// Counted in number of items
116 ///
116 ///
117 /// NOTE: not supporting directories with more than 4 billion direct children,
117 /// NOTE: not supporting directories with more than 4 billion direct children,
118 /// or filenames more than 4 GiB.
118 /// or filenames more than 4 GiB.
119 type Size = U32Be;
119 type Size = U32Be;
120
120
121 /// Location of consecutive, fixed-size items.
121 /// Location of consecutive, fixed-size items.
122 ///
122 ///
123 /// An item can be a single byte for paths, or a struct with
123 /// An item can be a single byte for paths, or a struct with
124 /// `derive(BytesCast)`.
124 /// `derive(BytesCast)`.
125 #[derive(BytesCast, Copy, Clone)]
125 #[derive(BytesCast, Copy, Clone)]
126 #[repr(C)]
126 #[repr(C)]
127 struct Slice {
127 struct Slice {
128 start: Offset,
128 start: Offset,
129 len: Size,
129 len: Size,
130 }
130 }
131
131
132 /// A contiguous sequence of `len` times `Node`, representing the child nodes
132 /// A contiguous sequence of `len` times `Node`, representing the child nodes
133 /// of either some other node or of the repository root.
133 /// of either some other node or of the repository root.
134 ///
134 ///
135 /// Always sorted by ascending `full_path`, to allow binary search.
135 /// Always sorted by ascending `full_path`, to allow binary search.
136 /// Since nodes with the same parent nodes also have the same parent path,
136 /// Since nodes with the same parent nodes also have the same parent path,
137 /// only the `base_name`s need to be compared during binary search.
137 /// only the `base_name`s need to be compared during binary search.
138 type ChildNodes = Slice;
138 type ChildNodes = Slice;
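
// A sketch, not part of the original file, of the lookup that the sort
// order above enables: `nodes` would be the result of `read_slice::<Node>`
// for one directory, and only base names are compared. The function name
// and shape are hypothetical.
#[cfg(test)]
fn _example_find_child<'on_disk>(
    on_disk: &'on_disk [u8],
    nodes: &'on_disk [Node],
    base_name: &HgPath,
) -> Result<Option<&'on_disk Node>, DirstateV2ParseError> {
    // A manual binary search keeps `?` available, since
    // `slice::binary_search_by` expects an infallible comparator.
    let mut candidates = nodes;
    while !candidates.is_empty() {
        let mid = candidates.len() / 2;
        let node = &candidates[mid];
        match node.base_name(on_disk)?.as_bytes().cmp(base_name.as_bytes()) {
            std::cmp::Ordering::Equal => return Ok(Some(node)),
            std::cmp::Ordering::Less => candidates = &candidates[mid + 1..],
            std::cmp::Ordering::Greater => candidates = &candidates[..mid],
        }
    }
    Ok(None)
}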

/// A `HgPath` of `len` bytes
type PathSlice = Slice;

/// Either nothing if `start == 0`, or a `HgPath` of `len` bytes
type OptPathSlice = Slice;

/// Make sure that size-affecting changes are made knowingly
fn _static_assert_size_of() {
    let _ = std::mem::transmute::<Header, [u8; 72]>;
    let _ = std::mem::transmute::<Node, [u8; 57]>;
}
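
// A note, not in the original file: merely naming these `transmute`
// instances (without calling them) is enough to make compilation fail
// whenever the struct sizes drift from 72 and 57 bytes, because
// `transmute` is only accepted between types of equal size.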

/// Unexpected file format found in `.hg/dirstate` with the "v2" format.
///
/// This should only happen if Mercurial is buggy or a repository is corrupted.
#[derive(Debug)]
pub struct DirstateV2ParseError;

impl From<DirstateV2ParseError> for HgError {
    fn from(_: DirstateV2ParseError) -> Self {
        HgError::corrupted("dirstate-v2 parse error")
    }
}

impl From<DirstateV2ParseError> for crate::DirstateError {
    fn from(error: DirstateV2ParseError) -> Self {
        HgError::from(error).into()
    }
}

pub(super) fn read<'on_disk>(
    on_disk: &'on_disk [u8],
) -> Result<
    (DirstateMap<'on_disk>, Option<DirstateParents>),
    DirstateV2ParseError,
> {
    if on_disk.is_empty() {
        return Ok((DirstateMap::empty(on_disk), None));
    }
    let (header, _) =
        Header::from_bytes(on_disk).map_err(|_| DirstateV2ParseError)?;
    let Header {
        marker,
        parents,
        root,
        nodes_with_entry_count,
        nodes_with_copy_source_count,
    } = header;
    if marker != V2_FORMAT_MARKER {
        return Err(DirstateV2ParseError);
    }
    let dirstate_map = DirstateMap {
        on_disk,
        root: dirstate_map::ChildNodes::OnDisk(read_slice::<Node>(
            on_disk, *root,
        )?),
        nodes_with_entry_count: nodes_with_entry_count.get(),
        nodes_with_copy_source_count: nodes_with_copy_source_count.get(),
    };
    let parents = Some(parents.clone());
    Ok((dirstate_map, parents))
}

impl Node {
    pub(super) fn full_path<'on_disk>(
        &self,
        on_disk: &'on_disk [u8],
    ) -> Result<&'on_disk HgPath, DirstateV2ParseError> {
        read_hg_path(on_disk, self.full_path)
    }

    pub(super) fn base_name_start<'on_disk>(
        &self,
    ) -> Result<usize, DirstateV2ParseError> {
        let start = self.base_name_start.get();
        if start < self.full_path.len.get() {
            let start = usize::try_from(start)
                // u32 -> usize, could only panic on a 16-bit CPU
                .expect("dirstate-v2 base_name_start out of bounds");
            Ok(start)
        } else {
            Err(DirstateV2ParseError)
        }
    }

    pub(super) fn base_name<'on_disk>(
        &self,
        on_disk: &'on_disk [u8],
    ) -> Result<&'on_disk HgPath, DirstateV2ParseError> {
        let full_path = self.full_path(on_disk)?;
        let base_name_start = self.base_name_start()?;
        Ok(HgPath::new(&full_path.as_bytes()[base_name_start..]))
    }

    pub(super) fn path<'on_disk>(
        &self,
        on_disk: &'on_disk [u8],
    ) -> Result<dirstate_map::NodeKey<'on_disk>, DirstateV2ParseError> {
        Ok(WithBasename::from_raw_parts(
            Cow::Borrowed(self.full_path(on_disk)?),
            self.base_name_start()?,
        ))
    }

    pub(super) fn has_copy_source<'on_disk>(&self) -> bool {
        self.copy_source.start.get() != 0
    }

    pub(super) fn copy_source<'on_disk>(
        &self,
        on_disk: &'on_disk [u8],
    ) -> Result<Option<&'on_disk HgPath>, DirstateV2ParseError> {
        Ok(if self.has_copy_source() {
            Some(read_hg_path(on_disk, self.copy_source)?)
        } else {
            None
        })
    }

    pub(super) fn node_data(
        &self,
    ) -> Result<dirstate_map::NodeData, DirstateV2ParseError> {
        let entry = |state| {
            dirstate_map::NodeData::Entry(self.entry_with_given_state(state))
        };

        match self.state {
            b'\0' => Ok(dirstate_map::NodeData::None),
            b'd' => Ok(dirstate_map::NodeData::CachedDirectory {
                mtime: *self.data.as_timestamp(),
            }),
            b'n' => Ok(entry(EntryState::Normal)),
            b'a' => Ok(entry(EntryState::Added)),
            b'r' => Ok(entry(EntryState::Removed)),
            b'm' => Ok(entry(EntryState::Merged)),
            _ => Err(DirstateV2ParseError),
        }
    }

    pub(super) fn cached_directory_mtime(&self) -> Option<&Timestamp> {
        if self.state == b'd' {
            Some(self.data.as_timestamp())
        } else {
            None
        }
    }

    pub(super) fn state(
        &self,
    ) -> Result<Option<EntryState>, DirstateV2ParseError> {
        match self.state {
            b'\0' | b'd' => Ok(None),
            b'n' => Ok(Some(EntryState::Normal)),
            b'a' => Ok(Some(EntryState::Added)),
            b'r' => Ok(Some(EntryState::Removed)),
            b'm' => Ok(Some(EntryState::Merged)),
            _ => Err(DirstateV2ParseError),
        }
    }

    fn entry_with_given_state(&self, state: EntryState) -> DirstateEntry {
        DirstateEntry {
            state,
            mode: self.data.mode.get(),
            mtime: self.data.mtime.get(),
            size: self.data.size.get(),
        }
    }

    pub(super) fn entry(
        &self,
    ) -> Result<Option<DirstateEntry>, DirstateV2ParseError> {
        Ok(self
            .state()?
            .map(|state| self.entry_with_given_state(state)))
    }

    pub(super) fn children<'on_disk>(
        &self,
        on_disk: &'on_disk [u8],
    ) -> Result<&'on_disk [Node], DirstateV2ParseError> {
        read_slice::<Node>(on_disk, self.children)
    }

    pub(super) fn to_in_memory_node<'on_disk>(
        &self,
        on_disk: &'on_disk [u8],
    ) -> Result<dirstate_map::Node<'on_disk>, DirstateV2ParseError> {
        Ok(dirstate_map::Node {
            children: dirstate_map::ChildNodes::OnDisk(
                self.children(on_disk)?,
            ),
            copy_source: self.copy_source(on_disk)?.map(Cow::Borrowed),
            data: self.node_data()?,
            tracked_descendants_count: self.tracked_descendants_count.get(),
        })
    }
}

impl Entry {
    fn from_timestamp(timestamp: Timestamp) -> Self {
        // Safety: both types implement the `BytesCast` trait, so we could
        // safely use `as_bytes` and `from_bytes` to do this conversion.
        // Using `transmute` instead makes the compiler check that the two
        // types have the same size, which eliminates the error case of
        // `from_bytes`.
        unsafe { std::mem::transmute::<Timestamp, Entry>(timestamp) }
    }

    fn as_timestamp(&self) -> &Timestamp {
        // Safety: same as above in `from_timestamp`
        unsafe { &*(self as *const Entry as *const Timestamp) }
    }
}

impl Timestamp {
    pub fn seconds(&self) -> i64 {
        self.seconds.get()
    }
}

impl From<SystemTime> for Timestamp {
    fn from(system_time: SystemTime) -> Self {
        let (secs, nanos) = match system_time.duration_since(UNIX_EPOCH) {
            Ok(duration) => {
                (duration.as_secs() as i64, duration.subsec_nanos())
            }
            Err(error) => {
                let negative = error.duration();
                (-(negative.as_secs() as i64), negative.subsec_nanos())
            }
        };
        Timestamp {
            seconds: secs.into(),
            nanoseconds: nanos.into(),
        }
    }
}

impl From<&'_ Timestamp> for SystemTime {
    fn from(timestamp: &'_ Timestamp) -> Self {
        let secs = timestamp.seconds.get();
        let nanos = timestamp.nanoseconds.get();
        if secs >= 0 {
            UNIX_EPOCH + Duration::new(secs as u64, nanos)
        } else {
            UNIX_EPOCH - Duration::new((-secs) as u64, nanos)
        }
    }
}
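
// Round-trip sketch, not part of the original file: a `SystemTime` after
// the epoch survives the encode/decode pair above exactly.
#[cfg(test)]
fn _example_timestamp_round_trip() {
    let time = UNIX_EPOCH + Duration::new(1_600_000_000, 123_456_789);
    let encoded = Timestamp::from(time);
    assert_eq!(encoded.seconds(), 1_600_000_000);
    assert_eq!(SystemTime::from(&encoded), time);
    // Caveat (an observation, not from the original sources): times less
    // than one second before the epoch encode with `seconds == 0`, so the
    // sign of the nanosecond offset is lost and they decode on the
    // non-negative branch. Ordinary file mtimes are not affected.
}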

fn read_hg_path(
    on_disk: &[u8],
    slice: Slice,
) -> Result<&HgPath, DirstateV2ParseError> {
    let bytes = read_slice::<u8>(on_disk, slice)?;
    Ok(HgPath::new(bytes))
}

fn read_slice<T>(
    on_disk: &[u8],
    slice: Slice,
) -> Result<&[T], DirstateV2ParseError>
where
    T: BytesCast,
{
    // Either `usize::MAX` value would result in an "out of bounds" error,
    // since a single `&[u8]` cannot occupy the entire address space.
    let start = usize::try_from(slice.start.get()).unwrap_or(std::usize::MAX);
    let len = usize::try_from(slice.len.get()).unwrap_or(std::usize::MAX);
    on_disk
        .get(start..)
        .and_then(|bytes| T::slice_from_bytes(bytes, len).ok())
        .map(|(slice, _rest)| slice)
        .ok_or_else(|| DirstateV2ParseError)
}

pub(super) fn write(
    dirstate_map: &mut DirstateMap,
    parents: DirstateParents,
) -> Result<Vec<u8>, DirstateError> {
    let header_len = std::mem::size_of::<Header>();

    // This ignores the space for paths, and for nodes without an entry.
    // TODO: better estimate? Skip the `Vec` and write to a file directly?
    let size_guess = header_len
        + std::mem::size_of::<Node>()
            * dirstate_map.nodes_with_entry_count as usize;
    let mut out = Vec::with_capacity(size_guess);

    // Keep space for the header. We’ll fill it out at the end when we know
    // the actual offset for the root nodes.
    out.resize(header_len, 0_u8);

    let root =
        write_nodes(dirstate_map, dirstate_map.root.as_ref(), &mut out)?;

    let header = Header {
        marker: *V2_FORMAT_MARKER,
        parents: parents,
        root,
        nodes_with_entry_count: dirstate_map.nodes_with_entry_count.into(),
        nodes_with_copy_source_count: dirstate_map
            .nodes_with_copy_source_count
            .into(),
    };
    out[..header_len].copy_from_slice(header.as_bytes());
    Ok(out)
}

fn write_nodes(
    dirstate_map: &DirstateMap,
    nodes: dirstate_map::ChildNodesRef,
    out: &mut Vec<u8>,
) -> Result<ChildNodes, DirstateError> {
    // `dirstate_map::ChildNodes` is a `HashMap` with undefined iteration
    // order. Sort to enable binary search in the written file.
    let nodes = nodes.sorted();

    // First accumulate serialized nodes in a `Vec`
    let mut on_disk_nodes = Vec::with_capacity(nodes.len());
    for node in nodes {
        let children = write_nodes(
            dirstate_map,
            node.children(dirstate_map.on_disk)?,
            out,
        )?;
        let full_path = node.full_path(dirstate_map.on_disk)?;
        let full_path = write_slice::<u8>(full_path.as_bytes(), out);
        let copy_source =
            if let Some(source) = node.copy_source(dirstate_map.on_disk)? {
                write_slice::<u8>(source.as_bytes(), out)
            } else {
                Slice {
                    start: 0.into(),
                    len: 0.into(),
                }
            };
        on_disk_nodes.push(match node {
            NodeRef::InMemory(path, node) => {
                let (state, data) = match &node.data {
                    dirstate_map::NodeData::Entry(entry) => (
                        entry.state.into(),
                        Entry {
                            mode: entry.mode.into(),
                            mtime: entry.mtime.into(),
                            size: entry.size.into(),
                        },
                    ),
                    dirstate_map::NodeData::CachedDirectory { mtime } => {
                        (b'd', Entry::from_timestamp(*mtime))
                    }
                    dirstate_map::NodeData::None => (
                        b'\0',
                        Entry {
                            mode: 0.into(),
                            mtime: 0.into(),
                            size: 0.into(),
                        },
                    ),
                };
                Node {
                    children,
                    copy_source,
                    full_path,
                    base_name_start: u32::try_from(path.base_name_start())
                        // Could only panic for paths over 4 GiB
                        .expect("dirstate-v2 offset overflow")
                        .into(),
                    tracked_descendants_count: node
                        .tracked_descendants_count
                        .into(),
                    state,
                    data,
                }
            }
            NodeRef::OnDisk(node) => Node {
                children,
                copy_source,
                full_path,
                ..*node
            },
        })
    }
    // … so we can write them contiguously
    Ok(write_slice::<Node>(&on_disk_nodes, out))
}
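
// A note on the resulting layout (an observation, not from the original
// sources): because `write_nodes` recurses before pushing each `Node`,
// a node’s children and paths are always serialized at lower offsets than
// the node itself, and the root nodes end up last in the file. Every
// `Slice` stored in a node therefore refers to data written earlier; only
// the header’s `root` slice points forward, from the reserved space at
// the start of the file to the root nodes near its end.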

fn write_slice<T>(slice: &[T], out: &mut Vec<u8>) -> Slice
where
    T: BytesCast,
{
    let start = u64::try_from(out.len())
        // Could only panic on a 128-bit CPU with a dirstate over 16 EiB
        .expect("dirstate-v2 offset overflow")
        .into();
    let len = u32::try_from(slice.len())
        // Could only panic for paths over 4 GiB or nodes with over
        // 4 billion child nodes
        .expect("dirstate-v2 offset overflow")
        .into();
    out.extend(slice.as_bytes());
    Slice { start, len }
}
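
// Illustrative round-trip, not part of the original file: `write_slice`
// returns the coordinates at which it appended the items, and
// `read_slice` recovers them from those coordinates. The leading padding
// stands in for the reserved header, and keeps `start` non-zero so the
// result would not read as an absent `OptPathSlice`.
#[cfg(test)]
fn _example_slice_round_trip() -> Result<(), DirstateV2ParseError> {
    let mut out = vec![0_u8; std::mem::size_of::<Header>()];
    let slice = write_slice::<u8>(b"dir/file.rs", &mut out);
    let bytes: &[u8] = read_slice::<u8>(&out, slice)?;
    assert_eq!(bytes, b"dir/file.rs");
    Ok(())
}
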
@@ -1,590 +1,602 b''
// dirstate_map.rs
//
// Copyright 2019 Raphaël Gomès <rgomes@octobus.net>
//
// This software may be used and distributed according to the terms of the
// GNU General Public License version 2 or any later version.

//! Bindings for the `hg::dirstate::dirstate_map` file provided by the
//! `hg-core` package.

use std::cell::{RefCell, RefMut};
use std::convert::TryInto;

use cpython::{
    exc, ObjectProtocol, PyBool, PyBytes, PyClone, PyDict, PyErr, PyList,
    PyObject, PyResult, PySet, PyString, Python, PythonObject, ToPyObject,
    UnsafePyLeaked,
};

use crate::{
    dirstate::copymap::{CopyMap, CopyMapItemsIterator, CopyMapKeysIterator},
    dirstate::non_normal_entries::{
        NonNormalEntries, NonNormalEntriesIterator,
    },
    dirstate::owning::OwningDirstateMap,
    dirstate::{dirs_multiset::Dirs, make_dirstate_tuple},
    parsers::dirstate_parents_to_pytuple,
};
use hg::{
    dirstate::parsers::Timestamp,
    dirstate_tree::dispatch::DirstateMapMethods,
    dirstate_tree::on_disk::DirstateV2ParseError,
    errors::HgError,
    revlog::Node,
    utils::files::normalize_case,
    utils::hg_path::{HgPath, HgPathBuf},
    DirsMultiset, DirstateEntry, DirstateError,
    DirstateMap as RustDirstateMap, DirstateParents, EntryState, StateMapIter,
};

// TODO
// This object needs to share references to multiple members of its Rust
// inner struct, namely `copy_map`, `dirs` and `all_dirs`.
// Right now `CopyMap` is done, but it needs to have an explicit reference
// to `RustDirstateMap` which itself needs to have an encapsulation for
// every method in `CopyMap` (copymapcopy, etc.).
// This is ugly and hard to maintain.
// The same logic applies to `dirs` and `all_dirs`, however the `Dirs`
// `py_class!` is already implemented and does not mention
// `RustDirstateMap`, rightfully so.
// All attributes also have to have a separate refcount data attribute for
// leaks, with all methods that go along for reference sharing.
py_class!(pub class DirstateMap |py| {
    @shared data inner: Box<dyn DirstateMapMethods + Send>;

    /// Returns a `(dirstate_map, parents)` tuple
    @staticmethod
    def new(
        use_dirstate_tree: bool,
        use_dirstate_v2: bool,
        on_disk: PyBytes,
    ) -> PyResult<PyObject> {
        let dirstate_error = |e: DirstateError| {
            PyErr::new::<exc::OSError, _>(py, format!("Dirstate error: {:?}", e))
        };
        let (inner, parents) = if use_dirstate_tree || use_dirstate_v2 {
            let (map, parents) =
                OwningDirstateMap::new(py, on_disk, use_dirstate_v2)
                .map_err(dirstate_error)?;
            (Box::new(map) as _, parents)
        } else {
            let bytes = on_disk.data(py);
            let mut map = RustDirstateMap::default();
            let parents = map.read(bytes).map_err(dirstate_error)?;
            (Box::new(map) as _, parents)
        };
        let map = Self::create_instance(py, inner)?;
        let parents = parents.map(|p| dirstate_parents_to_pytuple(py, &p));
        Ok((map, parents).to_py_object(py).into_object())
    }

    def clear(&self) -> PyResult<PyObject> {
        self.inner(py).borrow_mut().clear();
        Ok(py.None())
    }

    def get(
        &self,
        key: PyObject,
        default: Option<PyObject> = None
    ) -> PyResult<Option<PyObject>> {
        let key = key.extract::<PyBytes>(py)?;
        match self
            .inner(py)
            .borrow()
            .get(HgPath::new(key.data(py)))
            .map_err(|e| v2_error(py, e))?
        {
            Some(entry) => {
                Ok(Some(make_dirstate_tuple(py, &entry)?))
            },
            None => Ok(default)
        }
    }

    def addfile(
        &self,
        f: PyObject,
        oldstate: PyObject,
        state: PyObject,
        mode: PyObject,
        size: PyObject,
        mtime: PyObject
    ) -> PyResult<PyObject> {
        self.inner(py).borrow_mut().add_file(
            HgPath::new(f.extract::<PyBytes>(py)?.data(py)),
            oldstate.extract::<PyBytes>(py)?.data(py)[0]
                .try_into()
                .map_err(|e: HgError| {
                    PyErr::new::<exc::ValueError, _>(py, e.to_string())
                })?,
            DirstateEntry {
                state: state.extract::<PyBytes>(py)?.data(py)[0]
                    .try_into()
                    .map_err(|e: HgError| {
                        PyErr::new::<exc::ValueError, _>(py, e.to_string())
                    })?,
                mode: mode.extract(py)?,
                size: size.extract(py)?,
                mtime: mtime.extract(py)?,
            },
        ).and(Ok(py.None())).or_else(|e: DirstateError| {
            Err(PyErr::new::<exc::ValueError, _>(py, e.to_string()))
        })
    }

    def removefile(
        &self,
        f: PyObject,
        oldstate: PyObject,
        size: PyObject
    ) -> PyResult<PyObject> {
        self.inner(py).borrow_mut()
            .remove_file(
                HgPath::new(f.extract::<PyBytes>(py)?.data(py)),
                oldstate.extract::<PyBytes>(py)?.data(py)[0]
                    .try_into()
                    .map_err(|e: HgError| {
                        PyErr::new::<exc::ValueError, _>(py, e.to_string())
                    })?,
                size.extract(py)?,
            )
            .or_else(|_| {
                Err(PyErr::new::<exc::OSError, _>(
                    py,
                    "Dirstate error".to_string(),
                ))
            })?;
        Ok(py.None())
    }

    def dropfile(
        &self,
        f: PyObject,
        oldstate: PyObject
    ) -> PyResult<PyBool> {
        self.inner(py).borrow_mut()
            .drop_file(
                HgPath::new(f.extract::<PyBytes>(py)?.data(py)),
                oldstate.extract::<PyBytes>(py)?.data(py)[0]
                    .try_into()
                    .map_err(|e: HgError| {
                        PyErr::new::<exc::ValueError, _>(py, e.to_string())
                    })?,
            )
            .and_then(|b| Ok(b.to_py_object(py)))
            .or_else(|e| {
                Err(PyErr::new::<exc::OSError, _>(
                    py,
                    format!("Dirstate error: {}", e.to_string()),
                ))
            })
    }

    def clearambiguoustimes(
        &self,
        files: PyObject,
        now: PyObject
    ) -> PyResult<PyObject> {
        let files: PyResult<Vec<HgPathBuf>> = files
            .iter(py)?
            .map(|filename| {
                Ok(HgPathBuf::from_bytes(
                    filename?.extract::<PyBytes>(py)?.data(py),
                ))
            })
            .collect();
        self.inner(py)
            .borrow_mut()
            .clear_ambiguous_times(files?, now.extract(py)?)
            .map_err(|e| v2_error(py, e))?;
        Ok(py.None())
    }

    def other_parent_entries(&self) -> PyResult<PyObject> {
        let mut inner_shared = self.inner(py).borrow_mut();
        let set = PySet::empty(py)?;
        for path in inner_shared.iter_other_parent_paths() {
            let path = path.map_err(|e| v2_error(py, e))?;
            set.add(py, PyBytes::new(py, path.as_bytes()))?;
        }
        Ok(set.into_object())
    }

    def non_normal_entries(&self) -> PyResult<NonNormalEntries> {
        NonNormalEntries::from_inner(py, self.clone_ref(py))
    }

    def non_normal_entries_contains(&self, key: PyObject) -> PyResult<bool> {
        let key = key.extract::<PyBytes>(py)?;
        self.inner(py)
            .borrow_mut()
            .non_normal_entries_contains(HgPath::new(key.data(py)))
            .map_err(|e| v2_error(py, e))
    }

    def non_normal_entries_display(&self) -> PyResult<PyString> {
        let mut inner = self.inner(py).borrow_mut();
        let paths = inner
            .iter_non_normal_paths()
            .collect::<Result<Vec<_>, _>>()
            .map_err(|e| v2_error(py, e))?;
        let formatted = format!(
            "NonNormalEntries: {}",
            hg::utils::join_display(paths, ", ")
        );
        Ok(PyString::new(py, &formatted))
    }

    def non_normal_entries_remove(&self, key: PyObject) -> PyResult<PyObject> {
        let key = key.extract::<PyBytes>(py)?;
        self
            .inner(py)
            .borrow_mut()
            .non_normal_entries_remove(HgPath::new(key.data(py)));
        Ok(py.None())
    }

    def non_normal_or_other_parent_paths(&self) -> PyResult<PyList> {
        let mut inner = self.inner(py).borrow_mut();

        let ret = PyList::new(py, &[]);
        for filename in inner.non_normal_or_other_parent_paths() {
            let filename = filename.map_err(|e| v2_error(py, e))?;
            let as_pystring = PyBytes::new(py, filename.as_bytes());
            ret.append(py, as_pystring.into_object());
        }
        Ok(ret)
    }

    def non_normal_entries_iter(&self) -> PyResult<NonNormalEntriesIterator> {
        // Make sure the sets are defined before we no longer have a mutable
        // reference to the dmap.
        self.inner(py)
            .borrow_mut()
            .set_non_normal_other_parent_entries(false);

        let leaked_ref = self.inner(py).leak_immutable();

        NonNormalEntriesIterator::from_inner(py, unsafe {
            leaked_ref.map(py, |o| {
                o.iter_non_normal_paths_panic()
            })
        })
    }

    def hastrackeddir(&self, d: PyObject) -> PyResult<PyBool> {
        let d = d.extract::<PyBytes>(py)?;
        Ok(self.inner(py).borrow_mut()
            .has_tracked_dir(HgPath::new(d.data(py)))
            .map_err(|e| {
                PyErr::new::<exc::ValueError, _>(py, e.to_string())
            })?
            .to_py_object(py))
    }

    def hasdir(&self, d: PyObject) -> PyResult<PyBool> {
        let d = d.extract::<PyBytes>(py)?;
        Ok(self.inner(py).borrow_mut()
            .has_dir(HgPath::new(d.data(py)))
            .map_err(|e| {
                PyErr::new::<exc::ValueError, _>(py, e.to_string())
            })?
            .to_py_object(py))
    }

    def write(
        &self,
        use_dirstate_v2: bool,
        p1: PyObject,
        p2: PyObject,
        now: PyObject
    ) -> PyResult<PyBytes> {
        let now = Timestamp(now.extract(py)?);
        let parents = DirstateParents {
            p1: extract_node_id(py, &p1)?,
            p2: extract_node_id(py, &p2)?,
        };

        let mut inner = self.inner(py).borrow_mut();
        let result = if use_dirstate_v2 {
            inner.pack_v2(parents, now)
        } else {
            inner.pack_v1(parents, now)
        };
        match result {
            Ok(packed) => Ok(PyBytes::new(py, &packed)),
            Err(_) => Err(PyErr::new::<exc::OSError, _>(
                py,
                "Dirstate error".to_string(),
            )),
        }
    }
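
    // A note on `write` above, not in the original file: both packers
    // receive `now` so that entries whose mtime equals the write time can
    // be stored with their mtime marked as unknown; a file modified again
    // within the same second would otherwise be indistinguishable from
    // the state that was just recorded.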

    def filefoldmapasdict(&self) -> PyResult<PyDict> {
        let dict = PyDict::new(py);
        for item in self.inner(py).borrow_mut().iter() {
            let (path, entry) = item.map_err(|e| v2_error(py, e))?;
            if entry.state != EntryState::Removed {
                let key = normalize_case(path);
                let value = path;
                dict.set_item(
                    py,
                    PyBytes::new(py, key.as_bytes()).into_object(),
                    PyBytes::new(py, value.as_bytes()).into_object(),
                )?;
            }
        }
        Ok(dict)
    }

    def __len__(&self) -> PyResult<usize> {
        Ok(self.inner(py).borrow().len())
    }

    def __contains__(&self, key: PyObject) -> PyResult<bool> {
        let key = key.extract::<PyBytes>(py)?;
        self.inner(py)
            .borrow()
            .contains_key(HgPath::new(key.data(py)))
            .map_err(|e| v2_error(py, e))
    }

    def __getitem__(&self, key: PyObject) -> PyResult<PyObject> {
        let key = key.extract::<PyBytes>(py)?;
        let key = HgPath::new(key.data(py));
        match self
            .inner(py)
            .borrow()
            .get(key)
            .map_err(|e| v2_error(py, e))?
        {
            Some(entry) => {
                Ok(make_dirstate_tuple(py, &entry)?)
            },
            None => Err(PyErr::new::<exc::KeyError, _>(
                py,
                String::from_utf8_lossy(key.as_bytes()),
            )),
        }
    }

    def keys(&self) -> PyResult<DirstateMapKeysIterator> {
        let leaked_ref = self.inner(py).leak_immutable();
        DirstateMapKeysIterator::from_inner(
            py,
            unsafe { leaked_ref.map(py, |o| o.iter()) },
        )
    }

    def items(&self) -> PyResult<DirstateMapItemsIterator> {
        let leaked_ref = self.inner(py).leak_immutable();
        DirstateMapItemsIterator::from_inner(
            py,
            unsafe { leaked_ref.map(py, |o| o.iter()) },
        )
    }

    def __iter__(&self) -> PyResult<DirstateMapKeysIterator> {
        let leaked_ref = self.inner(py).leak_immutable();
        DirstateMapKeysIterator::from_inner(
            py,
            unsafe { leaked_ref.map(py, |o| o.iter()) },
        )
    }

    def getdirs(&self) -> PyResult<Dirs> {
        // TODO don't copy, share the reference
        self.inner(py).borrow_mut().set_dirs()
            .map_err(|e| {
                PyErr::new::<exc::ValueError, _>(py, e.to_string())
            })?;
        Dirs::from_inner(
            py,
            DirsMultiset::from_dirstate(
                self.inner(py).borrow().iter(),
                Some(EntryState::Removed),
            )
            .map_err(|e| {
                PyErr::new::<exc::ValueError, _>(py, e.to_string())
            })?,
        )
    }

    def getalldirs(&self) -> PyResult<Dirs> {
        // TODO don't copy, share the reference
        self.inner(py).borrow_mut().set_all_dirs()
            .map_err(|e| {
                PyErr::new::<exc::ValueError, _>(py, e.to_string())
            })?;
        Dirs::from_inner(
            py,
            DirsMultiset::from_dirstate(
                self.inner(py).borrow().iter(),
                None,
            ).map_err(|e| {
                PyErr::new::<exc::ValueError, _>(py, e.to_string())
            })?,
        )
    }

    // TODO all copymap* methods, see docstring above
    def copymapcopy(&self) -> PyResult<PyDict> {
        let dict = PyDict::new(py);
        for item in self.inner(py).borrow().copy_map_iter() {
            let (key, value) = item.map_err(|e| v2_error(py, e))?;
            dict.set_item(
                py,
                PyBytes::new(py, key.as_bytes()),
                PyBytes::new(py, value.as_bytes()),
            )?;
        }
        Ok(dict)
    }

    def copymapgetitem(&self, key: PyObject) -> PyResult<PyBytes> {
        let key = key.extract::<PyBytes>(py)?;
        match self
            .inner(py)
            .borrow()
            .copy_map_get(HgPath::new(key.data(py)))
            .map_err(|e| v2_error(py, e))?
        {
            Some(copy) => Ok(PyBytes::new(py, copy.as_bytes())),
            None => Err(PyErr::new::<exc::KeyError, _>(
                py,
                String::from_utf8_lossy(key.data(py)),
            )),
        }
    }

    def copymap(&self) -> PyResult<CopyMap> {
        CopyMap::from_inner(py, self.clone_ref(py))
    }

    def copymaplen(&self) -> PyResult<usize> {
        Ok(self.inner(py).borrow().copy_map_len())
    }

    def copymapcontains(&self, key: PyObject) -> PyResult<bool> {
        let key = key.extract::<PyBytes>(py)?;
        self.inner(py)
            .borrow()
            .copy_map_contains_key(HgPath::new(key.data(py)))
            .map_err(|e| v2_error(py, e))
    }

    def copymapget(
        &self,
        key: PyObject,
        default: Option<PyObject>
    ) -> PyResult<Option<PyObject>> {
        let key = key.extract::<PyBytes>(py)?;
        match self
            .inner(py)
            .borrow()
            .copy_map_get(HgPath::new(key.data(py)))
            .map_err(|e| v2_error(py, e))?
        {
            Some(copy) => Ok(Some(
                PyBytes::new(py, copy.as_bytes()).into_object(),
            )),
            None => Ok(default),
        }
    }

    def copymapsetitem(
        &self,
        key: PyObject,
        value: PyObject
    ) -> PyResult<PyObject> {
        let key = key.extract::<PyBytes>(py)?;
        let value = value.extract::<PyBytes>(py)?;
        self.inner(py)
            .borrow_mut()
            .copy_map_insert(
                HgPathBuf::from_bytes(key.data(py)),
                HgPathBuf::from_bytes(value.data(py)),
            )
            .map_err(|e| v2_error(py, e))?;
        Ok(py.None())
    }

    def copymappop(
        &self,
        key: PyObject,
        default: Option<PyObject>
    ) -> PyResult<Option<PyObject>> {
        let key = key.extract::<PyBytes>(py)?;
        match self
            .inner(py)
            .borrow_mut()
            .copy_map_remove(HgPath::new(key.data(py)))
            .map_err(|e| v2_error(py, e))?
        {
            Some(_) => Ok(None),
            None => Ok(default),
        }
    }

    def copymapiter(&self) -> PyResult<CopyMapKeysIterator> {
        let leaked_ref = self.inner(py).leak_immutable();
        CopyMapKeysIterator::from_inner(
            py,
            unsafe { leaked_ref.map(py, |o| o.copy_map_iter()) },
        )
    }

    def copymapitemsiter(&self) -> PyResult<CopyMapItemsIterator> {
        let leaked_ref = self.inner(py).leak_immutable();
        CopyMapItemsIterator::from_inner(
            py,
            unsafe { leaked_ref.map(py, |o| o.copy_map_iter()) },
        )
    }

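    // An explanatory note, not in the original file: the tuples produced
    // below deliberately mirror the `(state, mode, size, mtime)` layout
    // that `make_dirstate_tuple` gives file entries, with state `b'd'`
    // marking a cached directory and `-1` standing in for an unknown
    // mtime, so directory rows can be rendered like regular per-file
    // lines.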
    def directories(&self) -> PyResult<PyList> {
        let dirs = PyList::new(py, &[]);
        for item in self.inner(py).borrow().iter_directories() {
            let (path, mtime) = item.map_err(|e| v2_error(py, e))?;
            let path = PyBytes::new(py, path.as_bytes());
            let mtime = mtime.map(|t| t.0).unwrap_or(-1);
            let tuple = (path, (b'd', 0, 0, mtime));
            dirs.append(py, tuple.to_py_object(py).into_object())
        }
        Ok(dirs)
    }

538 });
550 });
539
551
540 impl DirstateMap {
552 impl DirstateMap {
541 pub fn get_inner_mut<'a>(
553 pub fn get_inner_mut<'a>(
542 &'a self,
554 &'a self,
543 py: Python<'a>,
555 py: Python<'a>,
544 ) -> RefMut<'a, Box<dyn DirstateMapMethods + Send>> {
556 ) -> RefMut<'a, Box<dyn DirstateMapMethods + Send>> {
545 self.inner(py).borrow_mut()
557 self.inner(py).borrow_mut()
546 }
558 }
547 fn translate_key(
559 fn translate_key(
548 py: Python,
560 py: Python,
549 res: Result<(&HgPath, DirstateEntry), DirstateV2ParseError>,
561 res: Result<(&HgPath, DirstateEntry), DirstateV2ParseError>,
550 ) -> PyResult<Option<PyBytes>> {
562 ) -> PyResult<Option<PyBytes>> {
551 let (f, _entry) = res.map_err(|e| v2_error(py, e))?;
563 let (f, _entry) = res.map_err(|e| v2_error(py, e))?;
552 Ok(Some(PyBytes::new(py, f.as_bytes())))
564 Ok(Some(PyBytes::new(py, f.as_bytes())))
553 }
565 }
554 fn translate_key_value(
566 fn translate_key_value(
555 py: Python,
567 py: Python,
556 res: Result<(&HgPath, DirstateEntry), DirstateV2ParseError>,
568 res: Result<(&HgPath, DirstateEntry), DirstateV2ParseError>,
557 ) -> PyResult<Option<(PyBytes, PyObject)>> {
569 ) -> PyResult<Option<(PyBytes, PyObject)>> {
558 let (f, entry) = res.map_err(|e| v2_error(py, e))?;
570 let (f, entry) = res.map_err(|e| v2_error(py, e))?;
559 Ok(Some((
571 Ok(Some((
560 PyBytes::new(py, f.as_bytes()),
572 PyBytes::new(py, f.as_bytes()),
561 make_dirstate_tuple(py, &entry)?,
573 make_dirstate_tuple(py, &entry)?,
562 )))
574 )))
563 }
575 }
564 }
576 }
565
577
566 py_shared_iterator!(
578 py_shared_iterator!(
567 DirstateMapKeysIterator,
579 DirstateMapKeysIterator,
568 UnsafePyLeaked<StateMapIter<'static>>,
580 UnsafePyLeaked<StateMapIter<'static>>,
569 DirstateMap::translate_key,
581 DirstateMap::translate_key,
570 Option<PyBytes>
582 Option<PyBytes>
571 );
583 );
572
584
573 py_shared_iterator!(
585 py_shared_iterator!(
574 DirstateMapItemsIterator,
586 DirstateMapItemsIterator,
575 UnsafePyLeaked<StateMapIter<'static>>,
587 UnsafePyLeaked<StateMapIter<'static>>,
576 DirstateMap::translate_key_value,
588 DirstateMap::translate_key_value,
577 Option<(PyBytes, PyObject)>
589 Option<(PyBytes, PyObject)>
578 );
590 );
579
591
580 fn extract_node_id(py: Python, obj: &PyObject) -> PyResult<Node> {
592 fn extract_node_id(py: Python, obj: &PyObject) -> PyResult<Node> {
581 let bytes = obj.extract::<PyBytes>(py)?;
593 let bytes = obj.extract::<PyBytes>(py)?;
582 match bytes.data(py).try_into() {
594 match bytes.data(py).try_into() {
583 Ok(s) => Ok(s),
595 Ok(s) => Ok(s),
584 Err(e) => Err(PyErr::new::<exc::ValueError, _>(py, e.to_string())),
596 Err(e) => Err(PyErr::new::<exc::ValueError, _>(py, e.to_string())),
585 }
597 }
586 }
598 }
587
599
588 pub(super) fn v2_error(py: Python<'_>, _: DirstateV2ParseError) -> PyErr {
600 pub(super) fn v2_error(py: Python<'_>, _: DirstateV2ParseError) -> PyErr {
589 PyErr::new::<exc::ValueError, _>(py, "corrupted dirstate-v2")
601 PyErr::new::<exc::ValueError, _>(py, "corrupted dirstate-v2")
590 }
602 }
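
The `directories()` binding added above shapes each on-disk directory as a
dirstate-tuple-like row: state b'd', mode 0, size 0, and the cached mtime,
with -1 standing in for "no mtime recorded". A minimal self-contained sketch
of that shaping, using stand-in types rather than hg-core's (illustration
only, not part of the change):

// Stand-in for hg::dirstate::parsers::Timestamp (assumption).
struct Timestamp(i64);

// Build the same (path, (b'd', 0, 0, mtime)) row as `directories()`.
fn directory_row(
    path: &[u8],
    mtime: Option<Timestamp>,
) -> (Vec<u8>, (u8, i32, i32, i64)) {
    // A missing cached mtime is encoded as -1, matching the binding above.
    let mtime = mtime.map(|t| t.0).unwrap_or(-1);
    (path.to_vec(), (b'd', 0, 0, mtime))
}

fn main() {
    let (path, entry) = directory_row(b"a/1", None);
    assert_eq!(entry, (b'd', 0, 0, -1));
    println!("{} {:?}", String::from_utf8_lossy(&path), entry);
}
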
@@ -1,209 +1,223 b''
1 use crate::dirstate::owning::OwningDirstateMap;
1 use crate::dirstate::owning::OwningDirstateMap;
2 use hg::dirstate::parsers::Timestamp;
2 use hg::dirstate::parsers::Timestamp;
3 use hg::dirstate_tree::dispatch::DirstateMapMethods;
3 use hg::dirstate_tree::dispatch::DirstateMapMethods;
4 use hg::dirstate_tree::on_disk::DirstateV2ParseError;
4 use hg::dirstate_tree::on_disk::DirstateV2ParseError;
5 use hg::matchers::Matcher;
5 use hg::matchers::Matcher;
6 use hg::utils::hg_path::{HgPath, HgPathBuf};
6 use hg::utils::hg_path::{HgPath, HgPathBuf};
7 use hg::CopyMapIter;
7 use hg::CopyMapIter;
8 use hg::DirstateEntry;
8 use hg::DirstateEntry;
9 use hg::DirstateError;
9 use hg::DirstateError;
10 use hg::DirstateParents;
10 use hg::DirstateParents;
11 use hg::DirstateStatus;
11 use hg::DirstateStatus;
12 use hg::EntryState;
12 use hg::EntryState;
13 use hg::PatternFileWarning;
13 use hg::PatternFileWarning;
14 use hg::StateMapIter;
14 use hg::StateMapIter;
15 use hg::StatusError;
15 use hg::StatusError;
16 use hg::StatusOptions;
16 use hg::StatusOptions;
17 use std::path::PathBuf;
17 use std::path::PathBuf;
18
18
19 impl DirstateMapMethods for OwningDirstateMap {
19 impl DirstateMapMethods for OwningDirstateMap {
20 fn clear(&mut self) {
20 fn clear(&mut self) {
21 self.get_mut().clear()
21 self.get_mut().clear()
22 }
22 }
23
23
24 fn add_file(
24 fn add_file(
25 &mut self,
25 &mut self,
26 filename: &HgPath,
26 filename: &HgPath,
27 old_state: EntryState,
27 old_state: EntryState,
28 entry: DirstateEntry,
28 entry: DirstateEntry,
29 ) -> Result<(), DirstateError> {
29 ) -> Result<(), DirstateError> {
30 self.get_mut().add_file(filename, old_state, entry)
30 self.get_mut().add_file(filename, old_state, entry)
31 }
31 }
32
32
33 fn remove_file(
33 fn remove_file(
34 &mut self,
34 &mut self,
35 filename: &HgPath,
35 filename: &HgPath,
36 old_state: EntryState,
36 old_state: EntryState,
37 size: i32,
37 size: i32,
38 ) -> Result<(), DirstateError> {
38 ) -> Result<(), DirstateError> {
39 self.get_mut().remove_file(filename, old_state, size)
39 self.get_mut().remove_file(filename, old_state, size)
40 }
40 }
41
41
42 fn drop_file(
42 fn drop_file(
43 &mut self,
43 &mut self,
44 filename: &HgPath,
44 filename: &HgPath,
45 old_state: EntryState,
45 old_state: EntryState,
46 ) -> Result<bool, DirstateError> {
46 ) -> Result<bool, DirstateError> {
47 self.get_mut().drop_file(filename, old_state)
47 self.get_mut().drop_file(filename, old_state)
48 }
48 }
49
49
50 fn clear_ambiguous_times(
50 fn clear_ambiguous_times(
51 &mut self,
51 &mut self,
52 filenames: Vec<HgPathBuf>,
52 filenames: Vec<HgPathBuf>,
53 now: i32,
53 now: i32,
54 ) -> Result<(), DirstateV2ParseError> {
54 ) -> Result<(), DirstateV2ParseError> {
55 self.get_mut().clear_ambiguous_times(filenames, now)
55 self.get_mut().clear_ambiguous_times(filenames, now)
56 }
56 }
57
57
58 fn non_normal_entries_contains(
58 fn non_normal_entries_contains(
59 &mut self,
59 &mut self,
60 key: &HgPath,
60 key: &HgPath,
61 ) -> Result<bool, DirstateV2ParseError> {
61 ) -> Result<bool, DirstateV2ParseError> {
62 self.get_mut().non_normal_entries_contains(key)
62 self.get_mut().non_normal_entries_contains(key)
63 }
63 }
64
64
65 fn non_normal_entries_remove(&mut self, key: &HgPath) {
65 fn non_normal_entries_remove(&mut self, key: &HgPath) {
66 self.get_mut().non_normal_entries_remove(key)
66 self.get_mut().non_normal_entries_remove(key)
67 }
67 }
68
68
69 fn non_normal_or_other_parent_paths(
69 fn non_normal_or_other_parent_paths(
70 &mut self,
70 &mut self,
71 ) -> Box<dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + '_>
71 ) -> Box<dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + '_>
72 {
72 {
73 self.get_mut().non_normal_or_other_parent_paths()
73 self.get_mut().non_normal_or_other_parent_paths()
74 }
74 }
75
75
76 fn set_non_normal_other_parent_entries(&mut self, force: bool) {
76 fn set_non_normal_other_parent_entries(&mut self, force: bool) {
77 self.get_mut().set_non_normal_other_parent_entries(force)
77 self.get_mut().set_non_normal_other_parent_entries(force)
78 }
78 }
79
79
80 fn iter_non_normal_paths(
80 fn iter_non_normal_paths(
81 &mut self,
81 &mut self,
82 ) -> Box<
82 ) -> Box<
83 dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + Send + '_,
83 dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + Send + '_,
84 > {
84 > {
85 self.get_mut().iter_non_normal_paths()
85 self.get_mut().iter_non_normal_paths()
86 }
86 }
87
87
88 fn iter_non_normal_paths_panic(
88 fn iter_non_normal_paths_panic(
89 &self,
89 &self,
90 ) -> Box<
90 ) -> Box<
91 dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + Send + '_,
91 dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + Send + '_,
92 > {
92 > {
93 self.get().iter_non_normal_paths_panic()
93 self.get().iter_non_normal_paths_panic()
94 }
94 }
95
95
96 fn iter_other_parent_paths(
96 fn iter_other_parent_paths(
97 &mut self,
97 &mut self,
98 ) -> Box<
98 ) -> Box<
99 dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + Send + '_,
99 dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + Send + '_,
100 > {
100 > {
101 self.get_mut().iter_other_parent_paths()
101 self.get_mut().iter_other_parent_paths()
102 }
102 }
103
103
104 fn has_tracked_dir(
104 fn has_tracked_dir(
105 &mut self,
105 &mut self,
106 directory: &HgPath,
106 directory: &HgPath,
107 ) -> Result<bool, DirstateError> {
107 ) -> Result<bool, DirstateError> {
108 self.get_mut().has_tracked_dir(directory)
108 self.get_mut().has_tracked_dir(directory)
109 }
109 }
110
110
111 fn has_dir(&mut self, directory: &HgPath) -> Result<bool, DirstateError> {
111 fn has_dir(&mut self, directory: &HgPath) -> Result<bool, DirstateError> {
112 self.get_mut().has_dir(directory)
112 self.get_mut().has_dir(directory)
113 }
113 }
114
114
115 fn pack_v1(
115 fn pack_v1(
116 &mut self,
116 &mut self,
117 parents: DirstateParents,
117 parents: DirstateParents,
118 now: Timestamp,
118 now: Timestamp,
119 ) -> Result<Vec<u8>, DirstateError> {
119 ) -> Result<Vec<u8>, DirstateError> {
120 self.get_mut().pack_v1(parents, now)
120 self.get_mut().pack_v1(parents, now)
121 }
121 }
122
122
123 fn pack_v2(
123 fn pack_v2(
124 &mut self,
124 &mut self,
125 parents: DirstateParents,
125 parents: DirstateParents,
126 now: Timestamp,
126 now: Timestamp,
127 ) -> Result<Vec<u8>, DirstateError> {
127 ) -> Result<Vec<u8>, DirstateError> {
128 self.get_mut().pack_v2(parents, now)
128 self.get_mut().pack_v2(parents, now)
129 }
129 }
130
130
131 fn set_all_dirs(&mut self) -> Result<(), DirstateError> {
131 fn set_all_dirs(&mut self) -> Result<(), DirstateError> {
132 self.get_mut().set_all_dirs()
132 self.get_mut().set_all_dirs()
133 }
133 }
134
134
135 fn set_dirs(&mut self) -> Result<(), DirstateError> {
135 fn set_dirs(&mut self) -> Result<(), DirstateError> {
136 self.get_mut().set_dirs()
136 self.get_mut().set_dirs()
137 }
137 }
138
138
139 fn status<'a>(
139 fn status<'a>(
140 &'a mut self,
140 &'a mut self,
141 matcher: &'a (dyn Matcher + Sync),
141 matcher: &'a (dyn Matcher + Sync),
142 root_dir: PathBuf,
142 root_dir: PathBuf,
143 ignore_files: Vec<PathBuf>,
143 ignore_files: Vec<PathBuf>,
144 options: StatusOptions,
144 options: StatusOptions,
145 ) -> Result<(DirstateStatus<'a>, Vec<PatternFileWarning>), StatusError>
145 ) -> Result<(DirstateStatus<'a>, Vec<PatternFileWarning>), StatusError>
146 {
146 {
147 self.get_mut()
147 self.get_mut()
148 .status(matcher, root_dir, ignore_files, options)
148 .status(matcher, root_dir, ignore_files, options)
149 }
149 }
150
150
151 fn copy_map_len(&self) -> usize {
151 fn copy_map_len(&self) -> usize {
152 self.get().copy_map_len()
152 self.get().copy_map_len()
153 }
153 }
154
154
155 fn copy_map_iter(&self) -> CopyMapIter<'_> {
155 fn copy_map_iter(&self) -> CopyMapIter<'_> {
156 self.get().copy_map_iter()
156 self.get().copy_map_iter()
157 }
157 }
158
158
159 fn copy_map_contains_key(
159 fn copy_map_contains_key(
160 &self,
160 &self,
161 key: &HgPath,
161 key: &HgPath,
162 ) -> Result<bool, DirstateV2ParseError> {
162 ) -> Result<bool, DirstateV2ParseError> {
163 self.get().copy_map_contains_key(key)
163 self.get().copy_map_contains_key(key)
164 }
164 }
165
165
166 fn copy_map_get(
166 fn copy_map_get(
167 &self,
167 &self,
168 key: &HgPath,
168 key: &HgPath,
169 ) -> Result<Option<&HgPath>, DirstateV2ParseError> {
169 ) -> Result<Option<&HgPath>, DirstateV2ParseError> {
170 self.get().copy_map_get(key)
170 self.get().copy_map_get(key)
171 }
171 }
172
172
173 fn copy_map_remove(
173 fn copy_map_remove(
174 &mut self,
174 &mut self,
175 key: &HgPath,
175 key: &HgPath,
176 ) -> Result<Option<HgPathBuf>, DirstateV2ParseError> {
176 ) -> Result<Option<HgPathBuf>, DirstateV2ParseError> {
177 self.get_mut().copy_map_remove(key)
177 self.get_mut().copy_map_remove(key)
178 }
178 }
179
179
180 fn copy_map_insert(
180 fn copy_map_insert(
181 &mut self,
181 &mut self,
182 key: HgPathBuf,
182 key: HgPathBuf,
183 value: HgPathBuf,
183 value: HgPathBuf,
184 ) -> Result<Option<HgPathBuf>, DirstateV2ParseError> {
184 ) -> Result<Option<HgPathBuf>, DirstateV2ParseError> {
185 self.get_mut().copy_map_insert(key, value)
185 self.get_mut().copy_map_insert(key, value)
186 }
186 }
187
187
188 fn len(&self) -> usize {
188 fn len(&self) -> usize {
189 self.get().len()
189 self.get().len()
190 }
190 }
191
191
192 fn contains_key(
192 fn contains_key(
193 &self,
193 &self,
194 key: &HgPath,
194 key: &HgPath,
195 ) -> Result<bool, DirstateV2ParseError> {
195 ) -> Result<bool, DirstateV2ParseError> {
196 self.get().contains_key(key)
196 self.get().contains_key(key)
197 }
197 }
198
198
199 fn get(
199 fn get(
200 &self,
200 &self,
201 key: &HgPath,
201 key: &HgPath,
202 ) -> Result<Option<DirstateEntry>, DirstateV2ParseError> {
202 ) -> Result<Option<DirstateEntry>, DirstateV2ParseError> {
203 self.get().get(key)
203 self.get().get(key)
204 }
204 }
205
205
206 fn iter(&self) -> StateMapIter<'_> {
206 fn iter(&self) -> StateMapIter<'_> {
207 self.get().iter()
207 self.get().iter()
208 }
208 }
209
210 fn iter_directories(
211 &self,
212 ) -> Box<
213 dyn Iterator<
214 Item = Result<
215 (&HgPath, Option<Timestamp>),
216 DirstateV2ParseError,
217 >,
218 > + Send
219 + '_,
220 > {
221 self.get().iter_directories()
222 }
209 }
223 }
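
Every method in the `impl DirstateMapMethods for OwningDirstateMap` block
above is pure delegation: read-only methods forward through `self.get()`,
mutating ones through `self.get_mut()`, and the new `iter_directories()`
follows the same shape. A toy sketch of the pattern with simplified types
(not Mercurial's; illustration only):

trait MapMethods {
    fn len(&self) -> usize;
    fn clear(&mut self);
}

struct Inner(Vec<u8>);

impl MapMethods for Inner {
    fn len(&self) -> usize {
        self.0.len()
    }
    fn clear(&mut self) {
        self.0.clear()
    }
}

struct OwningMap {
    inner: Inner,
}

impl OwningMap {
    fn get(&self) -> &Inner {
        &self.inner
    }
    fn get_mut(&mut self) -> &mut Inner {
        &mut self.inner
    }
}

impl MapMethods for OwningMap {
    // Read-only: forward through a shared borrow.
    fn len(&self) -> usize {
        self.get().len()
    }
    // Mutating: forward through an exclusive borrow.
    fn clear(&mut self) {
        self.get_mut().clear()
    }
}

fn main() {
    let mut map = OwningMap { inner: Inner(vec![1, 2, 3]) };
    assert_eq!(map.len(), 3);
    map.clear();
    assert_eq!(map.len(), 0);
}
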
@@ -1,163 +1,163 b''
1 // parsers.rs
1 // parsers.rs
2 //
2 //
3 // Copyright 2019 Raphaël Gomès <rgomes@octobus.net>
3 // Copyright 2019 Raphaël Gomès <rgomes@octobus.net>
4 //
4 //
5 // This software may be used and distributed according to the terms of the
5 // This software may be used and distributed according to the terms of the
6 // GNU General Public License version 2 or any later version.
6 // GNU General Public License version 2 or any later version.
7
7
8 //! Bindings for the `hg::dirstate::parsers` module provided by the
8 //! Bindings for the `hg::dirstate::parsers` module provided by the
9 //! `hg-core` package.
9 //! `hg-core` package.
10 //!
10 //!
11 //! From Python, this will be seen as `mercurial.rustext.parsers`
11 //! From Python, this will be seen as `mercurial.rustext.parsers`
12 use cpython::{
12 use cpython::{
13 exc, PyBytes, PyDict, PyErr, PyInt, PyModule, PyResult, PyTuple, Python,
13 exc, PyBytes, PyDict, PyErr, PyInt, PyModule, PyResult, PyTuple, Python,
14 PythonObject, ToPyObject,
14 PythonObject, ToPyObject,
15 };
15 };
16 use hg::{
16 use hg::{
17 dirstate::parsers::Timestamp, pack_dirstate, parse_dirstate,
17 dirstate::parsers::Timestamp, pack_dirstate, parse_dirstate,
18 utils::hg_path::HgPathBuf, DirstateEntry, DirstateParents, FastHashMap,
18 utils::hg_path::HgPathBuf, DirstateEntry, DirstateParents, FastHashMap,
19 PARENT_SIZE,
19 PARENT_SIZE,
20 };
20 };
21 use std::convert::TryInto;
21 use std::convert::TryInto;
22
22
23 use crate::dirstate::{extract_dirstate, make_dirstate_tuple};
23 use crate::dirstate::{extract_dirstate, make_dirstate_tuple};
24
24
25 fn parse_dirstate_wrapper(
25 fn parse_dirstate_wrapper(
26 py: Python,
26 py: Python,
27 dmap: PyDict,
27 dmap: PyDict,
28 copymap: PyDict,
28 copymap: PyDict,
29 st: PyBytes,
29 st: PyBytes,
30 ) -> PyResult<PyTuple> {
30 ) -> PyResult<PyTuple> {
31 match parse_dirstate(st.data(py)) {
31 match parse_dirstate(st.data(py)) {
32 Ok((parents, entries, copies)) => {
32 Ok((parents, entries, copies)) => {
33 let dirstate_map: FastHashMap<HgPathBuf, DirstateEntry> = entries
33 let dirstate_map: FastHashMap<HgPathBuf, DirstateEntry> = entries
34 .into_iter()
34 .into_iter()
35 .map(|(path, entry)| (path.to_owned(), entry))
35 .map(|(path, entry)| (path.to_owned(), entry))
36 .collect();
36 .collect();
37 let copy_map: FastHashMap<HgPathBuf, HgPathBuf> = copies
37 let copy_map: FastHashMap<HgPathBuf, HgPathBuf> = copies
38 .into_iter()
38 .into_iter()
39 .map(|(path, copy)| (path.to_owned(), copy.to_owned()))
39 .map(|(path, copy)| (path.to_owned(), copy.to_owned()))
40 .collect();
40 .collect();
41
41
42 for (filename, entry) in &dirstate_map {
42 for (filename, entry) in &dirstate_map {
43 dmap.set_item(
43 dmap.set_item(
44 py,
44 py,
45 PyBytes::new(py, filename.as_bytes()),
45 PyBytes::new(py, filename.as_bytes()),
46 make_dirstate_tuple(py, entry)?,
46 make_dirstate_tuple(py, entry)?,
47 )?;
47 )?;
48 }
48 }
49 for (path, copy_path) in copy_map {
49 for (path, copy_path) in copy_map {
50 copymap.set_item(
50 copymap.set_item(
51 py,
51 py,
52 PyBytes::new(py, path.as_bytes()),
52 PyBytes::new(py, path.as_bytes()),
53 PyBytes::new(py, copy_path.as_bytes()),
53 PyBytes::new(py, copy_path.as_bytes()),
54 )?;
54 )?;
55 }
55 }
56 Ok(dirstate_parents_to_pytuple(py, parents))
56 Ok(dirstate_parents_to_pytuple(py, parents))
57 }
57 }
58 Err(e) => Err(PyErr::new::<exc::ValueError, _>(py, e.to_string())),
58 Err(e) => Err(PyErr::new::<exc::ValueError, _>(py, e.to_string())),
59 }
59 }
60 }
60 }
61
61
62 fn pack_dirstate_wrapper(
62 fn pack_dirstate_wrapper(
63 py: Python,
63 py: Python,
64 dmap: PyDict,
64 dmap: PyDict,
65 copymap: PyDict,
65 copymap: PyDict,
66 pl: PyTuple,
66 pl: PyTuple,
67 now: PyInt,
67 now: PyInt,
68 ) -> PyResult<PyBytes> {
68 ) -> PyResult<PyBytes> {
69 let p1 = pl.get_item(py, 0).extract::<PyBytes>(py)?;
69 let p1 = pl.get_item(py, 0).extract::<PyBytes>(py)?;
70 let p1: &[u8] = p1.data(py);
70 let p1: &[u8] = p1.data(py);
71 let p2 = pl.get_item(py, 1).extract::<PyBytes>(py)?;
71 let p2 = pl.get_item(py, 1).extract::<PyBytes>(py)?;
72 let p2: &[u8] = p2.data(py);
72 let p2: &[u8] = p2.data(py);
73
73
74 let mut dirstate_map = extract_dirstate(py, &dmap)?;
74 let mut dirstate_map = extract_dirstate(py, &dmap)?;
75
75
76 let copies: Result<FastHashMap<HgPathBuf, HgPathBuf>, PyErr> = copymap
76 let copies: Result<FastHashMap<HgPathBuf, HgPathBuf>, PyErr> = copymap
77 .items(py)
77 .items(py)
78 .iter()
78 .iter()
79 .map(|(key, value)| {
79 .map(|(key, value)| {
80 Ok((
80 Ok((
81 HgPathBuf::from_bytes(key.extract::<PyBytes>(py)?.data(py)),
81 HgPathBuf::from_bytes(key.extract::<PyBytes>(py)?.data(py)),
82 HgPathBuf::from_bytes(value.extract::<PyBytes>(py)?.data(py)),
82 HgPathBuf::from_bytes(value.extract::<PyBytes>(py)?.data(py)),
83 ))
83 ))
84 })
84 })
85 .collect();
85 .collect();
86
86
87 if p1.len() != PARENT_SIZE || p2.len() != PARENT_SIZE {
87 if p1.len() != PARENT_SIZE || p2.len() != PARENT_SIZE {
88 return Err(PyErr::new::<exc::ValueError, _>(
88 return Err(PyErr::new::<exc::ValueError, _>(
89 py,
89 py,
90 "expected a 20-byte hash".to_string(),
90 "expected a 20-byte hash".to_string(),
91 ));
91 ));
92 }
92 }
93
93
94 match pack_dirstate(
94 match pack_dirstate(
95 &mut dirstate_map,
95 &mut dirstate_map,
96 &copies?,
96 &copies?,
97 DirstateParents {
97 DirstateParents {
98 p1: p1.try_into().unwrap(),
98 p1: p1.try_into().unwrap(),
99 p2: p2.try_into().unwrap(),
99 p2: p2.try_into().unwrap(),
100 },
100 },
101 Timestamp(now.as_object().extract::<u64>(py)?),
101 Timestamp(now.as_object().extract::<i64>(py)?),
102 ) {
102 ) {
103 Ok(packed) => {
103 Ok(packed) => {
104 for (filename, entry) in dirstate_map.iter() {
104 for (filename, entry) in dirstate_map.iter() {
105 dmap.set_item(
105 dmap.set_item(
106 py,
106 py,
107 PyBytes::new(py, filename.as_bytes()),
107 PyBytes::new(py, filename.as_bytes()),
108 make_dirstate_tuple(py, &entry)?,
108 make_dirstate_tuple(py, &entry)?,
109 )?;
109 )?;
110 }
110 }
111 Ok(PyBytes::new(py, &packed))
111 Ok(PyBytes::new(py, &packed))
112 }
112 }
113 Err(error) => {
113 Err(error) => {
114 Err(PyErr::new::<exc::ValueError, _>(py, error.to_string()))
114 Err(PyErr::new::<exc::ValueError, _>(py, error.to_string()))
115 }
115 }
116 }
116 }
117 }
117 }
118
118
119 /// Create the module, with `__package__` given from parent
119 /// Create the module, with `__package__` given from parent
120 pub fn init_parsers_module(py: Python, package: &str) -> PyResult<PyModule> {
120 pub fn init_parsers_module(py: Python, package: &str) -> PyResult<PyModule> {
121 let dotted_name = &format!("{}.parsers", package);
121 let dotted_name = &format!("{}.parsers", package);
122 let m = PyModule::new(py, dotted_name)?;
122 let m = PyModule::new(py, dotted_name)?;
123
123
124 m.add(py, "__package__", package)?;
124 m.add(py, "__package__", package)?;
125 m.add(py, "__doc__", "Parsers - Rust implementation")?;
125 m.add(py, "__doc__", "Parsers - Rust implementation")?;
126
126
127 m.add(
127 m.add(
128 py,
128 py,
129 "parse_dirstate",
129 "parse_dirstate",
130 py_fn!(
130 py_fn!(
131 py,
131 py,
132 parse_dirstate_wrapper(dmap: PyDict, copymap: PyDict, st: PyBytes)
132 parse_dirstate_wrapper(dmap: PyDict, copymap: PyDict, st: PyBytes)
133 ),
133 ),
134 )?;
134 )?;
135 m.add(
135 m.add(
136 py,
136 py,
137 "pack_dirstate",
137 "pack_dirstate",
138 py_fn!(
138 py_fn!(
139 py,
139 py,
140 pack_dirstate_wrapper(
140 pack_dirstate_wrapper(
141 dmap: PyDict,
141 dmap: PyDict,
142 copymap: PyDict,
142 copymap: PyDict,
143 pl: PyTuple,
143 pl: PyTuple,
144 now: PyInt
144 now: PyInt
145 )
145 )
146 ),
146 ),
147 )?;
147 )?;
148
148
149 let sys = PyModule::import(py, "sys")?;
149 let sys = PyModule::import(py, "sys")?;
150 let sys_modules: PyDict = sys.get(py, "modules")?.extract(py)?;
150 let sys_modules: PyDict = sys.get(py, "modules")?.extract(py)?;
151 sys_modules.set_item(py, dotted_name, &m)?;
151 sys_modules.set_item(py, dotted_name, &m)?;
152
152
153 Ok(m)
153 Ok(m)
154 }
154 }
155
155
156 pub(crate) fn dirstate_parents_to_pytuple(
156 pub(crate) fn dirstate_parents_to_pytuple(
157 py: Python,
157 py: Python,
158 parents: &DirstateParents,
158 parents: &DirstateParents,
159 ) -> PyTuple {
159 ) -> PyTuple {
160 let p1 = PyBytes::new(py, parents.p1.as_bytes());
160 let p1 = PyBytes::new(py, parents.p1.as_bytes());
161 let p2 = PyBytes::new(py, parents.p2.as_bytes());
161 let p2 = PyBytes::new(py, parents.p2.as_bytes());
162 (p1, p2).to_py_object(py)
162 (p1, p2).to_py_object(py)
163 }
163 }
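
Note the one functional change in this hunk: `now` is extracted from the
Python int as i64 rather than u64, matching a signed `Timestamp`. The diff
does not state the motivation, but a signed value is what lets sentinel and
pre-epoch mtimes (such as the -1 used elsewhere for "no mtime") round-trip;
a small sketch under that assumption, with a stand-in Timestamp type:

#[derive(Debug, PartialEq)]
struct Timestamp(i64);

// With i64, negative values carry through cleanly; extracting -1 from a
// Python int into a u64 would instead fail at the binding layer.
fn timestamp_from_py_int(now: i64) -> Timestamp {
    Timestamp(now)
}

fn main() {
    assert_eq!(timestamp_from_py_int(-1), Timestamp(-1));
    assert_eq!(timestamp_from_py_int(-1_000), Timestamp(-1_000));
    println!("{:?}", timestamp_from_py_int(1_600_000_000));
}
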
@@ -1,443 +1,443 b''
1 Show all commands except debug commands
1 Show all commands except debug commands
2 $ hg debugcomplete
2 $ hg debugcomplete
3 abort
3 abort
4 add
4 add
5 addremove
5 addremove
6 annotate
6 annotate
7 archive
7 archive
8 backout
8 backout
9 bisect
9 bisect
10 bookmarks
10 bookmarks
11 branch
11 branch
12 branches
12 branches
13 bundle
13 bundle
14 cat
14 cat
15 clone
15 clone
16 commit
16 commit
17 config
17 config
18 continue
18 continue
19 copy
19 copy
20 diff
20 diff
21 export
21 export
22 files
22 files
23 forget
23 forget
24 graft
24 graft
25 grep
25 grep
26 heads
26 heads
27 help
27 help
28 identify
28 identify
29 import
29 import
30 incoming
30 incoming
31 init
31 init
32 locate
32 locate
33 log
33 log
34 manifest
34 manifest
35 merge
35 merge
36 outgoing
36 outgoing
37 parents
37 parents
38 paths
38 paths
39 phase
39 phase
40 pull
40 pull
41 purge
41 purge
42 push
42 push
43 recover
43 recover
44 remove
44 remove
45 rename
45 rename
46 resolve
46 resolve
47 revert
47 revert
48 rollback
48 rollback
49 root
49 root
50 serve
50 serve
51 shelve
51 shelve
52 status
52 status
53 summary
53 summary
54 tag
54 tag
55 tags
55 tags
56 tip
56 tip
57 unbundle
57 unbundle
58 unshelve
58 unshelve
59 update
59 update
60 verify
60 verify
61 version
61 version
62
62
63 Show all commands that start with "a"
63 Show all commands that start with "a"
64 $ hg debugcomplete a
64 $ hg debugcomplete a
65 abort
65 abort
66 add
66 add
67 addremove
67 addremove
68 annotate
68 annotate
69 archive
69 archive
70
70
71 Do not show debug commands if there are other candidates
71 Do not show debug commands if there are other candidates
72 $ hg debugcomplete d
72 $ hg debugcomplete d
73 diff
73 diff
74
74
75 Show debug commands if there are no other candidates
75 Show debug commands if there are no other candidates
76 $ hg debugcomplete debug
76 $ hg debugcomplete debug
77 debugancestor
77 debugancestor
78 debugantivirusrunning
78 debugantivirusrunning
79 debugapplystreamclonebundle
79 debugapplystreamclonebundle
80 debugbackupbundle
80 debugbackupbundle
81 debugbuilddag
81 debugbuilddag
82 debugbundle
82 debugbundle
83 debugcapabilities
83 debugcapabilities
84 debugchangedfiles
84 debugchangedfiles
85 debugcheckstate
85 debugcheckstate
86 debugcolor
86 debugcolor
87 debugcommands
87 debugcommands
88 debugcomplete
88 debugcomplete
89 debugconfig
89 debugconfig
90 debugcreatestreamclonebundle
90 debugcreatestreamclonebundle
91 debugdag
91 debugdag
92 debugdata
92 debugdata
93 debugdate
93 debugdate
94 debugdeltachain
94 debugdeltachain
95 debugdirstate
95 debugdirstate
96 debugdiscovery
96 debugdiscovery
97 debugdownload
97 debugdownload
98 debugextensions
98 debugextensions
99 debugfileset
99 debugfileset
100 debugformat
100 debugformat
101 debugfsinfo
101 debugfsinfo
102 debuggetbundle
102 debuggetbundle
103 debugignore
103 debugignore
104 debugindex
104 debugindex
105 debugindexdot
105 debugindexdot
106 debugindexstats
106 debugindexstats
107 debuginstall
107 debuginstall
108 debugknown
108 debugknown
109 debuglabelcomplete
109 debuglabelcomplete
110 debuglocks
110 debuglocks
111 debugmanifestfulltextcache
111 debugmanifestfulltextcache
112 debugmergestate
112 debugmergestate
113 debugnamecomplete
113 debugnamecomplete
114 debugnodemap
114 debugnodemap
115 debugobsolete
115 debugobsolete
116 debugp1copies
116 debugp1copies
117 debugp2copies
117 debugp2copies
118 debugpathcomplete
118 debugpathcomplete
119 debugpathcopies
119 debugpathcopies
120 debugpeer
120 debugpeer
121 debugpickmergetool
121 debugpickmergetool
122 debugpushkey
122 debugpushkey
123 debugpvec
123 debugpvec
124 debugrebuilddirstate
124 debugrebuilddirstate
125 debugrebuildfncache
125 debugrebuildfncache
126 debugrename
126 debugrename
127 debugrequires
127 debugrequires
128 debugrevlog
128 debugrevlog
129 debugrevlogindex
129 debugrevlogindex
130 debugrevspec
130 debugrevspec
131 debugserve
131 debugserve
132 debugsetparents
132 debugsetparents
133 debugshell
133 debugshell
134 debugsidedata
134 debugsidedata
135 debugssl
135 debugssl
136 debugstrip
136 debugstrip
137 debugsub
137 debugsub
138 debugsuccessorssets
138 debugsuccessorssets
139 debugtagscache
139 debugtagscache
140 debugtemplate
140 debugtemplate
141 debuguigetpass
141 debuguigetpass
142 debuguiprompt
142 debuguiprompt
143 debugupdatecaches
143 debugupdatecaches
144 debugupgraderepo
144 debugupgraderepo
145 debugwalk
145 debugwalk
146 debugwhyunstable
146 debugwhyunstable
147 debugwireargs
147 debugwireargs
148 debugwireproto
148 debugwireproto
149
149
150 Do not show the alias of a debug command if there are other candidates
150 Do not show the alias of a debug command if there are other candidates
151 (this should hide rawcommit)
151 (this should hide rawcommit)
152 $ hg debugcomplete r
152 $ hg debugcomplete r
153 recover
153 recover
154 remove
154 remove
155 rename
155 rename
156 resolve
156 resolve
157 revert
157 revert
158 rollback
158 rollback
159 root
159 root
160 Show the alias of a debug command if there are no other candidates
160 Show the alias of a debug command if there are no other candidates
161 $ hg debugcomplete rawc
161 $ hg debugcomplete rawc
162
162
163
163
164 Show the global options
164 Show the global options
165 $ hg debugcomplete --options | sort
165 $ hg debugcomplete --options | sort
166 --color
166 --color
167 --config
167 --config
168 --cwd
168 --cwd
169 --debug
169 --debug
170 --debugger
170 --debugger
171 --encoding
171 --encoding
172 --encodingmode
172 --encodingmode
173 --help
173 --help
174 --hidden
174 --hidden
175 --noninteractive
175 --noninteractive
176 --pager
176 --pager
177 --profile
177 --profile
178 --quiet
178 --quiet
179 --repository
179 --repository
180 --time
180 --time
181 --traceback
181 --traceback
182 --verbose
182 --verbose
183 --version
183 --version
184 -R
184 -R
185 -h
185 -h
186 -q
186 -q
187 -v
187 -v
188 -y
188 -y
189
189
190 Show the options for the "serve" command
190 Show the options for the "serve" command
191 $ hg debugcomplete --options serve | sort
191 $ hg debugcomplete --options serve | sort
192 --accesslog
192 --accesslog
193 --address
193 --address
194 --certificate
194 --certificate
195 --cmdserver
195 --cmdserver
196 --color
196 --color
197 --config
197 --config
198 --cwd
198 --cwd
199 --daemon
199 --daemon
200 --daemon-postexec
200 --daemon-postexec
201 --debug
201 --debug
202 --debugger
202 --debugger
203 --encoding
203 --encoding
204 --encodingmode
204 --encodingmode
205 --errorlog
205 --errorlog
206 --help
206 --help
207 --hidden
207 --hidden
208 --ipv6
208 --ipv6
209 --name
209 --name
210 --noninteractive
210 --noninteractive
211 --pager
211 --pager
212 --pid-file
212 --pid-file
213 --port
213 --port
214 --prefix
214 --prefix
215 --print-url
215 --print-url
216 --profile
216 --profile
217 --quiet
217 --quiet
218 --repository
218 --repository
219 --stdio
219 --stdio
220 --style
220 --style
221 --subrepos
221 --subrepos
222 --templates
222 --templates
223 --time
223 --time
224 --traceback
224 --traceback
225 --verbose
225 --verbose
226 --version
226 --version
227 --web-conf
227 --web-conf
228 -6
228 -6
229 -A
229 -A
230 -E
230 -E
231 -R
231 -R
232 -S
232 -S
233 -a
233 -a
234 -d
234 -d
235 -h
235 -h
236 -n
236 -n
237 -p
237 -p
238 -q
238 -q
239 -t
239 -t
240 -v
240 -v
241 -y
241 -y
242
242
243 Show an error if we use --options with an ambiguous abbreviation
243 Show an error if we use --options with an ambiguous abbreviation
244 $ hg debugcomplete --options s
244 $ hg debugcomplete --options s
245 hg: command 's' is ambiguous:
245 hg: command 's' is ambiguous:
246 serve shelve showconfig status summary
246 serve shelve showconfig status summary
247 [10]
247 [10]
248
248
249 Show all commands + options
249 Show all commands + options
250 $ hg debugcommands
250 $ hg debugcommands
251 abort: dry-run
251 abort: dry-run
252 add: include, exclude, subrepos, dry-run
252 add: include, exclude, subrepos, dry-run
253 addremove: similarity, subrepos, include, exclude, dry-run
253 addremove: similarity, subrepos, include, exclude, dry-run
254 annotate: rev, follow, no-follow, text, user, file, date, number, changeset, line-number, skip, ignore-all-space, ignore-space-change, ignore-blank-lines, ignore-space-at-eol, include, exclude, template
254 annotate: rev, follow, no-follow, text, user, file, date, number, changeset, line-number, skip, ignore-all-space, ignore-space-change, ignore-blank-lines, ignore-space-at-eol, include, exclude, template
255 archive: no-decode, prefix, rev, type, subrepos, include, exclude
255 archive: no-decode, prefix, rev, type, subrepos, include, exclude
256 backout: merge, commit, no-commit, parent, rev, edit, tool, include, exclude, message, logfile, date, user
256 backout: merge, commit, no-commit, parent, rev, edit, tool, include, exclude, message, logfile, date, user
257 bisect: reset, good, bad, skip, extend, command, noupdate
257 bisect: reset, good, bad, skip, extend, command, noupdate
258 bookmarks: force, rev, delete, rename, inactive, list, template
258 bookmarks: force, rev, delete, rename, inactive, list, template
259 branch: force, clean, rev
259 branch: force, clean, rev
260 branches: active, closed, rev, template
260 branches: active, closed, rev, template
261 bundle: force, rev, branch, base, all, type, ssh, remotecmd, insecure
261 bundle: force, rev, branch, base, all, type, ssh, remotecmd, insecure
262 cat: output, rev, decode, include, exclude, template
262 cat: output, rev, decode, include, exclude, template
263 clone: noupdate, updaterev, rev, branch, pull, uncompressed, stream, ssh, remotecmd, insecure
263 clone: noupdate, updaterev, rev, branch, pull, uncompressed, stream, ssh, remotecmd, insecure
264 commit: addremove, close-branch, amend, secret, edit, force-close-branch, interactive, include, exclude, message, logfile, date, user, subrepos
264 commit: addremove, close-branch, amend, secret, edit, force-close-branch, interactive, include, exclude, message, logfile, date, user, subrepos
265 config: untrusted, edit, local, source, shared, non-shared, global, template
265 config: untrusted, edit, local, source, shared, non-shared, global, template
266 continue: dry-run
266 continue: dry-run
267 copy: forget, after, at-rev, force, include, exclude, dry-run
267 copy: forget, after, at-rev, force, include, exclude, dry-run
268 debugancestor:
268 debugancestor:
269 debugantivirusrunning:
269 debugantivirusrunning:
270 debugapplystreamclonebundle:
270 debugapplystreamclonebundle:
271 debugbackupbundle: recover, patch, git, limit, no-merges, stat, graph, style, template
271 debugbackupbundle: recover, patch, git, limit, no-merges, stat, graph, style, template
272 debugbuilddag: mergeable-file, overwritten-file, new-file
272 debugbuilddag: mergeable-file, overwritten-file, new-file
273 debugbundle: all, part-type, spec
273 debugbundle: all, part-type, spec
274 debugcapabilities:
274 debugcapabilities:
275 debugchangedfiles: compute
275 debugchangedfiles: compute
276 debugcheckstate:
276 debugcheckstate:
277 debugcolor: style
277 debugcolor: style
278 debugcommands:
278 debugcommands:
279 debugcomplete: options
279 debugcomplete: options
280 debugcreatestreamclonebundle:
280 debugcreatestreamclonebundle:
281 debugdag: tags, branches, dots, spaces
281 debugdag: tags, branches, dots, spaces
282 debugdata: changelog, manifest, dir
282 debugdata: changelog, manifest, dir
283 debugdate: extended
283 debugdate: extended
284 debugdeltachain: changelog, manifest, dir, template
284 debugdeltachain: changelog, manifest, dir, template
285 debugdirstate: nodates, dates, datesort
285 debugdirstate: nodates, dates, datesort, dirs
286 debugdiscovery: old, nonheads, rev, seed, local-as-revs, remote-as-revs, ssh, remotecmd, insecure, template
286 debugdiscovery: old, nonheads, rev, seed, local-as-revs, remote-as-revs, ssh, remotecmd, insecure, template
287 debugdownload: output
287 debugdownload: output
288 debugextensions: template
288 debugextensions: template
289 debugfileset: rev, all-files, show-matcher, show-stage
289 debugfileset: rev, all-files, show-matcher, show-stage
290 debugformat: template
290 debugformat: template
291 debugfsinfo:
291 debugfsinfo:
292 debuggetbundle: head, common, type
292 debuggetbundle: head, common, type
293 debugignore:
293 debugignore:
294 debugindex: changelog, manifest, dir, template
294 debugindex: changelog, manifest, dir, template
295 debugindexdot: changelog, manifest, dir
295 debugindexdot: changelog, manifest, dir
296 debugindexstats:
296 debugindexstats:
297 debuginstall: template
297 debuginstall: template
298 debugknown:
298 debugknown:
299 debuglabelcomplete:
299 debuglabelcomplete:
300 debuglocks: force-free-lock, force-free-wlock, set-lock, set-wlock
300 debuglocks: force-free-lock, force-free-wlock, set-lock, set-wlock
301 debugmanifestfulltextcache: clear, add
301 debugmanifestfulltextcache: clear, add
302 debugmergestate: style, template
302 debugmergestate: style, template
303 debugnamecomplete:
303 debugnamecomplete:
304 debugnodemap: dump-new, dump-disk, check, metadata
304 debugnodemap: dump-new, dump-disk, check, metadata
305 debugobsolete: flags, record-parents, rev, exclusive, index, delete, date, user, template
305 debugobsolete: flags, record-parents, rev, exclusive, index, delete, date, user, template
306 debugp1copies: rev
306 debugp1copies: rev
307 debugp2copies: rev
307 debugp2copies: rev
308 debugpathcomplete: full, normal, added, removed
308 debugpathcomplete: full, normal, added, removed
309 debugpathcopies: include, exclude
309 debugpathcopies: include, exclude
310 debugpeer:
310 debugpeer:
311 debugpickmergetool: rev, changedelete, include, exclude, tool
311 debugpickmergetool: rev, changedelete, include, exclude, tool
312 debugpushkey:
312 debugpushkey:
313 debugpvec:
313 debugpvec:
314 debugrebuilddirstate: rev, minimal
314 debugrebuilddirstate: rev, minimal
315 debugrebuildfncache:
315 debugrebuildfncache:
316 debugrename: rev
316 debugrename: rev
317 debugrequires:
317 debugrequires:
318 debugrevlog: changelog, manifest, dir, dump
318 debugrevlog: changelog, manifest, dir, dump
319 debugrevlogindex: changelog, manifest, dir, format
319 debugrevlogindex: changelog, manifest, dir, format
320 debugrevspec: optimize, show-revs, show-set, show-stage, no-optimized, verify-optimized
320 debugrevspec: optimize, show-revs, show-set, show-stage, no-optimized, verify-optimized
321 debugserve: sshstdio, logiofd, logiofile
321 debugserve: sshstdio, logiofd, logiofile
322 debugsetparents:
322 debugsetparents:
323 debugshell:
323 debugshell:
324 debugsidedata: changelog, manifest, dir
324 debugsidedata: changelog, manifest, dir
325 debugssl:
325 debugssl:
326 debugstrip: rev, force, no-backup, nobackup, , keep, bookmark, soft
326 debugstrip: rev, force, no-backup, nobackup, , keep, bookmark, soft
327 debugsub: rev
327 debugsub: rev
328 debugsuccessorssets: closest
328 debugsuccessorssets: closest
329 debugtagscache:
329 debugtagscache:
330 debugtemplate: rev, define
330 debugtemplate: rev, define
331 debuguigetpass: prompt
331 debuguigetpass: prompt
332 debuguiprompt: prompt
332 debuguiprompt: prompt
333 debugupdatecaches:
333 debugupdatecaches:
334 debugupgraderepo: optimize, run, backup, changelog, manifest, filelogs
334 debugupgraderepo: optimize, run, backup, changelog, manifest, filelogs
335 debugwalk: include, exclude
335 debugwalk: include, exclude
336 debugwhyunstable:
336 debugwhyunstable:
337 debugwireargs: three, four, five, ssh, remotecmd, insecure
337 debugwireargs: three, four, five, ssh, remotecmd, insecure
338 debugwireproto: localssh, peer, noreadstderr, nologhandshake, ssh, remotecmd, insecure
338 debugwireproto: localssh, peer, noreadstderr, nologhandshake, ssh, remotecmd, insecure
339 diff: rev, from, to, change, text, git, binary, nodates, noprefix, show-function, reverse, ignore-all-space, ignore-space-change, ignore-blank-lines, ignore-space-at-eol, unified, stat, root, include, exclude, subrepos
339 diff: rev, from, to, change, text, git, binary, nodates, noprefix, show-function, reverse, ignore-all-space, ignore-space-change, ignore-blank-lines, ignore-space-at-eol, unified, stat, root, include, exclude, subrepos
340 export: bookmark, output, switch-parent, rev, text, git, binary, nodates, template
340 export: bookmark, output, switch-parent, rev, text, git, binary, nodates, template
341 files: rev, print0, include, exclude, template, subrepos
341 files: rev, print0, include, exclude, template, subrepos
342 forget: interactive, include, exclude, dry-run
342 forget: interactive, include, exclude, dry-run
343 graft: rev, base, continue, stop, abort, edit, log, no-commit, force, currentdate, currentuser, date, user, tool, dry-run
343 graft: rev, base, continue, stop, abort, edit, log, no-commit, force, currentdate, currentuser, date, user, tool, dry-run
344 grep: print0, all, diff, text, follow, ignore-case, files-with-matches, line-number, rev, all-files, user, date, template, include, exclude
344 grep: print0, all, diff, text, follow, ignore-case, files-with-matches, line-number, rev, all-files, user, date, template, include, exclude
345 heads: rev, topo, active, closed, style, template
345 heads: rev, topo, active, closed, style, template
346 help: extension, command, keyword, system
346 help: extension, command, keyword, system
347 identify: rev, num, id, branch, tags, bookmarks, ssh, remotecmd, insecure, template
347 identify: rev, num, id, branch, tags, bookmarks, ssh, remotecmd, insecure, template
348 import: strip, base, secret, edit, force, no-commit, bypass, partial, exact, prefix, import-branch, message, logfile, date, user, similarity
348 import: strip, base, secret, edit, force, no-commit, bypass, partial, exact, prefix, import-branch, message, logfile, date, user, similarity
349 incoming: force, newest-first, bundle, rev, bookmarks, branch, patch, git, limit, no-merges, stat, graph, style, template, ssh, remotecmd, insecure, subrepos
349 incoming: force, newest-first, bundle, rev, bookmarks, branch, patch, git, limit, no-merges, stat, graph, style, template, ssh, remotecmd, insecure, subrepos
350 init: ssh, remotecmd, insecure
350 init: ssh, remotecmd, insecure
351 locate: rev, print0, fullpath, include, exclude
351 locate: rev, print0, fullpath, include, exclude
352 log: follow, follow-first, date, copies, keyword, rev, line-range, removed, only-merges, user, only-branch, branch, bookmark, prune, patch, git, limit, no-merges, stat, graph, style, template, include, exclude
352 log: follow, follow-first, date, copies, keyword, rev, line-range, removed, only-merges, user, only-branch, branch, bookmark, prune, patch, git, limit, no-merges, stat, graph, style, template, include, exclude
353 manifest: rev, all, template
353 manifest: rev, all, template
354 merge: force, rev, preview, abort, tool
354 merge: force, rev, preview, abort, tool
355 outgoing: force, rev, newest-first, bookmarks, branch, patch, git, limit, no-merges, stat, graph, style, template, ssh, remotecmd, insecure, subrepos
355 outgoing: force, rev, newest-first, bookmarks, branch, patch, git, limit, no-merges, stat, graph, style, template, ssh, remotecmd, insecure, subrepos
356 parents: rev, style, template
356 parents: rev, style, template
357 paths: template
357 paths: template
358 phase: public, draft, secret, force, rev
358 phase: public, draft, secret, force, rev
359 pull: update, force, confirm, rev, bookmark, branch, ssh, remotecmd, insecure
359 pull: update, force, confirm, rev, bookmark, branch, ssh, remotecmd, insecure
360 purge: abort-on-err, all, ignored, dirs, files, print, print0, confirm, include, exclude
360 purge: abort-on-err, all, ignored, dirs, files, print, print0, confirm, include, exclude
361 push: force, rev, bookmark, all-bookmarks, branch, new-branch, pushvars, publish, ssh, remotecmd, insecure
361 push: force, rev, bookmark, all-bookmarks, branch, new-branch, pushvars, publish, ssh, remotecmd, insecure
362 recover: verify
362 recover: verify
363 remove: after, force, subrepos, include, exclude, dry-run
363 remove: after, force, subrepos, include, exclude, dry-run
364 rename: forget, after, at-rev, force, include, exclude, dry-run
364 rename: forget, after, at-rev, force, include, exclude, dry-run
365 resolve: all, list, mark, unmark, no-status, re-merge, tool, include, exclude, template
365 resolve: all, list, mark, unmark, no-status, re-merge, tool, include, exclude, template
366 revert: all, date, rev, no-backup, interactive, include, exclude, dry-run
366 revert: all, date, rev, no-backup, interactive, include, exclude, dry-run
367 rollback: dry-run, force
367 rollback: dry-run, force
368 root: template
368 root: template
369 serve: accesslog, daemon, daemon-postexec, errorlog, port, address, prefix, name, web-conf, webdir-conf, pid-file, stdio, cmdserver, templates, style, ipv6, certificate, print-url, subrepos
369 serve: accesslog, daemon, daemon-postexec, errorlog, port, address, prefix, name, web-conf, webdir-conf, pid-file, stdio, cmdserver, templates, style, ipv6, certificate, print-url, subrepos
370 shelve: addremove, unknown, cleanup, date, delete, edit, keep, list, message, name, patch, interactive, stat, include, exclude
370 shelve: addremove, unknown, cleanup, date, delete, edit, keep, list, message, name, patch, interactive, stat, include, exclude
371 status: all, modified, added, removed, deleted, clean, unknown, ignored, no-status, terse, copies, print0, rev, change, include, exclude, subrepos, template
371 status: all, modified, added, removed, deleted, clean, unknown, ignored, no-status, terse, copies, print0, rev, change, include, exclude, subrepos, template
372 summary: remote
372 summary: remote
373 tag: force, local, rev, remove, edit, message, date, user
373 tag: force, local, rev, remove, edit, message, date, user
374 tags: template
374 tags: template
375 tip: patch, git, style, template
375 tip: patch, git, style, template
376 unbundle: update
376 unbundle: update
377 unshelve: abort, continue, interactive, keep, name, tool, date
377 unshelve: abort, continue, interactive, keep, name, tool, date
378 update: clean, check, merge, date, rev, tool
378 update: clean, check, merge, date, rev, tool
379 verify: full
379 verify: full
380 version: template
380 version: template
381
381
382 $ hg init a
382 $ hg init a
383 $ cd a
383 $ cd a
384 $ echo fee > fee
384 $ echo fee > fee
385 $ hg ci -q -Amfee
385 $ hg ci -q -Amfee
386 $ hg tag fee
386 $ hg tag fee
387 $ mkdir fie
387 $ mkdir fie
388 $ echo dead > fie/dead
388 $ echo dead > fie/dead
389 $ echo live > fie/live
389 $ echo live > fie/live
390 $ hg bookmark fo
390 $ hg bookmark fo
391 $ hg branch -q fie
391 $ hg branch -q fie
392 $ hg ci -q -Amfie
392 $ hg ci -q -Amfie
393 $ echo fo > fo
393 $ echo fo > fo
394 $ hg branch -qf default
394 $ hg branch -qf default
395 $ hg ci -q -Amfo
395 $ hg ci -q -Amfo
396 $ echo Fum > Fum
396 $ echo Fum > Fum
397 $ hg ci -q -AmFum
397 $ hg ci -q -AmFum
398 $ hg bookmark Fum
398 $ hg bookmark Fum
399
399
400 Test debugpathcomplete
400 Test debugpathcomplete
401
401
402 $ hg debugpathcomplete f
402 $ hg debugpathcomplete f
403 fee
403 fee
404 fie
404 fie
405 fo
405 fo
406 $ hg debugpathcomplete -f f
406 $ hg debugpathcomplete -f f
407 fee
407 fee
408 fie/dead
408 fie/dead
409 fie/live
409 fie/live
410 fo
410 fo
411
411
412 $ hg rm Fum
412 $ hg rm Fum
413 $ hg debugpathcomplete -r F
413 $ hg debugpathcomplete -r F
414 Fum
414 Fum
415
415
416 Test debugnamecomplete
416 Test debugnamecomplete
417
417
418 $ hg debugnamecomplete
418 $ hg debugnamecomplete
419 Fum
419 Fum
420 default
420 default
421 fee
421 fee
422 fie
422 fie
423 fo
423 fo
424 tip
424 tip
425 $ hg debugnamecomplete f
425 $ hg debugnamecomplete f
426 fee
426 fee
427 fie
427 fie
428 fo
428 fo
429
429
430 Test debuglabelcomplete, a deprecated name for debugnamecomplete that is still
430 Test debuglabelcomplete, a deprecated name for debugnamecomplete that is still
431 used for completions in some shells.
431 used for completions in some shells.
432
432
433 $ hg debuglabelcomplete
433 $ hg debuglabelcomplete
434 Fum
434 Fum
435 default
435 default
436 fee
436 fee
437 fie
437 fie
438 fo
438 fo
439 tip
439 tip
440 $ hg debuglabelcomplete f
440 $ hg debuglabelcomplete f
441 fee
441 fee
442 fie
442 fie
443 fo
443 fo
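
The `dirs` flag registered for debugdirstate in the listing above is what the
new `directories()` binding feeds. A hypothetical sketch of how such a listing
could render the iterator's rows (toy types and formatting; the actual
debugdirstate output format is not shown in this diff):

struct Timestamp(i64);

fn print_dirs<'a, I>(dirs: I)
where
    I: Iterator<Item = (&'a str, Option<Timestamp>)>,
{
    for (path, mtime) in dirs {
        // Mirror the (b'd', 0, 0, mtime) rows built by `directories()`.
        let mtime = mtime.map(|t| t.0).unwrap_or(-1);
        println!("d   0          0 {:>10} {}", mtime, path);
    }
}

fn main() {
    let rows = vec![("a", None), ("b/1", Some(Timestamp(1_600_000_000)))];
    print_dirs(rows.into_iter());
}
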
@@ -1,917 +1,960 b''
1 #testcases dirstate-v1 dirstate-v1-tree dirstate-v2
1 #testcases dirstate-v1 dirstate-v1-tree dirstate-v2
2
2
3 #if no-rust
3 #if no-rust
4 $ hg init repo0 --config format.exp-dirstate-v2=1
4 $ hg init repo0 --config format.exp-dirstate-v2=1
5 abort: dirstate v2 format requested by config but not supported (requires Rust extensions)
5 abort: dirstate v2 format requested by config but not supported (requires Rust extensions)
6 [255]
6 [255]
7 #endif
7 #endif
8
8
9 #if dirstate-v1-tree
9 #if dirstate-v1-tree
10 #require rust
10 #require rust
11 $ echo '[experimental]' >> $HGRCPATH
11 $ echo '[experimental]' >> $HGRCPATH
12 $ echo 'dirstate-tree.in-memory=1' >> $HGRCPATH
12 $ echo 'dirstate-tree.in-memory=1' >> $HGRCPATH
13 #endif
13 #endif
14
14
15 #if dirstate-v2
15 #if dirstate-v2
16 #require rust
16 #require rust
17 $ echo '[format]' >> $HGRCPATH
17 $ echo '[format]' >> $HGRCPATH
18 $ echo 'exp-dirstate-v2=1' >> $HGRCPATH
18 $ echo 'exp-dirstate-v2=1' >> $HGRCPATH
19 #endif
19 #endif
20
20
21 $ hg init repo1
21 $ hg init repo1
22 $ cd repo1
22 $ cd repo1
23 $ mkdir a b a/1 b/1 b/2
23 $ mkdir a b a/1 b/1 b/2
24 $ touch in_root a/in_a b/in_b a/1/in_a_1 b/1/in_b_1 b/2/in_b_2
24 $ touch in_root a/in_a b/in_b a/1/in_a_1 b/1/in_b_1 b/2/in_b_2
25
25
26 hg status in repo root:
26 hg status in repo root:
27
27
28 $ hg status
28 $ hg status
29 ? a/1/in_a_1
29 ? a/1/in_a_1
30 ? a/in_a
30 ? a/in_a
31 ? b/1/in_b_1
31 ? b/1/in_b_1
32 ? b/2/in_b_2
32 ? b/2/in_b_2
33 ? b/in_b
33 ? b/in_b
34 ? in_root
34 ? in_root
35
35
36 hg status . in repo root:
36 hg status . in repo root:
37
37
38 $ hg status .
38 $ hg status .
39 ? a/1/in_a_1
39 ? a/1/in_a_1
40 ? a/in_a
40 ? a/in_a
41 ? b/1/in_b_1
41 ? b/1/in_b_1
42 ? b/2/in_b_2
42 ? b/2/in_b_2
43 ? b/in_b
43 ? b/in_b
44 ? in_root
44 ? in_root
45
45
46 $ hg status --cwd a
46 $ hg status --cwd a
47 ? a/1/in_a_1
47 ? a/1/in_a_1
48 ? a/in_a
48 ? a/in_a
49 ? b/1/in_b_1
49 ? b/1/in_b_1
50 ? b/2/in_b_2
50 ? b/2/in_b_2
51 ? b/in_b
51 ? b/in_b
52 ? in_root
52 ? in_root
53 $ hg status --cwd a .
53 $ hg status --cwd a .
54 ? 1/in_a_1
54 ? 1/in_a_1
55 ? in_a
55 ? in_a
56 $ hg status --cwd a ..
56 $ hg status --cwd a ..
57 ? 1/in_a_1
57 ? 1/in_a_1
58 ? in_a
58 ? in_a
59 ? ../b/1/in_b_1
59 ? ../b/1/in_b_1
60 ? ../b/2/in_b_2
60 ? ../b/2/in_b_2
61 ? ../b/in_b
61 ? ../b/in_b
62 ? ../in_root
62 ? ../in_root
63
63
64 $ hg status --cwd b
64 $ hg status --cwd b
65 ? a/1/in_a_1
65 ? a/1/in_a_1
66 ? a/in_a
66 ? a/in_a
67 ? b/1/in_b_1
67 ? b/1/in_b_1
68 ? b/2/in_b_2
68 ? b/2/in_b_2
69 ? b/in_b
69 ? b/in_b
70 ? in_root
70 ? in_root
71 $ hg status --cwd b .
71 $ hg status --cwd b .
72 ? 1/in_b_1
72 ? 1/in_b_1
73 ? 2/in_b_2
73 ? 2/in_b_2
74 ? in_b
74 ? in_b
75 $ hg status --cwd b ..
75 $ hg status --cwd b ..
76 ? ../a/1/in_a_1
76 ? ../a/1/in_a_1
77 ? ../a/in_a
77 ? ../a/in_a
78 ? 1/in_b_1
78 ? 1/in_b_1
79 ? 2/in_b_2
79 ? 2/in_b_2
80 ? in_b
80 ? in_b
81 ? ../in_root
81 ? ../in_root
82
82
83 $ hg status --cwd a/1
83 $ hg status --cwd a/1
84 ? a/1/in_a_1
84 ? a/1/in_a_1
85 ? a/in_a
85 ? a/in_a
86 ? b/1/in_b_1
86 ? b/1/in_b_1
87 ? b/2/in_b_2
87 ? b/2/in_b_2
88 ? b/in_b
88 ? b/in_b
89 ? in_root
89 ? in_root
90 $ hg status --cwd a/1 .
90 $ hg status --cwd a/1 .
91 ? in_a_1
91 ? in_a_1
92 $ hg status --cwd a/1 ..
92 $ hg status --cwd a/1 ..
93 ? in_a_1
93 ? in_a_1
94 ? ../in_a
94 ? ../in_a
95
95
96 $ hg status --cwd b/1
96 $ hg status --cwd b/1
97 ? a/1/in_a_1
97 ? a/1/in_a_1
98 ? a/in_a
98 ? a/in_a
99 ? b/1/in_b_1
99 ? b/1/in_b_1
100 ? b/2/in_b_2
100 ? b/2/in_b_2
101 ? b/in_b
101 ? b/in_b
102 ? in_root
102 ? in_root
103 $ hg status --cwd b/1 .
103 $ hg status --cwd b/1 .
104 ? in_b_1
104 ? in_b_1
105 $ hg status --cwd b/1 ..
105 $ hg status --cwd b/1 ..
106 ? in_b_1
106 ? in_b_1
107 ? ../2/in_b_2
107 ? ../2/in_b_2
108 ? ../in_b
108 ? ../in_b
109
109
110 $ hg status --cwd b/2
110 $ hg status --cwd b/2
111 ? a/1/in_a_1
111 ? a/1/in_a_1
112 ? a/in_a
112 ? a/in_a
113 ? b/1/in_b_1
113 ? b/1/in_b_1
114 ? b/2/in_b_2
114 ? b/2/in_b_2
115 ? b/in_b
115 ? b/in_b
116 ? in_root
116 ? in_root
117 $ hg status --cwd b/2 .
117 $ hg status --cwd b/2 .
118 ? in_b_2
118 ? in_b_2
119 $ hg status --cwd b/2 ..
119 $ hg status --cwd b/2 ..
120 ? ../1/in_b_1
120 ? ../1/in_b_1
121 ? in_b_2
121 ? in_b_2
122 ? ../in_b
122 ? ../in_b
123
123
124 combining patterns with root and patterns without a root works
124 combining patterns with root and patterns without a root works
125
125
126 $ hg st a/in_a re:.*b$
126 $ hg st a/in_a re:.*b$
127 ? a/in_a
127 ? a/in_a
128 ? b/in_b
128 ? b/in_b
129
129
130 tweaking defaults works
130 tweaking defaults works
131 $ hg status --cwd a --config ui.tweakdefaults=yes
131 $ hg status --cwd a --config ui.tweakdefaults=yes
132 ? 1/in_a_1
132 ? 1/in_a_1
133 ? in_a
133 ? in_a
134 ? ../b/1/in_b_1
134 ? ../b/1/in_b_1
135 ? ../b/2/in_b_2
135 ? ../b/2/in_b_2
136 ? ../b/in_b
136 ? ../b/in_b
137 ? ../in_root
137 ? ../in_root
138 $ HGPLAIN=1 hg status --cwd a --config ui.tweakdefaults=yes
138 $ HGPLAIN=1 hg status --cwd a --config ui.tweakdefaults=yes
139 ? a/1/in_a_1 (glob)
139 ? a/1/in_a_1 (glob)
140 ? a/in_a (glob)
140 ? a/in_a (glob)
141 ? b/1/in_b_1 (glob)
141 ? b/1/in_b_1 (glob)
142 ? b/2/in_b_2 (glob)
142 ? b/2/in_b_2 (glob)
143 ? b/in_b (glob)
143 ? b/in_b (glob)
144 ? in_root
144 ? in_root
145 $ HGPLAINEXCEPT=tweakdefaults hg status --cwd a --config ui.tweakdefaults=yes
145 $ HGPLAINEXCEPT=tweakdefaults hg status --cwd a --config ui.tweakdefaults=yes
146 ? 1/in_a_1
146 ? 1/in_a_1
147 ? in_a
147 ? in_a
148 ? ../b/1/in_b_1
148 ? ../b/1/in_b_1
149 ? ../b/2/in_b_2
149 ? ../b/2/in_b_2
150 ? ../b/in_b
150 ? ../b/in_b
151 ? ../in_root (glob)
151 ? ../in_root (glob)
152
152
153 relative paths can be requested
153 relative paths can be requested
154
154
155 $ hg status --cwd a --config ui.relative-paths=yes
155 $ hg status --cwd a --config ui.relative-paths=yes
156 ? 1/in_a_1
156 ? 1/in_a_1
157 ? in_a
157 ? in_a
158 ? ../b/1/in_b_1
158 ? ../b/1/in_b_1
159 ? ../b/2/in_b_2
159 ? ../b/2/in_b_2
160 ? ../b/in_b
160 ? ../b/in_b
161 ? ../in_root
161 ? ../in_root
162
162
163 $ hg status --cwd a . --config ui.relative-paths=legacy
163 $ hg status --cwd a . --config ui.relative-paths=legacy
164 ? 1/in_a_1
164 ? 1/in_a_1
165 ? in_a
165 ? in_a
166 $ hg status --cwd a . --config ui.relative-paths=no
166 $ hg status --cwd a . --config ui.relative-paths=no
167 ? a/1/in_a_1
167 ? a/1/in_a_1
168 ? a/in_a
168 ? a/in_a
169
169
170 commands.status.relative overrides ui.relative-paths
170 commands.status.relative overrides ui.relative-paths
171
171
172 $ cat >> $HGRCPATH <<EOF
172 $ cat >> $HGRCPATH <<EOF
173 > [ui]
173 > [ui]
174 > relative-paths = False
174 > relative-paths = False
175 > [commands]
175 > [commands]
176 > status.relative = True
176 > status.relative = True
177 > EOF
177 > EOF
178 $ hg status --cwd a
178 $ hg status --cwd a
179 ? 1/in_a_1
179 ? 1/in_a_1
180 ? in_a
180 ? in_a
181 ? ../b/1/in_b_1
181 ? ../b/1/in_b_1
182 ? ../b/2/in_b_2
182 ? ../b/2/in_b_2
183 ? ../b/in_b
183 ? ../b/in_b
184 ? ../in_root
184 ? ../in_root
185 $ HGPLAIN=1 hg status --cwd a
185 $ HGPLAIN=1 hg status --cwd a
186 ? a/1/in_a_1 (glob)
186 ? a/1/in_a_1 (glob)
187 ? a/in_a (glob)
187 ? a/in_a (glob)
188 ? b/1/in_b_1 (glob)
188 ? b/1/in_b_1 (glob)
189 ? b/2/in_b_2 (glob)
189 ? b/2/in_b_2 (glob)
190 ? b/in_b (glob)
190 ? b/in_b (glob)
191 ? in_root
191 ? in_root
192
192
193 if relative paths are explicitly off, tweakdefaults doesn't change it
if relative paths are explicitly off, tweakdefaults doesn't change it
  $ cat >> $HGRCPATH <<EOF
  > [commands]
  > status.relative = False
  > EOF
  $ hg status --cwd a --config ui.tweakdefaults=yes
  ? a/1/in_a_1
  ? a/in_a
  ? b/1/in_b_1
  ? b/2/in_b_2
  ? b/in_b
  ? in_root

  $ cd ..

  $ hg init repo2
  $ cd repo2
  $ touch modified removed deleted ignored
  $ echo "^ignored$" > .hgignore
  $ hg ci -A -m 'initial checkin'
  adding .hgignore
  adding deleted
  adding modified
  adding removed
  $ touch modified added unknown ignored
  $ hg add added
  $ hg remove removed
  $ rm deleted

hg status:

  $ hg status
  A added
  R removed
  ! deleted
  ? unknown

hg status modified added removed deleted unknown never-existed ignored:

  $ hg status modified added removed deleted unknown never-existed ignored
  never-existed: * (glob)
  A added
  R removed
  ! deleted
  ? unknown

  $ hg copy modified copied

hg status -C:

  $ hg status -C
  A added
  A copied
    modified
  R removed
  ! deleted
  ? unknown

hg status -A:

  $ hg status -A
  A added
  A copied
    modified
  R removed
  ! deleted
  ? unknown
  I ignored
  C .hgignore
  C modified

  $ hg status -A -T '{status} {path} {node|shortest}\n'
  A added ffff
  A copied ffff
  R removed ffff
  ! deleted ffff
  ? unknown ffff
  I ignored ffff
  C .hgignore ffff
  C modified ffff

  $ hg status -A -Tjson
  [
   {
    "itemtype": "file",
    "path": "added",
    "status": "A"
   },
   {
    "itemtype": "file",
    "path": "copied",
    "source": "modified",
    "status": "A"
   },
   {
    "itemtype": "file",
    "path": "removed",
    "status": "R"
   },
   {
    "itemtype": "file",
    "path": "deleted",
    "status": "!"
   },
   {
    "itemtype": "file",
    "path": "unknown",
    "status": "?"
   },
   {
    "itemtype": "file",
    "path": "ignored",
    "status": "I"
   },
   {
    "itemtype": "file",
    "path": ".hgignore",
    "status": "C"
   },
   {
    "itemtype": "file",
    "path": "modified",
    "status": "C"
   }
  ]

  $ hg status -A -Tpickle > pickle
  >>> from __future__ import print_function
  >>> from mercurial import util
  >>> pickle = util.pickle
  >>> data = sorted((x[b'status'].decode(), x[b'path'].decode()) for x in pickle.load(open("pickle", r"rb")))
  >>> for s, p in data: print("%s %s" % (s, p))
  ! deleted
  ? pickle
  ? unknown
  A added
  A copied
  C .hgignore
  C modified
  I ignored
  R removed
  $ rm pickle

  $ echo "^ignoreddir$" > .hgignore
  $ mkdir ignoreddir
  $ touch ignoreddir/file

Test templater support:

  $ hg status -AT "[{status}]\t{if(source, '{source} -> ')}{path}\n"
  [M] .hgignore
  [A] added
  [A] modified -> copied
  [R] removed
  [!] deleted
  [?] ignored
  [?] unknown
  [I] ignoreddir/file
  [C] modified
  $ hg status -AT default
  M .hgignore
  A added
  A copied
    modified
  R removed
  ! deleted
  ? ignored
  ? unknown
  I ignoreddir/file
  C modified
  $ hg status -T compact
  abort: "status" not in template map
  [255]

hg status ignoreddir/file:

  $ hg status ignoreddir/file

hg status -i ignoreddir/file:

  $ hg status -i ignoreddir/file
  I ignoreddir/file
  $ cd ..

Check 'status -q' and some combinations

  $ hg init repo3
  $ cd repo3
  $ touch modified removed deleted ignored
  $ echo "^ignored$" > .hgignore
  $ hg commit -A -m 'initial checkin'
  adding .hgignore
  adding deleted
  adding modified
  adding removed
  $ touch added unknown ignored
  $ hg add added
  $ echo "test" >> modified
  $ hg remove removed
  $ rm deleted
  $ hg copy modified copied

Specifying the working directory revision explicitly should give the same
result as plain "hg status"

  $ hg status --change "wdir()"
  M modified
  A added
  A copied
  R removed
  ! deleted
  ? unknown

Run status with two different sets of flags, check whether the results
are the same or different, and report an error if the outcome is not as
expected.

  $ assert() {
  >     hg status $1 > ../a
  >     hg status $2 > ../b
  >     if diff ../a ../b > /dev/null; then
  >         out=0
  >     else
  >         out=1
  >     fi
  >     if [ $3 -eq 0 ]; then
  >         df="same"
  >     else
  >         df="different"
  >     fi
  >     if [ $out -ne $3 ]; then
  >         echo "Error on $1 and $2, should be $df."
  >     fi
  > }

Assert flag1 flag2 [0-same | 1-different]

  $ assert "-q" "-mard" 0
  $ assert "-A" "-marduicC" 0
  $ assert "-qA" "-mardcC" 0
  $ assert "-qAui" "-A" 0
  $ assert "-qAu" "-marducC" 0
  $ assert "-qAi" "-mardicC" 0
  $ assert "-qu" "-u" 0
  $ assert "-q" "-u" 1
  $ assert "-m" "-a" 1
  $ assert "-r" "-d" 1
  $ cd ..

  $ hg init repo4
  $ cd repo4
  $ touch modified removed deleted
  $ hg ci -q -A -m 'initial checkin'
  $ touch added unknown
  $ hg add added
  $ hg remove removed
  $ rm deleted
  $ echo x > modified
  $ hg copy modified copied
  $ hg ci -m 'test checkin' -d "1000001 0"
  $ rm *
  $ touch unrelated
  $ hg ci -q -A -m 'unrelated checkin' -d "1000002 0"

hg status --change 1:

  $ hg status --change 1
  M modified
  A added
  A copied
  R removed

hg status --change 1 unrelated:

  $ hg status --change 1 unrelated

hg status -C --change 1 added modified copied removed deleted:

  $ hg status -C --change 1 added modified copied removed deleted
  M modified
  A added
  A copied
    modified
  R removed

hg status -A --change 1 and revset:

  $ hg status -A --change '1|1'
  M modified
  A added
  A copied
    modified
  R removed
  C deleted

  $ cd ..

hg status with --rev and reverted changes:

  $ hg init reverted-changes-repo
  $ cd reverted-changes-repo
  $ echo a > file
  $ hg add file
  $ hg ci -m a
  $ echo b > file
  $ hg ci -m b

reverted file should appear clean

  $ hg revert -r 0 .
  reverting file
  $ hg status -A --rev 0
  C file

#if execbit
reverted file with changed flag should appear modified

  $ chmod +x file
  $ hg status -A --rev 0
  M file

  $ hg revert -r 0 .
  reverting file

reverted and committed file with changed flag should appear modified

  $ hg co -C .
  1 files updated, 0 files merged, 0 files removed, 0 files unresolved
  $ chmod +x file
  $ hg ci -m 'change flag'
  $ hg status -A --rev 1 --rev 2
  M file
  $ hg diff -r 1 -r 2

#endif

  $ cd ..

hg status of binary file starting with '\1\n', a separator for metadata:

  $ hg init repo5
  $ cd repo5
  >>> open("010a", r"wb").write(b"\1\nfoo") and None
  $ hg ci -q -A -m 'initial checkin'
  $ hg status -A
  C 010a

  >>> open("010a", r"wb").write(b"\1\nbar") and None
  $ hg status -A
  M 010a
  $ hg ci -q -m 'modify 010a'
  $ hg status -A --rev 0:1
  M 010a

  $ touch empty
  $ hg ci -q -A -m 'add another file'
  $ hg status -A --rev 1:2 010a
  C 010a

  $ cd ..

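As an aside, the '\1\n' marker is how filelog revisions embed metadata
(for example a copy source); file content that itself begins with '\1\n'
is stored behind an empty metadata block so the two cases stay
distinguishable. A minimal illustrative sketch of that framing, not
Mercurial's actual API (parse_filelog_text is a hypothetical helper):

def parse_filelog_text(raw):
    # A leading "\1\n" announces a metadata block, terminated by the
    # next "\1\n"; everything after that is the real file content.
    if raw.startswith(b"\1\n"):
        end = raw.index(b"\1\n", 2)
        meta = dict(line.split(b": ", 1)
                    for line in raw[2:end].splitlines() if line)
        return meta, raw[end + 2:]
    return {}, raw
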
Test "hg status" with a "directory pattern" which matches only files
known in the target revision.

  $ hg init repo6
  $ cd repo6

  $ echo a > a.txt
  $ hg add a.txt
  $ hg commit -m '#0'
  $ mkdir -p 1/2/3/4/5
  $ echo b > 1/2/3/4/5/b.txt
  $ hg add 1/2/3/4/5/b.txt
  $ hg commit -m '#1'

  $ hg update -C 0 > /dev/null
  $ hg status -A
  C a.txt

The directory matching the specified pattern should be removed, because
its existence prevents 'dirstate.walk()' from showing a warning message
about such a pattern.

  $ test ! -d 1
  $ hg status -A --rev 1 1/2/3/4/5/b.txt
  R 1/2/3/4/5/b.txt
  $ hg status -A --rev 1 1/2/3/4/5
  R 1/2/3/4/5/b.txt
  $ hg status -A --rev 1 1/2/3
  R 1/2/3/4/5/b.txt
  $ hg status -A --rev 1 1
  R 1/2/3/4/5/b.txt

  $ hg status --config ui.formatdebug=True --rev 1 1
  status = [
      {
          'itemtype': 'file',
          'path': '1/2/3/4/5/b.txt',
          'status': 'R'
      },
  ]

#if windows
  $ hg --config ui.slash=false status -A --rev 1 1
  R 1\2\3\4\5\b.txt
#endif

  $ cd ..

Status after move overwriting a file (issue4458)
=================================================


  $ hg init issue4458
  $ cd issue4458
  $ echo a > a
  $ echo b > b
  $ hg commit -Am base
  adding a
  adding b


with --force

  $ hg mv b --force a
  $ hg st --copies
  M a
    b
  R b
  $ hg revert --all
  reverting a
  undeleting b
  $ rm *.orig

without force

  $ hg rm a
  $ hg st --copies
  R a
  $ hg mv b a
  $ hg st --copies
  M a
    b
  R b

using ui.statuscopies setting
  $ hg st --config ui.statuscopies=true
  M a
    b
  R b
  $ hg st --config ui.statuscopies=false
  M a
  R b
  $ hg st --config ui.tweakdefaults=yes
  M a
    b
  R b

using log status template (issue5155)
  $ hg log -Tstatus -r 'wdir()' -C
  changeset:   2147483647:ffffffffffff
  parent:      0:8c55c58b4c0e
  user:        test
  date:        * (glob)
  files:
  M a
    b
  R b

  $ hg log -GTstatus -r 'wdir()' -C
  o  changeset:   2147483647:ffffffffffff
  |  parent:      0:8c55c58b4c0e
  ~  user:        test
     date:        * (glob)
     files:
     M a
       b
     R b


Another "bug" highlight: the revision status does not report the copy
information. This is buggy behavior.

  $ hg commit -m 'blah'
  $ hg st --copies --change .
  M a
  R b

Using the log status template, the copy information is displayed correctly.
  $ hg log -Tstatus -r. -C
  changeset:   1:6685fde43d21
  tag:         tip
  user:        test
  date:        * (glob)
  summary:     blah
  files:
  M a
    b
  R b


  $ cd ..

Make sure .hg doesn't show up even as a symlink

  $ hg init repo0
  $ mkdir symlink-repo0
  $ cd symlink-repo0
  $ ln -s ../repo0/.hg
  $ hg status

If the size hasn't changed but the mtime has, status needs to read the
contents of the file to check whether it has changed.

  $ echo 1 > a
  $ echo 1 > b
  $ touch -t 200102030000 a b
  $ hg commit -Aqm '#0'
  $ echo 2 > a
  $ touch -t 200102040000 a b
  $ hg status
  M a

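The check sketched below is roughly what that heuristic looks like;
this is illustrative only, with a hypothetical `entry` record standing
in for the size and mtime the dirstate recorded at its last write:

import os

def needs_content_comparison(path, entry):
    st = os.lstat(path)
    if st.st_size != entry.size:
        return False  # size differs: reported modified without reading
    if int(st.st_mtime) == entry.mtime:
        return False  # size and mtime both match: assumed clean
    # Same size but a different mtime: only the contents can tell.
    return True
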
Asking specifically for the status of a deleted/removed file

  $ rm a
  $ rm b
  $ hg status a
  ! a
  $ hg rm a
  $ hg rm b
  $ hg status a
  R a
  $ hg commit -qm '#1'
  $ hg status a
  a: $ENOENT$

Check using include flag with pattern when status does not need to traverse
the working directory (issue6483)

  $ cd ..
  $ hg init issue6483
  $ cd issue6483
  $ touch a.py b.rs
  $ hg add a.py b.rs
  $ hg st -aI "*.py"
  A a.py

Also check exclude pattern

  $ hg st -aX "*.rs"
  A a.py

issue6335
When a directory containing a tracked file gets symlinked, as of 5.8
`hg st` only gives the correct answer about clean (or deleted) files
if also listing unknowns.
The tree-based dirstate and status algorithm fix this:

#if symlink no-dirstate-v1

  $ cd ..
  $ hg init issue6335
  $ cd issue6335
  $ mkdir foo
  $ touch foo/a
  $ hg ci -Ama
  adding foo/a
  $ mv foo bar
  $ ln -s bar foo
  $ hg status
  ! foo/a
  ? bar/a
  ? foo

  $ hg status -c # incorrect output with `dirstate-v1`
  $ hg status -cu
  ? bar/a
  ? foo
  $ hg status -d # incorrect output with `dirstate-v1`
  ! foo/a
  $ hg status -du
  ! foo/a
  ? bar/a
  ? foo

#endif


Create a repo with files in each possible status

  $ cd ..
  $ hg init repo7
  $ cd repo7
  $ mkdir subdir
  $ touch clean modified deleted removed
  $ touch subdir/clean subdir/modified subdir/deleted subdir/removed
  $ echo ignored > .hgignore
  $ hg ci -Aqm '#0'
  $ echo 1 > modified
  $ echo 1 > subdir/modified
  $ rm deleted
  $ rm subdir/deleted
  $ hg rm removed
  $ hg rm subdir/removed
  $ touch unknown ignored
  $ touch subdir/unknown subdir/ignored

Check the output

  $ hg status
  M modified
  M subdir/modified
  R removed
  R subdir/removed
  ! deleted
  ! subdir/deleted
  ? subdir/unknown
  ? unknown

  $ hg status -mard
  M modified
  M subdir/modified
  R removed
  R subdir/removed
  ! deleted
  ! subdir/deleted

  $ hg status -A
  M modified
  M subdir/modified
  R removed
  R subdir/removed
  ! deleted
  ! subdir/deleted
  ? subdir/unknown
  ? unknown
  I ignored
  I subdir/ignored
  C .hgignore
  C clean
  C subdir/clean

Note: `hg status some-name` creates a patternmatcher, which is not yet
supported by the Rust implementation of status, but includematcher is
supported; --include is used below for that reason.

#if unix-permissions

Not having permission to read a directory that contains tracked files makes
status emit a warning then behave as if the directory was empty or removed
entirely:

  $ chmod 0 subdir
  $ hg status --include subdir
  subdir: Permission denied
  R subdir/removed
  ! subdir/clean
  ! subdir/deleted
  ! subdir/modified
  $ chmod 755 subdir

#endif

Remove a directory that contains tracked files

  $ rm -r subdir
  $ hg status --include subdir
  R subdir/removed
  ! subdir/clean
  ! subdir/deleted
  ! subdir/modified

and replace it by a file

  $ touch subdir
  $ hg status --include subdir
  R subdir/removed
  ! subdir/clean
  ! subdir/deleted
  ! subdir/modified
  ? subdir

Replaced a deleted or removed file with a directory

  $ mkdir deleted removed
  $ touch deleted/1 removed/1
  $ hg status --include deleted --include removed
  R removed
  ! deleted
  ? deleted/1
  ? removed/1
  $ hg add removed/1
  $ hg status --include deleted --include removed
  A removed/1
  R removed
  ! deleted
  ? deleted/1

Deeply nested files in an ignored directory are still listed on request

  $ echo ignored-dir >> .hgignore
  $ mkdir ignored-dir
  $ mkdir ignored-dir/subdir
  $ touch ignored-dir/subdir/1
  $ hg status --ignored
  I ignored
  I ignored-dir/subdir/1

Check using include flag while listing ignored composes correctly (issue6514)

  $ cd ..
  $ hg init issue6514
  $ cd issue6514
  $ mkdir ignored-folder
  $ touch A.hs B.hs C.hs ignored-folder/other.txt ignored-folder/ctest.hs
  $ cat >.hgignore <<EOF
  > A.hs
  > B.hs
  > ignored-folder/
  > EOF
  $ hg st -i -I 're:.*\.hs$'
  I A.hs
  I B.hs
  I ignored-folder/ctest.hs

#if dirstate-v2

Check read_dir caching

  $ cd ..
  $ hg init repo8
  $ cd repo8
  $ mkdir subdir
  $ touch subdir/a subdir/b
  $ hg ci -Aqm '#0'

The cached mtime is initially unset

  $ hg debugdirstate --dirs --no-dates | grep '^d'
  d 0 0 unset subdir

It is still not set when there are unknown files

  $ touch subdir/unknown
  $ hg status
  ? subdir/unknown
  $ hg debugdirstate --dirs --no-dates | grep '^d'
  d 0 0 unset subdir

Now the directory is eligible for caching, so its mtime is saved in the dirstate

  $ rm subdir/unknown
  $ hg status
  $ hg debugdirstate --dirs --no-dates | grep '^d'
  d 0 0 set subdir

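A rough sketch of the caching rule as exercised here; this is
illustrative pseudo-logic with hypothetical names, not the real
implementation (which, among other refinements, avoids trusting an
mtime so recent that the directory could still change within the same
timestamp):

def maybe_cache_directory_mtime(dir_entry, children, dir_mtime, now):
    # Only record the mtime when a future status can safely skip
    # read_dir(): unknown files are not stored in the dirstate, so
    # their presence forces a re-listing, and an mtime equal to "now"
    # is not trustworthy yet.
    if all(child.tracked for child in children) and dir_mtime != now:
        dir_entry.cached_mtime = dir_mtime
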
This time the command should be ever so slightly faster since it does not need `read_dir("subdir")`

  $ hg status

Creating a new file changes the directory's mtime, invalidating the cache

  $ touch subdir/unknown
  $ hg status
  ? subdir/unknown

#endif