dirstate-v2: Introduce a docket file...
Simon Sapin
r48474:ff97e793 default

The requested changes are too big; the diff below was truncated by the viewer.

dirstatedocket.py (new file 100644)
@@ -0,0 +1,62 @@
# dirstatedocket.py - docket file for dirstate-v2
#
# Copyright Mercurial Contributors
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import struct

from ..revlogutils import docket as docket_mod


V2_FORMAT_MARKER = b"dirstate-v2\n"

# * 12 bytes: format marker
# * 32 bytes: node ID of the working directory's first parent
# * 32 bytes: node ID of the working directory's second parent
# * 4 bytes: big-endian used size of the data file
# * 1 byte: length of the data file's UUID
# * variable: data file's UUID
#
# Node IDs are null-padded if shorter than 32 bytes.
# A data file shorter than the specified used size is corrupted (truncated).
HEADER = struct.Struct(">{}s32s32sLB".format(len(V2_FORMAT_MARKER)))


class DirstateDocket(object):
    data_filename_pattern = b'dirstate.%s.d'

    def __init__(self, parents, data_size, uuid):
        self.parents = parents
        self.data_size = data_size
        self.uuid = uuid

    @classmethod
    def with_new_uuid(cls, parents, data):
        return cls(parents, data, docket_mod.make_uid())

    @classmethod
    def parse(cls, data, nodeconstants):
        if not data:
            parents = (nodeconstants.nullid, nodeconstants.nullid)
            return cls(parents, 0, None)
        marker, p1, p2, data_size, uuid_size = HEADER.unpack_from(data)
        if marker != V2_FORMAT_MARKER:
            raise ValueError("expected dirstate-v2 marker")
        uuid = data[HEADER.size : HEADER.size + uuid_size]
        p1 = p1[: nodeconstants.nodelen]
        p2 = p2[: nodeconstants.nodelen]
        return cls((p1, p2), data_size, uuid)

    def serialize(self):
        p1, p2 = self.parents
        header = HEADER.pack(
            V2_FORMAT_MARKER, p1, p2, self.data_size, len(self.uuid)
        )
        return header + self.uuid

    def data_filename(self):
        return self.data_filename_pattern % self.uuid
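For orientation, HEADER above packs to a fixed 81-byte prefix (12 + 32 + 32 + 4 + 1) followed by the variable-length UUID. A minimal, standalone sketch of a round trip through this layout (the FakeNodeConstants stub and the sample values are made up for illustration; the struct is re-declared here rather than imported from the module):

import struct

V2_FORMAT_MARKER = b"dirstate-v2\n"
HEADER = struct.Struct(">{}s32s32sLB".format(len(V2_FORMAT_MARKER)))
assert HEADER.size == 81  # 12 + 32 + 32 + 4 + 1

class FakeNodeConstants(object):
    # stand-in for Mercurial's nodeconstants, with sha1-sized nodes
    nodelen = 20
    nullid = b"\x00" * 20

def serialize(p1, p2, data_size, uuid):
    # "32s" null-pads nodes shorter than 32 bytes, as the comment above notes
    return HEADER.pack(V2_FORMAT_MARKER, p1, p2, data_size, len(uuid)) + uuid

def parse(data, nodeconstants):
    marker, p1, p2, data_size, uuid_size = HEADER.unpack_from(data)
    assert marker == V2_FORMAT_MARKER
    uuid = data[HEADER.size : HEADER.size + uuid_size]
    # truncate the padded 32-byte fields back to the real node length
    return p1[: nodeconstants.nodelen], p2[: nodeconstants.nodelen], data_size, uuid

nc = FakeNodeConstants()
raw = serialize(b"\xaa" * 20, nc.nullid, 4096, b"0123456789ab")
assert parse(raw, nc) == (b"\xaa" * 20, nc.nullid, 4096, b"0123456789ab")

The docket itself stays small; the dirstate data lives in the separate dirstate.{uuid}.d file named by data_filename(), whose used size the docket records.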
dirstatenonnormalcheck.py
@@ -1,69 +1,69 @@
# dirstatenonnormalcheck.py - extension to check the consistency of the
# dirstate's non-normal map
#
# For most operations on dirstate, this extension checks that the nonnormalset
# contains the right entries.
# It compares the nonnormal file to a nonnormalset built from the map of all
# the files in the dirstate to check that they contain the same files.

from __future__ import absolute_import

from mercurial import (
    dirstate,
    extensions,
    pycompat,
)


def nonnormalentries(dmap):
    """Compute nonnormal entries from dirstate's dmap"""
    res = set()
    for f, e in dmap.iteritems():
        if e.state != b'n' or e.mtime == -1:
            res.add(f)
    return res


def checkconsistency(ui, orig, dmap, _nonnormalset, label):
    """Compute nonnormalset from dmap, check that it matches _nonnormalset"""
    nonnormalcomputedmap = nonnormalentries(dmap)
    if _nonnormalset != nonnormalcomputedmap:
        b_orig = pycompat.sysbytes(repr(orig))
        ui.develwarn(b"%s call to %s\n" % (label, b_orig), config=b'dirstate')
        ui.develwarn(b"inconsistency in nonnormalset\n", config=b'dirstate')
        b_nonnormal = pycompat.sysbytes(repr(_nonnormalset))
        ui.develwarn(b"[nonnormalset] %s\n" % b_nonnormal, config=b'dirstate')
        b_nonnormalcomputed = pycompat.sysbytes(repr(nonnormalcomputedmap))
        ui.develwarn(b"[map] %s\n" % b_nonnormalcomputed, config=b'dirstate')


-def _checkdirstate(orig, self, arg):
+def _checkdirstate(orig, self, *args, **kwargs):
    """Check nonnormal set consistency before and after the call to orig"""
    checkconsistency(
        self._ui, orig, self._map, self._map.nonnormalset, b"before"
    )
-    r = orig(self, arg)
+    r = orig(self, *args, **kwargs)
    checkconsistency(
        self._ui, orig, self._map, self._map.nonnormalset, b"after"
    )
    return r


def extsetup(ui):
    """Wrap functions modifying dirstate to check nonnormalset consistency"""
    dirstatecl = dirstate.dirstate
    devel = ui.configbool(b'devel', b'all-warnings')
    paranoid = ui.configbool(b'experimental', b'nonnormalparanoidcheck')
    if devel:
        extensions.wrapfunction(dirstatecl, '_writedirstate', _checkdirstate)
    if paranoid:
        # We don't do all these checks when paranoid is disabled as it would
        # make the extension run very slowly on large repos
        extensions.wrapfunction(dirstatecl, 'normallookup', _checkdirstate)
        extensions.wrapfunction(dirstatecl, 'otherparent', _checkdirstate)
        extensions.wrapfunction(dirstatecl, 'normal', _checkdirstate)
        extensions.wrapfunction(dirstatecl, 'write', _checkdirstate)
        extensions.wrapfunction(dirstatecl, 'add', _checkdirstate)
        extensions.wrapfunction(dirstatecl, 'remove', _checkdirstate)
        extensions.wrapfunction(dirstatecl, 'merge', _checkdirstate)
        extensions.wrapfunction(dirstatecl, 'drop', _checkdirstate)
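Why the signature change above matters: extensions.wrapfunction installs a wrapper that is called with the original function as its first argument, and the methods wrapped in extsetup (normal, merge, write, ...) do not all share one arity, so the old fixed `arg` parameter could not forward every call faithfully. A minimal self-contained sketch of that mechanism in plain Python (this models, but is not, Mercurial's extensions module):

def wrapfunction(cls, name, wrapper):
    # simplified model of extensions.wrapfunction: the wrapper receives
    # the original function first, then the original arguments
    orig = getattr(cls, name)

    def wrapped(self, *args, **kwargs):
        return wrapper(orig, self, *args, **kwargs)

    setattr(cls, name, wrapped)


class Dirstate(object):
    def normal(self, f):
        return ('normal', f)

    def merge(self, f, other=None):
        return ('merge', f, other)


def check(orig, self, *args, **kwargs):
    # shaped like _checkdirstate above: inspect, delegate, inspect again
    return orig(self, *args, **kwargs)


wrapfunction(Dirstate, 'normal', check)
wrapfunction(Dirstate, 'merge', check)

d = Dirstate()
assert d.normal('a') == ('normal', 'a')
assert d.merge('a', other='b') == ('merge', 'a', 'b')

Because check forwards *args and **kwargs verbatim, one wrapper now fits every wrapped method regardless of its signature.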
debugcommands.py
@@ -1,4833 +1,4852 @@
# debugcommands.py - command processing for debug* commands
#
# Copyright 2005-2016 Olivia Mackall <olivia@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

+import binascii
import codecs
import collections
import contextlib
import difflib
import errno
import glob
import operator
import os
import platform
import random
import re
import socket
import ssl
import stat
import string
import subprocess
import sys
import time

from .i18n import _
from .node import (
    bin,
    hex,
    nullrev,
    short,
)
from .pycompat import (
    getattr,
    open,
)
from . import (
    bundle2,
    bundlerepo,
    changegroup,
    cmdutil,
    color,
    context,
    copies,
    dagparser,
    encoding,
    error,
    exchange,
    extensions,
    filemerge,
    filesetlang,
    formatter,
    hg,
    httppeer,
    localrepo,
    lock as lockmod,
    logcmdutil,
    mergestate as mergestatemod,
    metadata,
    obsolete,
    obsutil,
    pathutil,
    phases,
    policy,
    pvec,
    pycompat,
    registrar,
    repair,
    repoview,
    revlog,
    revset,
    revsetlang,
    scmutil,
    setdiscovery,
    simplemerge,
    sshpeer,
    sslutil,
    streamclone,
    strip,
    tags as tagsmod,
    templater,
    treediscovery,
    upgrade,
    url as urlmod,
    util,
    vfs as vfsmod,
    wireprotoframing,
    wireprotoserver,
    wireprotov2peer,
)
from .interfaces import repository
from .utils import (
    cborutil,
    compression,
    dateutil,
    procutil,
    stringutil,
    urlutil,
)

from .revlogutils import (
    deltas as deltautil,
    nodemap,
    sidedata,
)

release = lockmod.release

table = {}
table.update(strip.command._table)
command = registrar.command(table)

@command(b'debugancestor', [], _(b'[INDEX] REV1 REV2'), optionalrepo=True)
def debugancestor(ui, repo, *args):
    """find the ancestor revision of two revisions in a given index"""
    if len(args) == 3:
        index, rev1, rev2 = args
        r = revlog.revlog(vfsmod.vfs(encoding.getcwd(), audit=False), index)
        lookup = r.lookup
    elif len(args) == 2:
        if not repo:
            raise error.Abort(
                _(b'there is no Mercurial repository here (.hg not found)')
            )
        rev1, rev2 = args
        r = repo.changelog
        lookup = repo.lookup
    else:
        raise error.Abort(_(b'either two or three arguments required'))
    a = r.ancestor(lookup(rev1), lookup(rev2))
    ui.write(b'%d:%s\n' % (r.rev(a), hex(a)))


@command(b'debugantivirusrunning', [])
def debugantivirusrunning(ui, repo):
    """attempt to trigger an antivirus scanner to see if one is active"""
    with repo.cachevfs.open('eicar-test-file.com', b'wb') as f:
        f.write(
            util.b85decode(
                # This is a base85-armored version of the EICAR test file. See
                # https://en.wikipedia.org/wiki/EICAR_test_file for details.
                b'ST#=}P$fV?P+K%yP+C|uG$>GBDK|qyDK~v2MM*<JQY}+dK~6+LQba95P'
                b'E<)&Nm5l)EmTEQR4qnHOhq9iNGnJx'
            )
        )
    # Give an AV engine time to scan the file.
    time.sleep(2)
    util.unlink(repo.cachevfs.join('eicar-test-file.com'))


@command(b'debugapplystreamclonebundle', [], b'FILE')
def debugapplystreamclonebundle(ui, repo, fname):
    """apply a stream clone bundle file"""
    f = hg.openpath(ui, fname)
    gen = exchange.readbundle(ui, f, fname)
    gen.apply(repo)

@command(
    b'debugbuilddag',
    [
        (
            b'm',
            b'mergeable-file',
            None,
            _(b'add single file mergeable changes'),
        ),
        (
            b'o',
            b'overwritten-file',
            None,
            _(b'add single file all revs overwrite'),
        ),
        (b'n', b'new-file', None, _(b'add new file at each rev')),
    ],
    _(b'[OPTION]... [TEXT]'),
)
def debugbuilddag(
    ui,
    repo,
    text=None,
    mergeable_file=False,
    overwritten_file=False,
    new_file=False,
):
    """builds a repo with a given DAG from scratch in the current empty repo

    The description of the DAG is read from stdin if not given on the
    command line.

    Elements:

    - "+n" is a linear run of n nodes based on the current default parent
    - "." is a single node based on the current default parent
    - "$" resets the default parent to null (implied at the start);
      otherwise the default parent is always the last node created
    - "<p" sets the default parent to the backref p
    - "*p" is a fork at parent p, which is a backref
    - "*p1/p2" is a merge of parents p1 and p2, which are backrefs
    - "/p2" is a merge of the preceding node and p2
    - ":tag" defines a local tag for the preceding node
    - "@branch" sets the named branch for subsequent nodes
    - "#...\\n" is a comment up to the end of the line

    Whitespace between the above elements is ignored.

    A backref is either

    - a number n, which references the node curr-n, where curr is the current
      node, or
    - the name of a local tag you placed earlier using ":tag", or
    - empty to denote the default parent.

    All string-valued elements are either strictly alphanumeric, or must
    be enclosed in double quotes ("..."), with "\\" as escape character.
    """

    if text is None:
        ui.status(_(b"reading DAG from stdin\n"))
        text = ui.fin.read()

    cl = repo.changelog
    if len(cl) > 0:
        raise error.Abort(_(b'repository is not empty'))

    # determine number of revs in DAG
    total = 0
    for type, data in dagparser.parsedag(text):
        if type == b'n':
            total += 1

    if mergeable_file:
        linesperrev = 2
        # make a file with k lines per rev
        initialmergedlines = [
            b'%d' % i for i in pycompat.xrange(0, total * linesperrev)
        ]
        initialmergedlines.append(b"")

    tags = []
    progress = ui.makeprogress(
        _(b'building'), unit=_(b'revisions'), total=total
    )
    with progress, repo.wlock(), repo.lock(), repo.transaction(b"builddag"):
        at = -1
        atbranch = b'default'
        nodeids = []
        id = 0
        progress.update(id)
        for type, data in dagparser.parsedag(text):
            if type == b'n':
                ui.note((b'node %s\n' % pycompat.bytestr(data)))
                id, ps = data

                files = []
                filecontent = {}

                p2 = None
                if mergeable_file:
                    fn = b"mf"
                    p1 = repo[ps[0]]
                    if len(ps) > 1:
                        p2 = repo[ps[1]]
                        pa = p1.ancestor(p2)
                        base, local, other = [
                            x[fn].data() for x in (pa, p1, p2)
                        ]
                        m3 = simplemerge.Merge3Text(base, local, other)
                        ml = [l.strip() for l in m3.merge_lines()]
                        ml.append(b"")
                    elif at > 0:
                        ml = p1[fn].data().split(b"\n")
                    else:
                        ml = initialmergedlines
                    ml[id * linesperrev] += b" r%i" % id
                    mergedtext = b"\n".join(ml)
                    files.append(fn)
                    filecontent[fn] = mergedtext

                if overwritten_file:
                    fn = b"of"
                    files.append(fn)
                    filecontent[fn] = b"r%i\n" % id

                if new_file:
                    fn = b"nf%i" % id
                    files.append(fn)
                    filecontent[fn] = b"r%i\n" % id
                    if len(ps) > 1:
                        if not p2:
                            p2 = repo[ps[1]]
                        for fn in p2:
                            if fn.startswith(b"nf"):
                                files.append(fn)
                                filecontent[fn] = p2[fn].data()

                def fctxfn(repo, cx, path):
                    if path in filecontent:
                        return context.memfilectx(
                            repo, cx, path, filecontent[path]
                        )
                    return None

                if len(ps) == 0 or ps[0] < 0:
                    pars = [None, None]
                elif len(ps) == 1:
                    pars = [nodeids[ps[0]], None]
                else:
                    pars = [nodeids[p] for p in ps]
                cx = context.memctx(
                    repo,
                    pars,
                    b"r%i" % id,
                    files,
                    fctxfn,
                    date=(id, 0),
                    user=b"debugbuilddag",
                    extra={b'branch': atbranch},
                )
                nodeid = repo.commitctx(cx)
                nodeids.append(nodeid)
                at = id
            elif type == b'l':
                id, name = data
                ui.note((b'tag %s\n' % name))
                tags.append(b"%s %s\n" % (hex(repo.changelog.node(id)), name))
            elif type == b'a':
                ui.note((b'branch %s\n' % data))
                atbranch = data
            progress.update(id)

    if tags:
        repo.vfs.write(b"localtags", b"".join(tags))

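A convenient way to sanity-check a DAG text before running debugbuilddag is to feed it through the same parser the command uses above. A small sketch (assumes a Mercurial checkout importable as `mercurial`; the DAG text is illustrative):

from mercurial import dagparser

# '+2' -> r0, r1; ':x' tags r1; '$' resets the default parent to null;
# '+2' -> r2, r3 on the new root; '/x' merges the preceding node with r1
for kind, data in dagparser.parsedag(b'+2:x $ +2 /x'):
    print(kind, data)

Per the element descriptions in the docstring, this should yield b'n' events for five nodes, a b'l' event tagging r1 as 'x', and a final b'n' event whose parent list has two entries (the merge).
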
def _debugchangegroup(ui, gen, all=None, indent=0, **opts):
    indent_string = b' ' * indent
    if all:
        ui.writenoi18n(
            b"%sformat: id, p1, p2, cset, delta base, len(delta)\n"
            % indent_string
        )

        def showchunks(named):
            ui.write(b"\n%s%s\n" % (indent_string, named))
            for deltadata in gen.deltaiter():
                node, p1, p2, cs, deltabase, delta, flags, sidedata = deltadata
                ui.write(
                    b"%s%s %s %s %s %s %d\n"
                    % (
                        indent_string,
                        hex(node),
                        hex(p1),
                        hex(p2),
                        hex(cs),
                        hex(deltabase),
                        len(delta),
                    )
                )

        gen.changelogheader()
        showchunks(b"changelog")
        gen.manifestheader()
        showchunks(b"manifest")
        for chunkdata in iter(gen.filelogheader, {}):
            fname = chunkdata[b'filename']
            showchunks(fname)
    else:
        if isinstance(gen, bundle2.unbundle20):
            raise error.Abort(_(b'use debugbundle2 for this file'))
        gen.changelogheader()
        for deltadata in gen.deltaiter():
            node, p1, p2, cs, deltabase, delta, flags, sidedata = deltadata
            ui.write(b"%s%s\n" % (indent_string, hex(node)))


def _debugobsmarkers(ui, part, indent=0, **opts):
    """display version and markers contained in 'data'"""
    opts = pycompat.byteskwargs(opts)
    data = part.read()
    indent_string = b' ' * indent
    try:
        version, markers = obsolete._readmarkers(data)
    except error.UnknownVersion as exc:
        msg = b"%sunsupported version: %s (%d bytes)\n"
        msg %= indent_string, exc.version, len(data)
        ui.write(msg)
    else:
        msg = b"%sversion: %d (%d bytes)\n"
        msg %= indent_string, version, len(data)
        ui.write(msg)
        fm = ui.formatter(b'debugobsolete', opts)
        for rawmarker in sorted(markers):
            m = obsutil.marker(None, rawmarker)
            fm.startitem()
            fm.plain(indent_string)
            cmdutil.showmarker(fm, m)
        fm.end()


def _debugphaseheads(ui, data, indent=0):
    """display phase heads contained in 'data'"""
    indent_string = b' ' * indent
    headsbyphase = phases.binarydecode(data)
    for phase in phases.allphases:
        for head in headsbyphase[phase]:
            ui.write(indent_string)
            ui.write(b'%s %s\n' % (hex(head), phases.phasenames[phase]))


def _quasirepr(thing):
    if isinstance(thing, (dict, util.sortdict, collections.OrderedDict)):
        return b'{%s}' % (
            b', '.join(b'%s: %s' % (k, thing[k]) for k in sorted(thing))
        )
    return pycompat.bytestr(repr(thing))


def _debugbundle2(ui, gen, all=None, **opts):
    """lists the contents of a bundle2"""
    if not isinstance(gen, bundle2.unbundle20):
        raise error.Abort(_(b'not a bundle2 file'))
    ui.write((b'Stream params: %s\n' % _quasirepr(gen.params)))
    parttypes = opts.get('part_type', [])
    for part in gen.iterparts():
        if parttypes and part.type not in parttypes:
            continue
        msg = b'%s -- %s (mandatory: %r)\n'
        ui.write((msg % (part.type, _quasirepr(part.params), part.mandatory)))
        if part.type == b'changegroup':
            version = part.params.get(b'version', b'01')
            cg = changegroup.getunbundler(version, part, b'UN')
            if not ui.quiet:
                _debugchangegroup(ui, cg, all=all, indent=4, **opts)
        if part.type == b'obsmarkers':
            if not ui.quiet:
                _debugobsmarkers(ui, part, indent=4, **opts)
        if part.type == b'phase-heads':
            if not ui.quiet:
                _debugphaseheads(ui, part, indent=4)


@command(
    b'debugbundle',
    [
        (b'a', b'all', None, _(b'show all details')),
        (b'', b'part-type', [], _(b'show only the named part type')),
        (b'', b'spec', None, _(b'print the bundlespec of the bundle')),
    ],
    _(b'FILE'),
    norepo=True,
)
def debugbundle(ui, bundlepath, all=None, spec=None, **opts):
    """lists the contents of a bundle"""
    with hg.openpath(ui, bundlepath) as f:
        if spec:
            spec = exchange.getbundlespec(ui, f)
            ui.write(b'%s\n' % spec)
            return

        gen = exchange.readbundle(ui, f, bundlepath)
        if isinstance(gen, bundle2.unbundle20):
            return _debugbundle2(ui, gen, all=all, **opts)
        _debugchangegroup(ui, gen, all=all, **opts)

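Typical invocations, for reference (flags as declared in the @command table above; all.hg is an illustrative filename):

$ hg bundle --all all.hg
$ hg debugbundle all.hg          # node list, or bundle2 part summary
$ hg debugbundle --all all.hg    # also show per-chunk delta metadata
$ hg debugbundle --spec all.hg   # print only the bundlespec, then exit
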
@command(b'debugcapabilities', [], _(b'PATH'), norepo=True)
def debugcapabilities(ui, path, **opts):
    """lists the capabilities of a remote peer"""
    opts = pycompat.byteskwargs(opts)
    peer = hg.peer(ui, opts, path)
    try:
        caps = peer.capabilities()
        ui.writenoi18n(b'Main capabilities:\n')
        for c in sorted(caps):
            ui.write(b' %s\n' % c)
        b2caps = bundle2.bundle2caps(peer)
        if b2caps:
            ui.writenoi18n(b'Bundle2 capabilities:\n')
            for key, values in sorted(pycompat.iteritems(b2caps)):
                ui.write(b' %s\n' % key)
                for v in values:
                    ui.write(b' %s\n' % v)
    finally:
        peer.close()


@command(
    b'debugchangedfiles',
    [
        (
            b'',
            b'compute',
            False,
            b"compute information instead of reading it from storage",
        ),
    ],
    b'REV',
)
def debugchangedfiles(ui, repo, rev, **opts):
    """list the stored files changes for a revision"""
    ctx = scmutil.revsingle(repo, rev, None)
    files = None

    if opts['compute']:
        files = metadata.compute_all_files_changes(ctx)
    else:
        sd = repo.changelog.sidedata(ctx.rev())
        files_block = sd.get(sidedata.SD_FILES)
        if files_block is not None:
            files = metadata.decode_files_sidedata(sd)
    if files is not None:
        for f in sorted(files.touched):
            if f in files.added:
                action = b"added"
            elif f in files.removed:
                action = b"removed"
            elif f in files.merged:
                action = b"merged"
            elif f in files.salvaged:
                action = b"salvaged"
            else:
                action = b"touched"

            copy_parent = b""
            copy_source = b""
            if f in files.copied_from_p1:
                copy_parent = b"p1"
                copy_source = files.copied_from_p1[f]
            elif f in files.copied_from_p2:
                copy_parent = b"p2"
                copy_source = files.copied_from_p2[f]

            data = (action, copy_parent, f, copy_source)
            template = b"%-8s %2s: %s, %s;\n"
            ui.write(template % data)

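For reference, the template above prints one line per touched file as `action copy_parent: file, copy_source;`; a file copied from the first parent would render roughly as `added    p1: new.txt, old.txt;` (filenames illustrative). The --compute flag exists to cross-check the two code paths:

$ hg debugchangedfiles 5            # read the stored files sidedata, if any
$ hg debugchangedfiles --compute 5  # recompute the same data from the changeset
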
@command(b'debugcheckstate', [], b'')
def debugcheckstate(ui, repo):
    """validate the correctness of the current dirstate"""
    parent1, parent2 = repo.dirstate.parents()
    m1 = repo[parent1].manifest()
    m2 = repo[parent2].manifest()
    errors = 0
    for f in repo.dirstate:
        state = repo.dirstate[f]
        if state in b"nr" and f not in m1:
            ui.warn(_(b"%s in state %s, but not in manifest1\n") % (f, state))
            errors += 1
        if state in b"a" and f in m1:
            ui.warn(_(b"%s in state %s, but also in manifest1\n") % (f, state))
            errors += 1
        if state in b"m" and f not in m1 and f not in m2:
            ui.warn(
                _(b"%s in state %s, but not in either manifest\n") % (f, state)
            )
            errors += 1
    for f in m1:
        state = repo.dirstate[f]
        if state not in b"nrm":
            ui.warn(_(b"%s in manifest1, but listed as state %s") % (f, state))
            errors += 1
    if errors:
        errstr = _(b".hg/dirstate inconsistent with current parent's manifest")
        raise error.Abort(errstr)


@command(
    b'debugcolor',
    [(b'', b'style', None, _(b'show all configured styles'))],
    b'hg debugcolor',
)
def debugcolor(ui, repo, **opts):
    """show available color, effects or style"""
    ui.writenoi18n(b'color mode: %s\n' % stringutil.pprint(ui._colormode))
    if opts.get('style'):
        return _debugdisplaystyle(ui)
    else:
        return _debugdisplaycolor(ui)


def _debugdisplaycolor(ui):
    ui = ui.copy()
    ui._styles.clear()
    for effect in color._activeeffects(ui).keys():
        ui._styles[effect] = effect
    if ui._terminfoparams:
        for k, v in ui.configitems(b'color'):
            if k.startswith(b'color.'):
                ui._styles[k] = k[6:]
            elif k.startswith(b'terminfo.'):
                ui._styles[k] = k[9:]
    ui.write(_(b'available colors:\n'))
    # sort label with a '_' after the other to group '_background' entry.
    items = sorted(ui._styles.items(), key=lambda i: (b'_' in i[0], i[0], i[1]))
    for colorname, label in items:
        ui.write(b'%s\n' % colorname, label=label)


def _debugdisplaystyle(ui):
    ui.write(_(b'available style:\n'))
    if not ui._styles:
        return
    width = max(len(s) for s in ui._styles)
    for label, effects in sorted(ui._styles.items()):
        ui.write(b'%s' % label, label=label)
        if effects:
            # 50
            ui.write(b': ')
            ui.write(b' ' * (max(0, width - len(label))))
            ui.write(b', '.join(ui.label(e, e) for e in effects.split()))
        ui.write(b'\n')

@command(b'debugcreatestreamclonebundle', [], b'FILE')
def debugcreatestreamclonebundle(ui, repo, fname):
    """create a stream clone bundle file

    Stream bundles are special bundles that are essentially archives of
    revlog files. They are commonly used for cloning very quickly.
    """
    # TODO we may want to turn this into an abort when this functionality
    # is moved into `hg bundle`.
    if phases.hassecret(repo):
        ui.warn(
            _(
                b'(warning: stream clone bundle will contain secret '
                b'revisions)\n'
            )
        )

    requirements, gen = streamclone.generatebundlev1(repo)
    changegroup.writechunks(ui, gen, fname)

    ui.write(_(b'bundle requirements: %s\n') % b', '.join(sorted(requirements)))

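This command pairs with debugapplystreamclonebundle earlier in this file; a plausible round trip looks like the following (repo paths illustrative, and note the secret-revision warning above):

$ hg -R src-repo debugcreatestreamclonebundle stream.hg
$ hg -R dst-repo debugapplystreamclonebundle stream.hg
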
@command(
    b'debugdag',
    [
        (b't', b'tags', None, _(b'use tags as labels')),
        (b'b', b'branches', None, _(b'annotate with branch names')),
        (b'', b'dots', None, _(b'use dots for runs')),
        (b's', b'spaces', None, _(b'separate elements by spaces')),
    ],
    _(b'[OPTION]... [FILE [REV]...]'),
    optionalrepo=True,
)
def debugdag(ui, repo, file_=None, *revs, **opts):
    """format the changelog or an index DAG as a concise textual description

    If you pass a revlog index, the revlog's DAG is emitted. If you list
    revision numbers, they get labeled in the output as rN.

    Otherwise, the changelog DAG of the current repo is emitted.
    """
    spaces = opts.get('spaces')
    dots = opts.get('dots')
    if file_:
        rlog = revlog.revlog(vfsmod.vfs(encoding.getcwd(), audit=False), file_)
        revs = {int(r) for r in revs}

        def events():
            for r in rlog:
                yield b'n', (r, list(p for p in rlog.parentrevs(r) if p != -1))
                if r in revs:
                    yield b'l', (r, b"r%i" % r)

    elif repo:
        cl = repo.changelog
        tags = opts.get('tags')
        branches = opts.get('branches')
        if tags:
            labels = {}
            for l, n in repo.tags().items():
                labels.setdefault(cl.rev(n), []).append(l)

        def events():
            b = b"default"
            for r in cl:
                if branches:
                    newb = cl.read(cl.node(r))[5][b'branch']
                    if newb != b:
                        yield b'a', newb
                        b = newb
                yield b'n', (r, list(p for p in cl.parentrevs(r) if p != -1))
                if tags:
                    ls = labels.get(r)
                    if ls:
                        for l in ls:
                            yield b'l', (r, l)

    else:
        raise error.Abort(_(b'need repo for changelog dag'))

    for line in dagparser.dagtextlines(
        events(),
        addspaces=spaces,
        wraplabels=True,
        wrapannotations=True,
        wrapnonlinear=dots,
        usedots=dots,
        maxlinewidth=70,
    ):
        ui.write(line)
    ui.write(b"\n")

@command(b'debugdata', cmdutil.debugrevlogopts, _(b'-c|-m|FILE REV'))
def debugdata(ui, repo, file_, rev=None, **opts):
    """dump the contents of a data file revision"""
    opts = pycompat.byteskwargs(opts)
    if opts.get(b'changelog') or opts.get(b'manifest') or opts.get(b'dir'):
        if rev is not None:
            raise error.CommandError(b'debugdata', _(b'invalid arguments'))
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'debugdata', _(b'invalid arguments'))
    r = cmdutil.openstorage(repo, b'debugdata', file_, opts)
    try:
        ui.write(r.rawdata(r.lookup(rev)))
    except KeyError:
        raise error.Abort(_(b'invalid revision identifier %s') % rev)


@command(
    b'debugdate',
    [(b'e', b'extended', None, _(b'try extended date formats'))],
    _(b'[-e] DATE [RANGE]'),
    norepo=True,
    optionalrepo=True,
)
def debugdate(ui, date, range=None, **opts):
    """parse and display a date"""
    if opts["extended"]:
        d = dateutil.parsedate(date, dateutil.extendeddateformats)
    else:
        d = dateutil.parsedate(date)
    ui.writenoi18n(b"internal: %d %d\n" % d)
    ui.writenoi18n(b"standard: %s\n" % dateutil.datestr(d))
    if range:
        m = dateutil.matchdate(range)
        ui.writenoi18n(b"match: %s\n" % m(d[0]))

751 @command(
752 @command(
752 b'debugdeltachain',
753 b'debugdeltachain',
753 cmdutil.debugrevlogopts + cmdutil.formatteropts,
754 cmdutil.debugrevlogopts + cmdutil.formatteropts,
754 _(b'-c|-m|FILE'),
755 _(b'-c|-m|FILE'),
755 optionalrepo=True,
756 optionalrepo=True,
756 )
757 )
757 def debugdeltachain(ui, repo, file_=None, **opts):
758 def debugdeltachain(ui, repo, file_=None, **opts):
758 """dump information about delta chains in a revlog
759 """dump information about delta chains in a revlog
759
760
760 Output can be templatized. Available template keywords are:
761 Output can be templatized. Available template keywords are:
761
762
762 :``rev``: revision number
763 :``rev``: revision number
763 :``chainid``: delta chain identifier (numbered by unique base)
764 :``chainid``: delta chain identifier (numbered by unique base)
764 :``chainlen``: delta chain length to this revision
765 :``chainlen``: delta chain length to this revision
765 :``prevrev``: previous revision in delta chain
766 :``prevrev``: previous revision in delta chain
766 :``deltatype``: role of delta / how it was computed
767 :``deltatype``: role of delta / how it was computed
767 :``compsize``: compressed size of revision
768 :``compsize``: compressed size of revision
768 :``uncompsize``: uncompressed size of revision
769 :``uncompsize``: uncompressed size of revision
769 :``chainsize``: total size of compressed revisions in chain
770 :``chainsize``: total size of compressed revisions in chain
770 :``chainratio``: total chain size divided by uncompressed revision size
771 :``chainratio``: total chain size divided by uncompressed revision size
771 (new delta chains typically start at ratio 2.00)
772 (new delta chains typically start at ratio 2.00)
772 :``lindist``: linear distance from base revision in delta chain to end
773 :``lindist``: linear distance from base revision in delta chain to end
773 of this revision
774 of this revision
774 :``extradist``: total size of revisions not part of this delta chain from
775 :``extradist``: total size of revisions not part of this delta chain from
775 base of delta chain to end of this revision; a measurement
776 base of delta chain to end of this revision; a measurement
776 of how much extra data we need to read/seek across to read
777 of how much extra data we need to read/seek across to read
777 the delta chain for this revision
778 the delta chain for this revision
778 :``extraratio``: extradist divided by chainsize; another representation of
779 :``extraratio``: extradist divided by chainsize; another representation of
779 how much unrelated data is needed to load this delta chain
780 how much unrelated data is needed to load this delta chain
780
781
781 If the repository is configured to use the sparse read, additional keywords
782 If the repository is configured to use the sparse read, additional keywords
782 are available:
783 are available:
783
784
784 :``readsize``: total size of data read from the disk for a revision
785 :``readsize``: total size of data read from the disk for a revision
785 (sum of the sizes of all the blocks)
786 (sum of the sizes of all the blocks)
786 :``largestblock``: size of the largest block of data read from the disk
787 :``largestblock``: size of the largest block of data read from the disk
787 :``readdensity``: density of useful bytes in the data read from the disk
788 :``readdensity``: density of useful bytes in the data read from the disk
788 :``srchunks``: in how many data hunks the whole revision would be read
789 :``srchunks``: in how many data hunks the whole revision would be read
789
790
790 The sparse read can be enabled with experimental.sparse-read = True
791 The sparse read can be enabled with experimental.sparse-read = True
791 """
792 """
792 opts = pycompat.byteskwargs(opts)
793 opts = pycompat.byteskwargs(opts)
793 r = cmdutil.openrevlog(repo, b'debugdeltachain', file_, opts)
794 r = cmdutil.openrevlog(repo, b'debugdeltachain', file_, opts)
794 index = r.index
795 index = r.index
795 start = r.start
796 start = r.start
796 length = r.length
797 length = r.length
797 generaldelta = r._generaldelta
798 generaldelta = r._generaldelta
798 withsparseread = getattr(r, '_withsparseread', False)
799 withsparseread = getattr(r, '_withsparseread', False)
799
800
800 def revinfo(rev):
801 def revinfo(rev):
801 e = index[rev]
802 e = index[rev]
802 compsize = e[1]
803 compsize = e[1]
803 uncompsize = e[2]
804 uncompsize = e[2]
804 chainsize = 0
805 chainsize = 0
805
806
806 if generaldelta:
807 if generaldelta:
807 if e[3] == e[5]:
808 if e[3] == e[5]:
808 deltatype = b'p1'
809 deltatype = b'p1'
809 elif e[3] == e[6]:
810 elif e[3] == e[6]:
810 deltatype = b'p2'
811 deltatype = b'p2'
811 elif e[3] == rev - 1:
812 elif e[3] == rev - 1:
812 deltatype = b'prev'
813 deltatype = b'prev'
813 elif e[3] == rev:
814 elif e[3] == rev:
814 deltatype = b'base'
815 deltatype = b'base'
815 else:
816 else:
816 deltatype = b'other'
817 deltatype = b'other'
817 else:
818 else:
818 if e[3] == rev:
819 if e[3] == rev:
819 deltatype = b'base'
820 deltatype = b'base'
820 else:
821 else:
821 deltatype = b'prev'
822 deltatype = b'prev'
822
823
823 chain = r._deltachain(rev)[0]
824 chain = r._deltachain(rev)[0]
824 for iterrev in chain:
825 for iterrev in chain:
825 e = index[iterrev]
826 e = index[iterrev]
826 chainsize += e[1]
827 chainsize += e[1]
827
828
828 return compsize, uncompsize, deltatype, chain, chainsize
829 return compsize, uncompsize, deltatype, chain, chainsize
829
830
830 fm = ui.formatter(b'debugdeltachain', opts)
831 fm = ui.formatter(b'debugdeltachain', opts)
831
832
832 fm.plain(
833 fm.plain(
833 b' rev chain# chainlen prev delta '
834 b' rev chain# chainlen prev delta '
834 b'size rawsize chainsize ratio lindist extradist '
835 b'size rawsize chainsize ratio lindist extradist '
835 b'extraratio'
836 b'extraratio'
836 )
837 )
837 if withsparseread:
838 if withsparseread:
838 fm.plain(b' readsize largestblk rddensity srchunks')
839 fm.plain(b' readsize largestblk rddensity srchunks')
839 fm.plain(b'\n')
840 fm.plain(b'\n')
840
841
841 chainbases = {}
842 chainbases = {}
842 for rev in r:
843 for rev in r:
843 comp, uncomp, deltatype, chain, chainsize = revinfo(rev)
844 comp, uncomp, deltatype, chain, chainsize = revinfo(rev)
844 chainbase = chain[0]
845 chainbase = chain[0]
845 chainid = chainbases.setdefault(chainbase, len(chainbases) + 1)
846 chainid = chainbases.setdefault(chainbase, len(chainbases) + 1)
846 basestart = start(chainbase)
847 basestart = start(chainbase)
847 revstart = start(rev)
848 revstart = start(rev)
848 lineardist = revstart + comp - basestart
849 lineardist = revstart + comp - basestart
849 extradist = lineardist - chainsize
850 extradist = lineardist - chainsize
850 try:
851 try:
851 prevrev = chain[-2]
852 prevrev = chain[-2]
852 except IndexError:
853 except IndexError:
853 prevrev = -1
854 prevrev = -1
854
855
855 if uncomp != 0:
856 if uncomp != 0:
856 chainratio = float(chainsize) / float(uncomp)
857 chainratio = float(chainsize) / float(uncomp)
857 else:
858 else:
858 chainratio = chainsize
859 chainratio = chainsize
859
860
860 if chainsize != 0:
861 if chainsize != 0:
861 extraratio = float(extradist) / float(chainsize)
862 extraratio = float(extradist) / float(chainsize)
862 else:
863 else:
863 extraratio = extradist
864 extraratio = extradist
864
865
        fm.startitem()
        fm.write(
            b'rev chainid chainlen prevrev deltatype compsize '
            b'uncompsize chainsize chainratio lindist extradist '
            b'extraratio',
            b'%7d %7d %8d %8d %7s %10d %10d %10d %9.5f %9d %9d %10.5f',
            rev,
            chainid,
            len(chain),
            prevrev,
            deltatype,
            comp,
            uncomp,
            chainsize,
            chainratio,
            lineardist,
            extradist,
            extraratio,
            rev=rev,
            chainid=chainid,
            chainlen=len(chain),
            prevrev=prevrev,
            deltatype=deltatype,
            compsize=comp,
            uncompsize=uncomp,
            chainsize=chainsize,
            chainratio=chainratio,
            lindist=lineardist,
            extradist=extradist,
            extraratio=extraratio,
        )
        if withsparseread:
            readsize = 0
            largestblock = 0
            srchunks = 0

            for revschunk in deltautil.slicechunk(r, chain):
                srchunks += 1
                blkend = start(revschunk[-1]) + length(revschunk[-1])
                blksize = blkend - start(revschunk[0])

                readsize += blksize
                if largestblock < blksize:
                    largestblock = blksize

            if readsize:
                readdensity = float(chainsize) / float(readsize)
            else:
                readdensity = 1
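            # readdensity: fraction of the bytes actually read that belong
            # to the delta chain; slicechunk splits the chain into slices
            # that are contiguous on disk, so a low density means sparse
            # reading would pull in a lot of unrelated data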

            fm.write(
                b'readsize largestblock readdensity srchunks',
                b' %10d %10d %9.5f %8d',
                readsize,
                largestblock,
                readdensity,
                srchunks,
                readsize=readsize,
                largestblock=largestblock,
                readdensity=readdensity,
                srchunks=srchunks,
            )

        fm.plain(b'\n')

    fm.end()


@command(
    b'debugdirstate|debugstate',
    [
        (
            b'',
            b'nodates',
            None,
            _(b'do not display the saved mtime (DEPRECATED)'),
        ),
        (b'', b'dates', True, _(b'display the saved mtime')),
        (b'', b'datesort', None, _(b'sort by saved mtime')),
        (b'', b'dirs', False, _(b'display directories')),
    ],
    _(b'[OPTION]...'),
)
def debugstate(ui, repo, **opts):
    """show the contents of the current dirstate"""

    nodates = not opts['dates']
    if opts.get('nodates') is not None:
        nodates = True
    datesort = opts.get('datesort')

    if datesort:
        keyfunc = lambda x: (
            x[1].v1_mtime(),
            x[0],
        )  # sort by mtime, then by filename
    else:
        keyfunc = None  # sort by filename
    entries = list(pycompat.iteritems(repo.dirstate))
    if opts['dirs']:
        entries.extend(repo.dirstate.directories())
    entries.sort(key=keyfunc)
    for file_, ent in entries:
        if ent.v1_mtime() == -1:
            timestr = b'unset               '
        elif nodates:
            timestr = b'set                 '
        else:
            timestr = time.strftime(
                "%Y-%m-%d %H:%M:%S ", time.localtime(ent.v1_mtime())
            )
            timestr = encoding.strtolocal(timestr)
        if ent.mode & 0o20000:
            mode = b'lnk'
        else:
            mode = b'%3o' % (ent.v1_mode() & 0o777 & ~util.umask)
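        # one line per entry: state character, octal permissions (masked by
        # the umask, or 'lnk' for symlinks), size, mtime, then the file
        # name; an illustrative line would be:
        #     n 644         12 2021-01-01 12:00:00 foo.txt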
        ui.write(
            b"%c %s %10d %s%s\n"
            % (ent.v1_state(), mode, ent.v1_size(), timestr, file_)
        )
    for f in repo.dirstate.copies():
        ui.write(_(b"copy: %s -> %s\n") % (repo.dirstate.copied(f), f))


@command(
    b'debugdirstateignorepatternshash',
    [],
    _(b''),
)
def debugdirstateignorepatternshash(ui, repo, **opts):
    """show the hash of ignore patterns stored in dirstate if v2,
    or nothing for dirstate-v1
    """
    if repo.dirstate._use_dirstate_v2:
        hash_offset = 16  # Four 32-bit integers before this field
        hash_len = 20  # 160 bits for SHA-1
        data_filename = repo.dirstate._map.docket.data_filename()
        with repo.vfs(data_filename) as f:
            hash_bytes = f.read(hash_offset + hash_len)[-hash_len:]
        ui.write(binascii.hexlify(hash_bytes) + b'\n')
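    # Illustration only (not used by the command): assuming the data file's
    # tree metadata really starts with four big-endian u32 counters followed
    # by the 20-byte SHA-1, the same bytes could be located with struct:
    #
    #     import struct
    #     with repo.vfs(data_filename) as f:
    #         header = f.read(struct.calcsize('>4L20s'))  # 36 bytes
    #     hash_bytes = struct.unpack('>4L20s', header)[4]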


@command(
    b'debugdiscovery',
    [
        (b'', b'old', None, _(b'use old-style discovery')),
        (
            b'',
            b'nonheads',
            None,
            _(b'use old-style discovery with non-heads included'),
        ),
        (b'', b'rev', [], b'restrict discovery to this set of revs'),
        (b'', b'seed', b'12323', b'specify the random seed used for discovery'),
        (
            b'',
            b'local-as-revs',
            b"",
            b'treat local as having these revisions only',
        ),
        (
            b'',
            b'remote-as-revs',
            b"",
            b'use local as remote, with only these revisions',
        ),
    ]
    + cmdutil.remoteopts
    + cmdutil.formatteropts,
    _(b'[--rev REV] [OTHER]'),
)
def debugdiscovery(ui, repo, remoteurl=b"default", **opts):
    """runs the changeset discovery protocol in isolation

    The local peer can be "replaced" by a subset of the local repository by
    using the `--local-as-revs` flag. In the same way, the usual `remote`
    peer can be "replaced" by a subset of the local repository using the
    `--remote-as-revs` flag. This is useful to efficiently debug pathological
    discovery situations.

    The following developer oriented configs are relevant for people playing
    with this command:

    * devel.discovery.exchange-heads=True

      If False, the discovery will not start with
      remote head fetching and local head querying.

    * devel.discovery.grow-sample=True

      If False, the sample size used in set discovery will not be increased
      through the process.

    * devel.discovery.grow-sample.dynamic=True

      When discovery.grow-sample.dynamic is True, the default, the sample
      size is adapted to the shape of the undecided set (it is set to the
      max of: <target-size>, len(roots(undecided)), len(heads(undecided)))

    * devel.discovery.grow-sample.rate=1.05

      The rate at which the sample grows.

    * devel.discovery.randomize=True

      If False, the usually-random sampling during discovery is made
      deterministic. It is meant for integration tests.

    * devel.discovery.sample-size=200

      Control the size of the discovery sample.

    * devel.discovery.sample-size.initial=100

      Control the size of the sample used for the initial discovery round.
    """
    opts = pycompat.byteskwargs(opts)
    unfi = repo.unfiltered()

    # setup potential extra filtering
    local_revs = opts[b"local_as_revs"]
    remote_revs = opts[b"remote_as_revs"]

    # make sure tests are repeatable
    random.seed(int(opts[b'seed']))

    if not remote_revs:

        remoteurl, branches = urlutil.get_unique_pull_path(
            b'debugdiscovery', repo, ui, remoteurl
        )
        remote = hg.peer(repo, opts, remoteurl)
        ui.status(_(b'comparing with %s\n') % urlutil.hidepassword(remoteurl))
    else:
        branches = (None, [])
        remote_filtered_revs = scmutil.revrange(
            unfi, [b"not (::(%s))" % remote_revs]
        )
        remote_filtered_revs = frozenset(remote_filtered_revs)

        def remote_func(x):
            return remote_filtered_revs

        repoview.filtertable[b'debug-discovery-remote-filter'] = remote_func

        remote = repo.peer()
        remote._repo = remote._repo.filtered(b'debug-discovery-remote-filter')

    if local_revs:
        local_filtered_revs = scmutil.revrange(
            unfi, [b"not (::(%s))" % local_revs]
        )
        local_filtered_revs = frozenset(local_filtered_revs)

        def local_func(x):
            return local_filtered_revs

        repoview.filtertable[b'debug-discovery-local-filter'] = local_func
        repo = repo.filtered(b'debug-discovery-local-filter')

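    # The repoview filters registered above hide every revision outside the
    # requested subset (a filter function returns the set of revisions to
    # hide), so the unmodified discovery code transparently sees a "smaller"
    # local or remote repository.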
    data = {}
    if opts.get(b'old'):

        def doit(pushedrevs, remoteheads, remote=remote):
            if not util.safehasattr(remote, b'branches'):
                # enable in-client legacy support
                remote = localrepo.locallegacypeer(remote.local())
            common, _in, hds = treediscovery.findcommonincoming(
                repo, remote, force=True, audit=data
            )
            common = set(common)
            if not opts.get(b'nonheads'):
                ui.writenoi18n(
                    b"unpruned common: %s\n"
                    % b" ".join(sorted(short(n) for n in common))
                )

            clnode = repo.changelog.node
            common = repo.revs(b'heads(::%ln)', common)
            common = {clnode(r) for r in common}
            return common, hds

    else:

        def doit(pushedrevs, remoteheads, remote=remote):
            nodes = None
            if pushedrevs:
                revs = scmutil.revrange(repo, pushedrevs)
                nodes = [repo[r].node() for r in revs]
            common, any, hds = setdiscovery.findcommonheads(
                ui, repo, remote, ancestorsof=nodes, audit=data
            )
            return common, hds

    remoterevs, _checkout = hg.addbranchrevs(repo, remote, branches, revs=None)
    localrevs = opts[b'rev']

    fm = ui.formatter(b'debugdiscovery', opts)
    if fm.strict_format:

        @contextlib.contextmanager
        def may_capture_output():
            ui.pushbuffer()
            yield
            data[b'output'] = ui.popbuffer()

    else:
        may_capture_output = util.nullcontextmanager
    with may_capture_output():
        with util.timedcm('debug-discovery') as t:
            common, hds = doit(localrevs, remoterevs)

    # compute all statistics
    heads_common = set(common)
    heads_remote = set(hds)
    heads_local = set(repo.heads())
    # note: there cannot be a local or remote head that is in common and not
    # itself a head of common.
    heads_common_local = heads_common & heads_local
    heads_common_remote = heads_common & heads_remote
    heads_common_both = heads_common & heads_remote & heads_local

    all = repo.revs(b'all()')
    common = repo.revs(b'::%ln', common)
    roots_common = repo.revs(b'roots(::%ld)', common)
    missing = repo.revs(b'not ::%ld', common)
    heads_missing = repo.revs(b'heads(%ld)', missing)
    roots_missing = repo.revs(b'roots(%ld)', missing)
    assert len(common) + len(missing) == len(all)
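    # sanity check: "common" and "missing" partition the whole repository,
    # since missing is defined as everything that is not an ancestor of a
    # common head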

    initial_undecided = repo.revs(
        b'not (::%ln or %ln::)', heads_common_remote, heads_common_local
    )
    heads_initial_undecided = repo.revs(b'heads(%ld)', initial_undecided)
    roots_initial_undecided = repo.revs(b'roots(%ld)', initial_undecided)
    common_initial_undecided = initial_undecided & common
    missing_initial_undecided = initial_undecided & missing

    data[b'elapsed'] = t.elapsed
    data[b'nb-common-heads'] = len(heads_common)
    data[b'nb-common-heads-local'] = len(heads_common_local)
    data[b'nb-common-heads-remote'] = len(heads_common_remote)
    data[b'nb-common-heads-both'] = len(heads_common_both)
    data[b'nb-common-roots'] = len(roots_common)
    data[b'nb-head-local'] = len(heads_local)
    data[b'nb-head-local-missing'] = len(heads_local) - len(heads_common_local)
    data[b'nb-head-remote'] = len(heads_remote)
    data[b'nb-head-remote-unknown'] = len(heads_remote) - len(
        heads_common_remote
    )
    data[b'nb-revs'] = len(all)
    data[b'nb-revs-common'] = len(common)
    data[b'nb-revs-missing'] = len(missing)
    data[b'nb-missing-heads'] = len(heads_missing)
    data[b'nb-missing-roots'] = len(roots_missing)
    data[b'nb-ini_und'] = len(initial_undecided)
    data[b'nb-ini_und-heads'] = len(heads_initial_undecided)
    data[b'nb-ini_und-roots'] = len(roots_initial_undecided)
    data[b'nb-ini_und-common'] = len(common_initial_undecided)
    data[b'nb-ini_und-missing'] = len(missing_initial_undecided)

    fm.startitem()
    fm.data(**pycompat.strkwargs(data))
    # display discovery summary
    fm.plain(b"elapsed time:  %(elapsed)f seconds\n" % data)
    fm.plain(b"round-trips:   %(total-roundtrips)9d\n" % data)
    fm.plain(b"heads summary:\n")
    fm.plain(b"  total common heads:  %(nb-common-heads)9d\n" % data)
    fm.plain(b"    also local heads:  %(nb-common-heads-local)9d\n" % data)
    fm.plain(b"    also remote heads: %(nb-common-heads-remote)9d\n" % data)
    fm.plain(b"    both:              %(nb-common-heads-both)9d\n" % data)
    fm.plain(b"  local heads:         %(nb-head-local)9d\n" % data)
    fm.plain(b"    common:            %(nb-common-heads-local)9d\n" % data)
    fm.plain(b"    missing:           %(nb-head-local-missing)9d\n" % data)
    fm.plain(b"  remote heads:        %(nb-head-remote)9d\n" % data)
    fm.plain(b"    common:            %(nb-common-heads-remote)9d\n" % data)
    fm.plain(b"    unknown:           %(nb-head-remote-unknown)9d\n" % data)
    fm.plain(b"local changesets:      %(nb-revs)9d\n" % data)
    fm.plain(b"  common:              %(nb-revs-common)9d\n" % data)
    fm.plain(b"    heads:             %(nb-common-heads)9d\n" % data)
    fm.plain(b"    roots:             %(nb-common-roots)9d\n" % data)
    fm.plain(b"  missing:             %(nb-revs-missing)9d\n" % data)
    fm.plain(b"    heads:             %(nb-missing-heads)9d\n" % data)
    fm.plain(b"    roots:             %(nb-missing-roots)9d\n" % data)
    fm.plain(b"  first undecided set: %(nb-ini_und)9d\n" % data)
    fm.plain(b"    heads:             %(nb-ini_und-heads)9d\n" % data)
    fm.plain(b"    roots:             %(nb-ini_und-roots)9d\n" % data)
    fm.plain(b"    common:            %(nb-ini_und-common)9d\n" % data)
    fm.plain(b"    missing:           %(nb-ini_und-missing)9d\n" % data)

    if ui.verbose:
        fm.plain(
            b"common heads: %s\n"
            % b" ".join(sorted(short(n) for n in heads_common))
        )
    fm.end()


_chunksize = 4 << 10
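# i.e. 4 KiB per read below; keeps memory bounded while streaming downloads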


@command(
    b'debugdownload',
    [
        (b'o', b'output', b'', _(b'path')),
    ],
    optionalrepo=True,
)
def debugdownload(ui, repo, url, output=None, **opts):
    """download a resource using Mercurial logic and config"""
    fh = urlmod.open(ui, url, output)

    dest = ui
    if output:
        dest = open(output, b"wb", _chunksize)
    try:
        data = fh.read(_chunksize)
        while data:
            dest.write(data)
            data = fh.read(_chunksize)
    finally:
        if output:
            dest.close()


@command(b'debugextensions', cmdutil.formatteropts, [], optionalrepo=True)
def debugextensions(ui, repo, **opts):
    '''show information about active extensions'''
    opts = pycompat.byteskwargs(opts)
    exts = extensions.extensions(ui)
    hgver = util.version()
    fm = ui.formatter(b'debugextensions', opts)
    for extname, extmod in sorted(exts, key=operator.itemgetter(0)):
        isinternal = extensions.ismoduleinternal(extmod)
        extsource = None

        if util.safehasattr(extmod, '__file__'):
            extsource = pycompat.fsencode(extmod.__file__)
        elif getattr(sys, 'oxidized', False):
            extsource = pycompat.sysexecutable
        if isinternal:
            exttestedwith = []  # never expose magic string to users
        else:
            exttestedwith = getattr(extmod, 'testedwith', b'').split()
        extbuglink = getattr(extmod, 'buglink', None)

        fm.startitem()

        if ui.quiet or ui.verbose:
            fm.write(b'name', b'%s\n', extname)
        else:
            fm.write(b'name', b'%s', extname)
            if isinternal or hgver in exttestedwith:
                fm.plain(b'\n')
            elif not exttestedwith:
                fm.plain(_(b' (untested!)\n'))
            else:
                lasttestedversion = exttestedwith[-1]
                fm.plain(b' (%s!)\n' % lasttestedversion)

        fm.condwrite(
            ui.verbose and extsource,
            b'source',
            _(b'  location: %s\n'),
            extsource or b"",
        )

        if ui.verbose:
            fm.plain(_(b'  bundled: %s\n') % [b'no', b'yes'][isinternal])
        fm.data(bundled=isinternal)

        fm.condwrite(
            ui.verbose and exttestedwith,
            b'testedwith',
            _(b'  tested with: %s\n'),
            fm.formatlist(exttestedwith, name=b'ver'),
        )

        fm.condwrite(
            ui.verbose and extbuglink,
            b'buglink',
            _(b'  bug reporting: %s\n'),
            extbuglink or b"",
        )

    fm.end()


@command(
    b'debugfileset',
    [
        (
            b'r',
            b'rev',
            b'',
            _(b'apply the filespec on this revision'),
            _(b'REV'),
        ),
        (
            b'',
            b'all-files',
            False,
            _(b'test files from all revisions and working directory'),
        ),
        (
            b's',
            b'show-matcher',
            None,
            _(b'print internal representation of matcher'),
        ),
        (
            b'p',
            b'show-stage',
            [],
            _(b'print parsed tree at the given stage'),
            _(b'NAME'),
        ),
    ],
    _(b'[-r REV] [--all-files] [OPTION]... FILESPEC'),
)
def debugfileset(ui, repo, expr, **opts):
    '''parse and apply a fileset specification'''
    from . import fileset

    fileset.symbols  # force import of fileset so we have predicates to optimize
    opts = pycompat.byteskwargs(opts)
    ctx = scmutil.revsingle(repo, opts.get(b'rev'), None)

    stages = [
        (b'parsed', pycompat.identity),
        (b'analyzed', filesetlang.analyze),
        (b'optimized', filesetlang.optimize),
    ]
    stagenames = {n for n, f in stages}
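    # the expression is pushed through each stage in order (parse tree,
    # semantic analysis, optimization); --show-stage NAME dumps the tree
    # after any of them, e.g.:
    #     hg debugfileset -p all 'added() or modified()'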

    showalways = set()
    if ui.verbose and not opts[b'show_stage']:
        # show parsed tree by --verbose (deprecated)
        showalways.add(b'parsed')
    if opts[b'show_stage'] == [b'all']:
        showalways.update(stagenames)
    else:
        for n in opts[b'show_stage']:
            if n not in stagenames:
                raise error.Abort(_(b'invalid stage name: %s') % n)
        showalways.update(opts[b'show_stage'])

    tree = filesetlang.parse(expr)
    for n, f in stages:
        tree = f(tree)
        if n in showalways:
            if opts[b'show_stage'] or n != b'parsed':
                ui.write(b"* %s:\n" % n)
            ui.write(filesetlang.prettyformat(tree), b"\n")

    files = set()
    if opts[b'all_files']:
        for r in repo:
            c = repo[r]
            files.update(c.files())
            files.update(c.substate)
    if opts[b'all_files'] or ctx.rev() is None:
        wctx = repo[None]
        files.update(
            repo.dirstate.walk(
                scmutil.matchall(repo),
                subrepos=list(wctx.substate),
                unknown=True,
                ignored=True,
            )
        )
        files.update(wctx.substate)
    else:
        files.update(ctx.files())
        files.update(ctx.substate)

    m = ctx.matchfileset(repo.getcwd(), expr)
    if opts[b'show_matcher'] or (opts[b'show_matcher'] is None and ui.verbose):
        ui.writenoi18n(b'* matcher:\n', stringutil.prettyrepr(m), b'\n')
    for f in sorted(files):
        if not m(f):
            continue
        ui.write(b"%s\n" % f)


@command(b'debugformat', [] + cmdutil.formatteropts)
def debugformat(ui, repo, **opts):
    """display format information about the current repository

    Use --verbose to get extra information about the current config value and
    the Mercurial default."""
    opts = pycompat.byteskwargs(opts)
    maxvariantlength = max(len(fv.name) for fv in upgrade.allformatvariant)
    maxvariantlength = max(len(b'format-variant'), maxvariantlength)

    def makeformatname(name):
        return b'%s:' + (b' ' * (maxvariantlength - len(name)))

    fm = ui.formatter(b'debugformat', opts)
    if fm.isplain():

        def formatvalue(value):
            if util.safehasattr(value, b'startswith'):
                return value
            if value:
                return b'yes'
            else:
                return b'no'

    else:
        formatvalue = pycompat.identity

    fm.plain(b'format-variant')
    fm.plain(b' ' * (maxvariantlength - len(b'format-variant')))
    fm.plain(b' repo')
    if ui.verbose:
        fm.plain(b' config default')
    fm.plain(b'\n')
    for fv in upgrade.allformatvariant:
        fm.startitem()
        repovalue = fv.fromrepo(repo)
        configvalue = fv.fromconfig(repo)

        if repovalue != configvalue:
            namelabel = b'formatvariant.name.mismatchconfig'
            repolabel = b'formatvariant.repo.mismatchconfig'
        elif repovalue != fv.default:
            namelabel = b'formatvariant.name.mismatchdefault'
            repolabel = b'formatvariant.repo.mismatchdefault'
        else:
            namelabel = b'formatvariant.name.uptodate'
            repolabel = b'formatvariant.repo.uptodate'
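        # three states drive the labels: the repo disagrees with the current
        # config (an upgrade run now would change it), the repo disagrees
        # with the Mercurial built-in default, or everything is up to date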

        fm.write(b'name', makeformatname(fv.name), fv.name, label=namelabel)
        fm.write(b'repo', b' %3s', formatvalue(repovalue), label=repolabel)
        if fv.default != configvalue:
            configlabel = b'formatvariant.config.special'
        else:
            configlabel = b'formatvariant.config.default'
        fm.condwrite(
            ui.verbose,
            b'config',
            b' %6s',
            formatvalue(configvalue),
            label=configlabel,
        )
        fm.condwrite(
            ui.verbose,
            b'default',
            b' %7s',
            formatvalue(fv.default),
            label=b'formatvariant.default',
        )
        fm.plain(b'\n')
    fm.end()


@command(b'debugfsinfo', [], _(b'[PATH]'), norepo=True)
def debugfsinfo(ui, path=b"."):
    """show information detected about current filesystem"""
    ui.writenoi18n(b'path: %s\n' % path)
    ui.writenoi18n(
        b'mounted on: %s\n' % (util.getfsmountpoint(path) or b'(unknown)')
    )
    ui.writenoi18n(b'exec: %s\n' % (util.checkexec(path) and b'yes' or b'no'))
    ui.writenoi18n(b'fstype: %s\n' % (util.getfstype(path) or b'(unknown)'))
    ui.writenoi18n(
        b'symlink: %s\n' % (util.checklink(path) and b'yes' or b'no')
    )
    ui.writenoi18n(
        b'hardlink: %s\n' % (util.checknlink(path) and b'yes' or b'no')
    )
    casesensitive = b'(unknown)'
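    # probe case sensitivity with a throwaway dotfile; if the file cannot
    # be created the answer stays '(unknown)' (fscasesensitive is assumed
    # to compare the on-disk spelling against a case-swapped variant)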
    try:
        with pycompat.namedtempfile(prefix=b'.debugfsinfo', dir=path) as f:
            casesensitive = util.fscasesensitive(f.name) and b'yes' or b'no'
    except OSError:
        pass
    ui.writenoi18n(b'case-sensitive: %s\n' % casesensitive)


@command(
    b'debuggetbundle',
    [
        (b'H', b'head', [], _(b'id of head node'), _(b'ID')),
        (b'C', b'common', [], _(b'id of common node'), _(b'ID')),
        (
            b't',
            b'type',
            b'bzip2',
            _(b'bundle compression type to use'),
            _(b'TYPE'),
        ),
    ],
    _(b'REPO FILE [-H|-C ID]...'),
    norepo=True,
)
def debuggetbundle(ui, repopath, bundlepath, head=None, common=None, **opts):
    """retrieves a bundle from a repo

    Every ID must be a full-length hex node id string. Saves the bundle to the
    given file.
    """
    opts = pycompat.byteskwargs(opts)
    repo = hg.peer(ui, opts, repopath)
    if not repo.capable(b'getbundle'):
        raise error.Abort(b"getbundle() not supported by target repository")
    args = {}
    if common:
        args['common'] = [bin(s) for s in common]
    if head:
        args['heads'] = [bin(s) for s in head]
    # TODO: get desired bundlecaps from command line.
    args['bundlecaps'] = None
    bundle = repo.getbundle(b'debug', **args)

    bundletype = opts.get(b'type', b'bzip2').lower()
    btypes = {
        b'none': b'HG10UN',
        b'bzip2': b'HG10BZ',
        b'gzip': b'HG10GZ',
        b'bundle2': b'HG20',
    }
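    # the friendly names map onto bundle headers: HG10UN/HG10BZ/HG10GZ are
    # the v1 bundle format with no, bzip2 or gzip compression; HG20 is the
    # bundle2 container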
    bundletype = btypes.get(bundletype)
    if bundletype not in bundle2.bundletypes:
        raise error.Abort(_(b'unknown bundle type specified with --type'))
    bundle2.writebundle(ui, bundle, bundlepath, bundletype)


@command(b'debugignore', [], b'[FILE]')
def debugignore(ui, repo, *files, **opts):
    """display the combined ignore pattern and information about ignored files

    With no argument display the combined ignore pattern.

    Given space separated file names, shows if the given file is ignored and
    if so, show the ignore rule (file and line number) that matched it.
    """
    ignore = repo.dirstate._ignore
    if not files:
        # Show all the patterns
        ui.write(b"%s\n" % pycompat.byterepr(ignore))
    else:
        m = scmutil.match(repo[None], pats=files)
        uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=True)
        for f in m.files():
            nf = util.normpath(f)
            ignored = None
            ignoredata = None
            if nf != b'.':
                if ignore(nf):
                    ignored = nf
                    ignoredata = repo.dirstate._ignorefileandline(nf)
                else:
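                    # a file may also be ignored indirectly: if any of its
                    # containing directories matches an ignore pattern,
                    # everything beneath that directory is ignored as well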
                    for p in pathutil.finddirs(nf):
                        if ignore(p):
                            ignored = p
                            ignoredata = repo.dirstate._ignorefileandline(p)
                            break
            if ignored:
                if ignored == nf:
                    ui.write(_(b"%s is ignored\n") % uipathfn(f))
                else:
                    ui.write(
                        _(
                            b"%s is ignored because of "
                            b"containing directory %s\n"
                        )
                        % (uipathfn(f), ignored)
                    )
                ignorefile, lineno, line = ignoredata
                ui.write(
                    _(b"(ignore rule in %s, line %d: '%s')\n")
                    % (ignorefile, lineno, line)
                )
            else:
                ui.write(_(b"%s is not ignored\n") % uipathfn(f))


@command(
    b'debugindex',
    cmdutil.debugrevlogopts + cmdutil.formatteropts,
    _(b'-c|-m|FILE'),
)
def debugindex(ui, repo, file_=None, **opts):
    """dump index data for a storage primitive"""
    opts = pycompat.byteskwargs(opts)
    store = cmdutil.openstorage(repo, b'debugindex', file_, opts)

    if ui.debugflag:
        shortfn = hex
    else:
        shortfn = short

    idlen = 12
    for i in store:
        idlen = len(shortfn(store.node(i)))
        break

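    # only the first revision needs probing above: every node id renders at
    # the same width (12 hex digits for short hashes, the full 40 with
    # --debug)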
    fm = ui.formatter(b'debugindex', opts)
    fm.plain(
        b'   rev linkrev %s %s p2\n'
        % (b'nodeid'.ljust(idlen), b'p1'.ljust(idlen))
    )

    for rev in store:
        node = store.node(rev)
        parents = store.parents(node)

        fm.startitem()
        fm.write(b'rev', b'%6d ', rev)
        fm.write(b'linkrev', b'%7d ', store.linkrev(rev))
        fm.write(b'node', b'%s ', shortfn(node))
        fm.write(b'p1', b'%s ', shortfn(parents[0]))
        fm.write(b'p2', b'%s', shortfn(parents[1]))
        fm.plain(b'\n')

    fm.end()


@command(
    b'debugindexdot',
    cmdutil.debugrevlogopts,
    _(b'-c|-m|FILE'),
    optionalrepo=True,
)
def debugindexdot(ui, repo, file_=None, **opts):
    """dump an index DAG as a graphviz dot file"""
    opts = pycompat.byteskwargs(opts)
    r = cmdutil.openstorage(repo, b'debugindexdot', file_, opts)
    ui.writenoi18n(b"digraph G {\n")
    for i in r:
        node = r.node(i)
        pp = r.parents(node)
        ui.write(b"\t%d -> %d\n" % (r.rev(pp[0]), i))
        if pp[1] != repo.nullid:
            ui.write(b"\t%d -> %d\n" % (r.rev(pp[1]), i))
    ui.write(b"}\n")


@command(b'debugindexstats', [])
def debugindexstats(ui, repo):
    """show stats related to the changelog index"""
    repo.changelog.shortest(repo.nullid, 1)
    index = repo.changelog.index
    if not util.safehasattr(index, b'stats'):
        raise error.Abort(_(b'debugindexstats only works with native code'))
    for k, v in sorted(index.stats().items()):
        ui.write(b'%s: %d\n' % (k, v))


@command(b'debuginstall', [] + cmdutil.formatteropts, b'', norepo=True)
def debuginstall(ui, **opts):
    """test Mercurial installation

    Returns 0 on success.
    """
    opts = pycompat.byteskwargs(opts)

    problems = 0

    fm = ui.formatter(b'debuginstall', opts)
    fm.startitem()

    # encoding might be unknown or wrong. don't translate these messages.
    fm.write(b'encoding', b"checking encoding (%s)...\n", encoding.encoding)
    err = None
    try:
        codecs.lookup(pycompat.sysstr(encoding.encoding))
    except LookupError as inst:
        err = stringutil.forcebytestr(inst)
        problems += 1
    fm.condwrite(
        err,
        b'encodingerror',
        b" %s\n (check that your locale is properly set)\n",
        err,
    )

    # Python
    pythonlib = None
    if util.safehasattr(os, '__file__'):
        pythonlib = os.path.dirname(pycompat.fsencode(os.__file__))
    elif getattr(sys, 'oxidized', False):
        pythonlib = pycompat.sysexecutable

    fm.write(
        b'pythonexe',
        _(b"checking Python executable (%s)\n"),
        pycompat.sysexecutable or _(b"unknown"),
    )
    fm.write(
        b'pythonimplementation',
        _(b"checking Python implementation (%s)\n"),
        pycompat.sysbytes(platform.python_implementation()),
    )
    fm.write(
        b'pythonver',
        _(b"checking Python version (%s)\n"),
        (b"%d.%d.%d" % sys.version_info[:3]),
    )
    fm.write(
        b'pythonlib',
        _(b"checking Python lib (%s)...\n"),
        pythonlib or _(b"unknown"),
    )

    try:
        from . import rustext  # pytype: disable=import-error

        rustext.__doc__  # trigger lazy import
    except ImportError:
        rustext = None

    security = set(sslutil.supportedprotocols)
    if sslutil.hassni:
        security.add(b'sni')

    fm.write(
        b'pythonsecurity',
        _(b"checking Python security support (%s)\n"),
        fm.formatlist(sorted(security), name=b'protocol', fmt=b'%s', sep=b','),
    )

    # These are warnings, not errors. So don't increment problem count. This
    # may change in the future.
    if b'tls1.2' not in security:
        fm.plain(
            _(
                b'  TLS 1.2 not supported by Python install; '
                b'network connections lack modern security\n'
            )
        )
    if b'sni' not in security:
        fm.plain(
            _(
                b'  SNI not supported by Python install; may have '
                b'connectivity issues with some servers\n'
            )
        )

    fm.plain(
        _(
            b"checking Rust extensions (%s)\n"
            % (b'missing' if rustext is None else b'installed')
        ),
    )

    # TODO print CA cert info

    # hg version
    hgver = util.version()
    fm.write(
        b'hgver', _(b"checking Mercurial version (%s)\n"), hgver.split(b'+')[0]
    )
    fm.write(
        b'hgverextra',
        _(b"checking Mercurial custom build (%s)\n"),
        b'+'.join(hgver.split(b'+')[1:]),
    )

    # compiled modules
    hgmodules = None
    if util.safehasattr(sys.modules[__name__], '__file__'):
        hgmodules = os.path.dirname(pycompat.fsencode(__file__))
    elif getattr(sys, 'oxidized', False):
        hgmodules = pycompat.sysexecutable

    fm.write(
        b'hgmodulepolicy', _(b"checking module policy (%s)\n"), policy.policy
    )
    fm.write(
        b'hgmodules',
        _(b"checking installed modules (%s)...\n"),
        hgmodules or _(b"unknown"),
    )

    rustandc = policy.policy in (b'rust+c', b'rust+c-allow')
    rustext = rustandc  # for now, that's the only case
    cext = policy.policy in (b'c', b'allow') or rustandc
    nopure = cext or rustext
1827 if nopure:
1846 if nopure:
1828 err = None
1847 err = None
1829 try:
1848 try:
1830 if cext:
1849 if cext:
1831 from .cext import ( # pytype: disable=import-error
1850 from .cext import ( # pytype: disable=import-error
1832 base85,
1851 base85,
1833 bdiff,
1852 bdiff,
1834 mpatch,
1853 mpatch,
1835 osutil,
1854 osutil,
1836 )
1855 )
1837
1856
1838 # quiet pyflakes
1857 # quiet pyflakes
1839 dir(bdiff), dir(mpatch), dir(base85), dir(osutil)
1858 dir(bdiff), dir(mpatch), dir(base85), dir(osutil)
1840 if rustext:
1859 if rustext:
1841 from .rustext import ( # pytype: disable=import-error
1860 from .rustext import ( # pytype: disable=import-error
1842 ancestor,
1861 ancestor,
1843 dirstate,
1862 dirstate,
1844 )
1863 )
1845
1864
1846 dir(ancestor), dir(dirstate) # quiet pyflakes
1865 dir(ancestor), dir(dirstate) # quiet pyflakes
1847 except Exception as inst:
1866 except Exception as inst:
1848 err = stringutil.forcebytestr(inst)
1867 err = stringutil.forcebytestr(inst)
1849 problems += 1
1868 problems += 1
1850 fm.condwrite(err, b'extensionserror', b" %s\n", err)
1869 fm.condwrite(err, b'extensionserror', b" %s\n", err)
1851
1870
1852 compengines = util.compengines._engines.values()
1871 compengines = util.compengines._engines.values()
1853 fm.write(
1872 fm.write(
1854 b'compengines',
1873 b'compengines',
1855 _(b'checking registered compression engines (%s)\n'),
1874 _(b'checking registered compression engines (%s)\n'),
1856 fm.formatlist(
1875 fm.formatlist(
1857 sorted(e.name() for e in compengines),
1876 sorted(e.name() for e in compengines),
1858 name=b'compengine',
1877 name=b'compengine',
1859 fmt=b'%s',
1878 fmt=b'%s',
1860 sep=b', ',
1879 sep=b', ',
1861 ),
1880 ),
1862 )
1881 )
1863 fm.write(
1882 fm.write(
1864 b'compenginesavail',
1883 b'compenginesavail',
1865 _(b'checking available compression engines (%s)\n'),
1884 _(b'checking available compression engines (%s)\n'),
1866 fm.formatlist(
1885 fm.formatlist(
1867 sorted(e.name() for e in compengines if e.available()),
1886 sorted(e.name() for e in compengines if e.available()),
1868 name=b'compengine',
1887 name=b'compengine',
1869 fmt=b'%s',
1888 fmt=b'%s',
1870 sep=b', ',
1889 sep=b', ',
1871 ),
1890 ),
1872 )
1891 )
1873 wirecompengines = compression.compengines.supportedwireengines(
1892 wirecompengines = compression.compengines.supportedwireengines(
1874 compression.SERVERROLE
1893 compression.SERVERROLE
1875 )
1894 )
1876 fm.write(
1895 fm.write(
1877 b'compenginesserver',
1896 b'compenginesserver',
1878 _(
1897 _(
1879 b'checking available compression engines '
1898 b'checking available compression engines '
1880 b'for wire protocol (%s)\n'
1899 b'for wire protocol (%s)\n'
1881 ),
1900 ),
1882 fm.formatlist(
1901 fm.formatlist(
1883 [e.name() for e in wirecompengines if e.wireprotosupport()],
1902 [e.name() for e in wirecompengines if e.wireprotosupport()],
1884 name=b'compengine',
1903 name=b'compengine',
1885 fmt=b'%s',
1904 fmt=b'%s',
1886 sep=b', ',
1905 sep=b', ',
1887 ),
1906 ),
1888 )
1907 )
1889 re2 = b'missing'
1908 re2 = b'missing'
1890 if util._re2:
1909 if util._re2:
1891 re2 = b'available'
1910 re2 = b'available'
1892 fm.plain(_(b'checking "re2" regexp engine (%s)\n') % re2)
1911 fm.plain(_(b'checking "re2" regexp engine (%s)\n') % re2)
1893 fm.data(re2=bool(util._re2))
1912 fm.data(re2=bool(util._re2))
1894
1913
1895 # templates
1914 # templates
1896 p = templater.templatedir()
1915 p = templater.templatedir()
1897 fm.write(b'templatedirs', b'checking templates (%s)...\n', p or b'')
1916 fm.write(b'templatedirs', b'checking templates (%s)...\n', p or b'')
1898 fm.condwrite(not p, b'', _(b" no template directories found\n"))
1917 fm.condwrite(not p, b'', _(b" no template directories found\n"))
1899 if p:
1918 if p:
1900 (m, fp) = templater.try_open_template(b"map-cmdline.default")
1919 (m, fp) = templater.try_open_template(b"map-cmdline.default")
1901 if m:
1920 if m:
1902 # template found, check if it is working
1921 # template found, check if it is working
1903 err = None
1922 err = None
1904 try:
1923 try:
1905 templater.templater.frommapfile(m)
1924 templater.templater.frommapfile(m)
1906 except Exception as inst:
1925 except Exception as inst:
1907 err = stringutil.forcebytestr(inst)
1926 err = stringutil.forcebytestr(inst)
1908 p = None
1927 p = None
1909 fm.condwrite(err, b'defaulttemplateerror', b" %s\n", err)
1928 fm.condwrite(err, b'defaulttemplateerror', b" %s\n", err)
1910 else:
1929 else:
1911 p = None
1930 p = None
1912 fm.condwrite(
1931 fm.condwrite(
1913 p, b'defaulttemplate', _(b"checking default template (%s)\n"), m
1932 p, b'defaulttemplate', _(b"checking default template (%s)\n"), m
1914 )
1933 )
1915 fm.condwrite(
1934 fm.condwrite(
1916 not m,
1935 not m,
1917 b'defaulttemplatenotfound',
1936 b'defaulttemplatenotfound',
1918 _(b" template '%s' not found\n"),
1937 _(b" template '%s' not found\n"),
1919 b"default",
1938 b"default",
1920 )
1939 )
1921 if not p:
1940 if not p:
1922 problems += 1
1941 problems += 1
1923 fm.condwrite(
1942 fm.condwrite(
1924 not p, b'', _(b" (templates seem to have been installed incorrectly)\n")
1943 not p, b'', _(b" (templates seem to have been installed incorrectly)\n")
1925 )
1944 )
1926
1945
1927 # editor
1946 # editor
1928 editor = ui.geteditor()
1947 editor = ui.geteditor()
1929 editor = util.expandpath(editor)
1948 editor = util.expandpath(editor)
1930 editorbin = procutil.shellsplit(editor)[0]
1949 editorbin = procutil.shellsplit(editor)[0]
1931 fm.write(b'editor', _(b"checking commit editor... (%s)\n"), editorbin)
1950 fm.write(b'editor', _(b"checking commit editor... (%s)\n"), editorbin)
1932 cmdpath = procutil.findexe(editorbin)
1951 cmdpath = procutil.findexe(editorbin)
1933 fm.condwrite(
1952 fm.condwrite(
1934 not cmdpath and editor == b'vi',
1953 not cmdpath and editor == b'vi',
1935 b'vinotfound',
1954 b'vinotfound',
1936 _(
1955 _(
1937 b" No commit editor set and can't find %s in PATH\n"
1956 b" No commit editor set and can't find %s in PATH\n"
1938 b" (specify a commit editor in your configuration"
1957 b" (specify a commit editor in your configuration"
1939 b" file)\n"
1958 b" file)\n"
1940 ),
1959 ),
1941 not cmdpath and editor == b'vi' and editorbin,
1960 not cmdpath and editor == b'vi' and editorbin,
1942 )
1961 )
1943 fm.condwrite(
1962 fm.condwrite(
1944 not cmdpath and editor != b'vi',
1963 not cmdpath and editor != b'vi',
1945 b'editornotfound',
1964 b'editornotfound',
1946 _(
1965 _(
1947 b" Can't find editor '%s' in PATH\n"
1966 b" Can't find editor '%s' in PATH\n"
1948 b" (specify a commit editor in your configuration"
1967 b" (specify a commit editor in your configuration"
1949 b" file)\n"
1968 b" file)\n"
1950 ),
1969 ),
1951 not cmdpath and editorbin,
1970 not cmdpath and editorbin,
1952 )
1971 )
1953 if not cmdpath and editor != b'vi':
1972 if not cmdpath and editor != b'vi':
1954 problems += 1
1973 problems += 1
1955
1974
1956 # check username
1975 # check username
1957 username = None
1976 username = None
1958 err = None
1977 err = None
1959 try:
1978 try:
1960 username = ui.username()
1979 username = ui.username()
1961 except error.Abort as e:
1980 except error.Abort as e:
1962 err = e.message
1981 err = e.message
1963 problems += 1
1982 problems += 1
1964
1983
1965 fm.condwrite(
1984 fm.condwrite(
1966 username, b'username', _(b"checking username (%s)\n"), username
1985 username, b'username', _(b"checking username (%s)\n"), username
1967 )
1986 )
1968 fm.condwrite(
1987 fm.condwrite(
1969 err,
1988 err,
1970 b'usernameerror',
1989 b'usernameerror',
1971 _(
1990 _(
1972 b"checking username...\n %s\n"
1991 b"checking username...\n %s\n"
1973 b" (specify a username in your configuration file)\n"
1992 b" (specify a username in your configuration file)\n"
1974 ),
1993 ),
1975 err,
1994 err,
1976 )
1995 )
1977
1996
1978 for name, mod in extensions.extensions():
1997 for name, mod in extensions.extensions():
1979 handler = getattr(mod, 'debuginstall', None)
1998 handler = getattr(mod, 'debuginstall', None)
1980 if handler is not None:
1999 if handler is not None:
1981 problems += handler(ui, fm)
2000 problems += handler(ui, fm)
1982
2001
1983 fm.condwrite(not problems, b'', _(b"no problems detected\n"))
2002 fm.condwrite(not problems, b'', _(b"no problems detected\n"))
1984 if not problems:
2003 if not problems:
1985 fm.data(problems=problems)
2004 fm.data(problems=problems)
1986 fm.condwrite(
2005 fm.condwrite(
1987 problems,
2006 problems,
1988 b'problems',
2007 b'problems',
1989 _(b"%d problems detected, please check your install!\n"),
2008 _(b"%d problems detected, please check your install!\n"),
1990 problems,
2009 problems,
1991 )
2010 )
1992 fm.end()
2011 fm.end()
1993
2012
1994 return problems
2013 return problems
1995
2014
1996
2015
@command(b'debugknown', [], _(b'REPO ID...'), norepo=True)
def debugknown(ui, repopath, *ids, **opts):
    """test whether node ids are known to a repo

    Every ID must be a full-length hex node id string. Returns a list of 0s
    and 1s indicating unknown/known.
    """
    opts = pycompat.byteskwargs(opts)
    repo = hg.peer(ui, opts, repopath)
    if not repo.capable(b'known'):
        raise error.Abort(b"known() not supported by target repository")
    flags = repo.known([bin(s) for s in ids])
    ui.write(b"%s\n" % (b"".join([f and b"1" or b"0" for f in flags])))


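# Illustrative transcript for debugknown above (repository path and ids
# are placeholders; real ids must be full-length hex node strings):
#
#   $ hg debugknown /path/to/repo <known-id> <unknown-id>
#   10
#
# i.e. the first id is known to the repo and the second one is not.

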
@command(b'debuglabelcomplete', [], _(b'LABEL...'))
def debuglabelcomplete(ui, repo, *args):
    '''backwards compatibility with old bash completion scripts (DEPRECATED)'''
    debugnamecomplete(ui, repo, *args)


@command(
    b'debuglocks',
    [
        (b'L', b'force-free-lock', None, _(b'free the store lock (DANGEROUS)')),
        (
            b'W',
            b'force-free-wlock',
            None,
            _(b'free the working state lock (DANGEROUS)'),
        ),
        (b's', b'set-lock', None, _(b'set the store lock until stopped')),
        (
            b'S',
            b'set-wlock',
            None,
            _(b'set the working state lock until stopped'),
        ),
    ],
    _(b'[OPTION]...'),
)
def debuglocks(ui, repo, **opts):
    """show or modify state of locks

    By default, this command will show which locks are held. This
    includes the user and process holding the lock, the amount of time
    the lock has been held, and the machine name where the process is
    running if it's not local.

    Locks protect the integrity of Mercurial's data, so should be
    treated with care. System crashes or other interruptions may cause
    locks to not be properly released, though Mercurial will usually
    detect and remove such stale locks automatically.

    However, detecting stale locks may not always be possible (for
    instance, on a shared filesystem). Removing locks may also be
    blocked by filesystem permissions.

    Setting a lock will prevent other commands from changing the data.
    The command will wait until an interruption (SIGINT, SIGTERM, ...) occurs.
    The set locks are removed when the command exits.

    Returns 0 if no locks are held.

    """

    if opts.get('force_free_lock'):
        repo.svfs.unlink(b'lock')
    if opts.get('force_free_wlock'):
        repo.vfs.unlink(b'wlock')
    if opts.get('force_free_lock') or opts.get('force_free_wlock'):
        return 0

    locks = []
    try:
        if opts.get('set_wlock'):
            try:
                locks.append(repo.wlock(False))
            except error.LockHeld:
                raise error.Abort(_(b'wlock is already held'))
        if opts.get('set_lock'):
            try:
                locks.append(repo.lock(False))
            except error.LockHeld:
                raise error.Abort(_(b'lock is already held'))
        if len(locks):
            ui.promptchoice(_(b"ready to release the lock (y)? $$ &Yes"))
            return 0
    finally:
        release(*locks)

    now = time.time()
    held = 0

    def report(vfs, name, method):
        # this causes stale locks to get reaped for more accurate reporting
        try:
            l = method(False)
        except error.LockHeld:
            l = None

        if l:
            l.release()
        else:
            try:
                st = vfs.lstat(name)
                age = now - st[stat.ST_MTIME]
                user = util.username(st.st_uid)
                locker = vfs.readlock(name)
                if b":" in locker:
                    host, pid = locker.split(b':')
                    if host == socket.gethostname():
                        locker = b'user %s, process %s' % (user or b'None', pid)
                    else:
                        locker = b'user %s, process %s, host %s' % (
                            user or b'None',
                            pid,
                            host,
                        )
                ui.writenoi18n(b"%-6s %s (%ds)\n" % (name + b":", locker, age))
                return 1
            except OSError as e:
                if e.errno != errno.ENOENT:
                    raise

        ui.writenoi18n(b"%-6s free\n" % (name + b":"))
        return 0

    held += report(repo.svfs, b"lock", repo.lock)
    held += report(repo.vfs, b"wlock", repo.wlock)

    return held


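# A minimal sketch of the lock-file convention parsed in report() above;
# the locker value is stored as b"host:pid" (the values here are made up):
#
#   >>> locker = b'alice.example.com:12345'
#   >>> host, pid = locker.split(b':')
#   >>> host, pid
#   (b'alice.example.com', b'12345')

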
@command(
    b'debugmanifestfulltextcache',
    [
        (b'', b'clear', False, _(b'clear the cache')),
        (
            b'a',
            b'add',
            [],
            _(b'add the given manifest nodes to the cache'),
            _(b'NODE'),
        ),
    ],
    b'',
)
def debugmanifestfulltextcache(ui, repo, add=(), **opts):
    """show, clear or amend the contents of the manifest fulltext cache"""

    def getcache():
        r = repo.manifestlog.getstorage(b'')
        try:
            return r._fulltextcache
        except AttributeError:
            msg = _(
                b"Current revlog implementation doesn't appear to have a "
                b"manifest fulltext cache\n"
            )
            raise error.Abort(msg)

    if opts.get('clear'):
        with repo.wlock():
            cache = getcache()
            cache.clear(clear_persisted_data=True)
            return

    if add:
        with repo.wlock():
            m = repo.manifestlog
            store = m.getstorage(b'')
            for n in add:
                try:
                    manifest = m[store.lookup(n)]
                except error.LookupError as e:
                    raise error.Abort(
                        bytes(e), hint=b"Check your manifest node id"
                    )
                manifest.read()  # stores revision in cache too
            return

    cache = getcache()
    if not len(cache):
        ui.write(_(b'cache empty\n'))
    else:
        ui.write(
            _(
                b'cache contains %d manifest entries, in order of most to '
                b'least recent:\n'
            )
            % (len(cache),)
        )
        totalsize = 0
        for nodeid in cache:
            # Use cache.peek to not update the LRU order
            data = cache.peek(nodeid)
            size = len(data)
            totalsize += size + 24  # 20 bytes nodeid, 4 bytes size
            ui.write(
                _(b'id: %s, size %s\n') % (hex(nodeid), util.bytecount(size))
            )
        ondisk = cache._opener.stat(b'manifestfulltextcache').st_size
        ui.write(
            _(b'total cache data size %s, on-disk %s\n')
            % (util.bytecount(totalsize), util.bytecount(ondisk))
        )


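# The totalsize accounting in debugmanifestfulltextcache above charges a
# 24-byte overhead per entry (20-byte nodeid plus 4-byte size field, per
# the inline comment), so for two cached texts of, say, 1400 and 5200
# bytes (sizes invented for illustration):
#
#   >>> sum(size + 20 + 4 for size in [1400, 5200])
#   6648

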
@command(b'debugmergestate', [] + cmdutil.templateopts, b'')
def debugmergestate(ui, repo, *args, **opts):
    """print merge state

    Use --verbose to print out information about whether v1 or v2 merge state
    was chosen."""

    if ui.verbose:
        ms = mergestatemod.mergestate(repo)

        # sort so that reasonable information is on top
        v1records = ms._readrecordsv1()
        v2records = ms._readrecordsv2()

        if not v1records and not v2records:
            pass
        elif not v2records:
            ui.writenoi18n(b'no version 2 merge state\n')
        elif ms._v1v2match(v1records, v2records):
            ui.writenoi18n(b'v1 and v2 states match: using v2\n')
        else:
            ui.writenoi18n(b'v1 and v2 states mismatch: using v1\n')

    opts = pycompat.byteskwargs(opts)
    if not opts[b'template']:
        opts[b'template'] = (
            b'{if(commits, "", "no merge state found\n")}'
            b'{commits % "{name}{if(label, " ({label})")}: {node}\n"}'
            b'{files % "file: {path} (state \\"{state}\\")\n'
            b'{if(local_path, "'
            b'  local path: {local_path} (hash {local_key}, flags \\"{local_flags}\\")\n'
            b'  ancestor path: {ancestor_path} (node {ancestor_node})\n'
            b'  other path: {other_path} (node {other_node})\n'
            b'")}'
            b'{if(rename_side, "'
            b'  rename side: {rename_side}\n'
            b'  renamed path: {renamed_path}\n'
            b'")}'
            b'{extras % "  extra: {key} = {value}\n"}'
            b'"}'
            b'{extras % "extra: {file} ({key} = {value})\n"}'
        )

    ms = mergestatemod.mergestate.read(repo)

    fm = ui.formatter(b'debugmergestate', opts)
    fm.startitem()

    fm_commits = fm.nested(b'commits')
    if ms.active():
        for name, node, label_index in (
            (b'local', ms.local, 0),
            (b'other', ms.other, 1),
        ):
            fm_commits.startitem()
            fm_commits.data(name=name)
            fm_commits.data(node=hex(node))
            if ms._labels and len(ms._labels) > label_index:
                fm_commits.data(label=ms._labels[label_index])
    fm_commits.end()

    fm_files = fm.nested(b'files')
    if ms.active():
        for f in ms:
            fm_files.startitem()
            fm_files.data(path=f)
            state = ms._state[f]
            fm_files.data(state=state[0])
            if state[0] in (
                mergestatemod.MERGE_RECORD_UNRESOLVED,
                mergestatemod.MERGE_RECORD_RESOLVED,
            ):
                fm_files.data(local_key=state[1])
                fm_files.data(local_path=state[2])
                fm_files.data(ancestor_path=state[3])
                fm_files.data(ancestor_node=state[4])
                fm_files.data(other_path=state[5])
                fm_files.data(other_node=state[6])
                fm_files.data(local_flags=state[7])
            elif state[0] in (
                mergestatemod.MERGE_RECORD_UNRESOLVED_PATH,
                mergestatemod.MERGE_RECORD_RESOLVED_PATH,
            ):
                fm_files.data(renamed_path=state[1])
                fm_files.data(rename_side=state[2])
            fm_extras = fm_files.nested(b'extras')
            for k, v in sorted(ms.extras(f).items()):
                fm_extras.startitem()
                fm_extras.data(key=k)
                fm_extras.data(value=v)
            fm_extras.end()

    fm_files.end()

    fm_extras = fm.nested(b'extras')
    for f, d in sorted(pycompat.iteritems(ms.allextras())):
        if f in ms:
            # If file is in mergestate, we have already processed its extras
            continue
        for k, v in pycompat.iteritems(d):
            fm_extras.startitem()
            fm_extras.data(file=f)
            fm_extras.data(key=k)
            fm_extras.data(value=v)
    fm_extras.end()

    fm.end()


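# Roughly what the default template of debugmergestate above renders for
# a single unresolved file; all names, hashes and nodes are invented:
#
#   local (working copy): 252023f2d...
#   other (merge rev): 6dd692b7d...
#   file: foo.txt (state "u")
#     local path: foo.txt (hash 60b27f0..., flags "")
#     ancestor path: foo.txt (node 2ed2a39...)
#     other path: foo.txt (node 6f4310b...)

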
@command(b'debugnamecomplete', [], _(b'NAME...'))
def debugnamecomplete(ui, repo, *args):
    '''complete "names" - tags, open branch names, bookmark names'''

    names = set()
    # since we previously only listed open branches, we will handle that
    # specially (after this for loop)
    for name, ns in pycompat.iteritems(repo.names):
        if name != b'branches':
            names.update(ns.listnames(repo))
    names.update(
        tag
        for (tag, heads, tip, closed) in repo.branchmap().iterbranches()
        if not closed
    )
    completions = set()
    if not args:
        args = [b'']
    for a in args:
        completions.update(n for n in names if n.startswith(a))
    ui.write(b'\n'.join(sorted(completions)))
    ui.write(b'\n')


@command(
    b'debugnodemap',
    [
        (
            b'',
            b'dump-new',
            False,
            _(b'write a (new) persistent binary nodemap on stdout'),
        ),
        (b'', b'dump-disk', False, _(b'dump on-disk data on stdout')),
        (
            b'',
            b'check',
            False,
            _(b'check that the data on disk are correct.'),
        ),
        (
            b'',
            b'metadata',
            False,
            _(b'display the on-disk metadata for the nodemap'),
        ),
    ],
)
def debugnodemap(ui, repo, **opts):
    """write and inspect the on-disk nodemap"""
    if opts['dump_new']:
        unfi = repo.unfiltered()
        cl = unfi.changelog
        if util.safehasattr(cl.index, "nodemap_data_all"):
            data = cl.index.nodemap_data_all()
        else:
            data = nodemap.persistent_data(cl.index)
        ui.write(data)
    elif opts['dump_disk']:
        unfi = repo.unfiltered()
        cl = unfi.changelog
        nm_data = nodemap.persisted_data(cl)
        if nm_data is not None:
            docket, data = nm_data
            ui.write(data[:])
    elif opts['check']:
        unfi = repo.unfiltered()
        cl = unfi.changelog
        nm_data = nodemap.persisted_data(cl)
        if nm_data is not None:
            docket, data = nm_data
            return nodemap.check_data(ui, cl.index, data)
    elif opts['metadata']:
        unfi = repo.unfiltered()
        cl = unfi.changelog
        nm_data = nodemap.persisted_data(cl)
        if nm_data is not None:
            docket, data = nm_data
            ui.write((b"uid: %s\n") % docket.uid)
            ui.write((b"tip-rev: %d\n") % docket.tip_rev)
            ui.write((b"tip-node: %s\n") % hex(docket.tip_node))
            ui.write((b"data-length: %d\n") % docket.data_length)
            ui.write((b"data-unused: %d\n") % docket.data_unused)
            unused_perc = docket.data_unused * 100.0 / docket.data_length
            ui.write((b"data-unused: %2.3f%%\n") % unused_perc)


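# Illustrative `hg debugnodemap --metadata` output (all values invented);
# the final percentage is data-unused * 100.0 / data-length, here
# 256 * 100.0 / 121088 == 0.211...:
#
#   uid: 4a8af0db
#   tip-rev: 5004
#   tip-node: 2f5fb8...
#   data-length: 121088
#   data-unused: 256
#   data-unused: 0.211%

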
@command(
    b'debugobsolete',
    [
        (b'', b'flags', 0, _(b'markers flag')),
        (
            b'',
            b'record-parents',
            False,
            _(b'record parent information for the precursor'),
        ),
        (b'r', b'rev', [], _(b'display markers relevant to REV')),
        (
            b'',
            b'exclusive',
            False,
            _(b'restrict display to markers only relevant to REV'),
        ),
        (b'', b'index', False, _(b'display index of the marker')),
        (b'', b'delete', [], _(b'delete markers specified by indices')),
    ]
    + cmdutil.commitopts2
    + cmdutil.formatteropts,
    _(b'[OBSOLETED [REPLACEMENT ...]]'),
)
def debugobsolete(ui, repo, precursor=None, *successors, **opts):
    """create arbitrary obsolete marker

    With no arguments, displays the list of obsolescence markers."""

    opts = pycompat.byteskwargs(opts)

    def parsenodeid(s):
        try:
            # We do not use revsingle/revrange functions here to accept
            # arbitrary node identifiers, possibly not present in the
            # local repository.
            n = bin(s)
            if len(n) != repo.nodeconstants.nodelen:
                raise TypeError()
            return n
        except TypeError:
            raise error.InputError(
                b'changeset references must be full hexadecimal '
                b'node identifiers'
            )

    if opts.get(b'delete'):
        indices = []
        for v in opts.get(b'delete'):
            try:
                indices.append(int(v))
            except ValueError:
                raise error.InputError(
                    _(b'invalid index value: %r') % v,
                    hint=_(b'use integers for indices'),
                )

        if repo.currenttransaction():
            raise error.Abort(
                _(b'cannot delete obsmarkers in the middle of transaction.')
            )

        with repo.lock():
            n = repair.deleteobsmarkers(repo.obsstore, indices)
            ui.write(_(b'deleted %i obsolescence markers\n') % n)

        return

    if precursor is not None:
        if opts[b'rev']:
            raise error.InputError(
                b'cannot select revision when creating marker'
            )
        metadata = {}
        metadata[b'user'] = encoding.fromlocal(opts[b'user'] or ui.username())
        succs = tuple(parsenodeid(succ) for succ in successors)
        l = repo.lock()
        try:
            tr = repo.transaction(b'debugobsolete')
            try:
                date = opts.get(b'date')
                if date:
                    date = dateutil.parsedate(date)
                else:
                    date = None
                prec = parsenodeid(precursor)
                parents = None
                if opts[b'record_parents']:
                    if prec not in repo.unfiltered():
                        raise error.Abort(
                            b'cannot use --record-parents on '
                            b'unknown changesets'
                        )
                    parents = repo.unfiltered()[prec].parents()
                    parents = tuple(p.node() for p in parents)
                repo.obsstore.create(
                    tr,
                    prec,
                    succs,
                    opts[b'flags'],
                    parents=parents,
                    date=date,
                    metadata=metadata,
                    ui=ui,
                )
                tr.close()
            except ValueError as exc:
                raise error.Abort(
                    _(b'bad obsmarker input: %s') % stringutil.forcebytestr(exc)
                )
            finally:
                tr.release()
        finally:
            l.release()
    else:
        if opts[b'rev']:
            revs = scmutil.revrange(repo, opts[b'rev'])
            nodes = [repo[r].node() for r in revs]
            markers = list(
                obsutil.getmarkers(
                    repo, nodes=nodes, exclusive=opts[b'exclusive']
                )
            )
            markers.sort(key=lambda x: x._data)
        else:
            markers = obsutil.getmarkers(repo)

        markerstoiter = markers
        isrelevant = lambda m: True
        if opts.get(b'rev') and opts.get(b'index'):
            markerstoiter = obsutil.getmarkers(repo)
            markerset = set(markers)
            isrelevant = lambda m: m in markerset

        fm = ui.formatter(b'debugobsolete', opts)
        for i, m in enumerate(markerstoiter):
            if not isrelevant(m):
                # marker can be irrelevant when we're iterating over a set
                # of markers (markerstoiter) which is bigger than the set
                # of markers we want to display (markers)
                # this can happen if both --index and --rev options are
                # provided and thus we need to iterate over all of the markers
                # to get the correct indices, but only display the ones that
                # are relevant to --rev value
                continue
            fm.startitem()
            ind = i if opts.get(b'index') else None
            cmdutil.showmarker(fm, m, index=ind)
        fm.end()


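# Illustrative debugobsolete usage (ids elided; the command requires full
# hexadecimal node identifiers, as parsenodeid() above enforces):
#
#   # record that OLD was superseded by NEW
#   $ hg debugobsolete <OLD> <NEW>
#
#   # list markers with indices, then delete the first one
#   $ hg debugobsolete --index
#   $ hg debugobsolete --delete 0
#   deleted 1 obsolescence markers

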
@command(
    b'debugp1copies',
    [(b'r', b'rev', b'', _(b'revision to debug'), _(b'REV'))],
    _(b'[-r REV]'),
)
def debugp1copies(ui, repo, **opts):
    """dump copy information compared to p1"""

    opts = pycompat.byteskwargs(opts)
    ctx = scmutil.revsingle(repo, opts.get(b'rev'), default=None)
    for dst, src in ctx.p1copies().items():
        ui.write(b'%s -> %s\n' % (src, dst))


@command(
    b'debugp2copies',
    [(b'r', b'rev', b'', _(b'revision to debug'), _(b'REV'))],
    _(b'[-r REV]'),
)
def debugp2copies(ui, repo, **opts):
    """dump copy information compared to p2"""

    opts = pycompat.byteskwargs(opts)
    ctx = scmutil.revsingle(repo, opts.get(b'rev'), default=None)
    for dst, src in ctx.p2copies().items():
        ui.write(b'%s -> %s\n' % (src, dst))


@command(
    b'debugpathcomplete',
    [
        (b'f', b'full', None, _(b'complete an entire path')),
        (b'n', b'normal', None, _(b'show only normal files')),
        (b'a', b'added', None, _(b'show only added files')),
        (b'r', b'removed', None, _(b'show only removed files')),
    ],
    _(b'FILESPEC...'),
)
def debugpathcomplete(ui, repo, *specs, **opts):
    """complete part or all of a tracked path

    This command supports shells that offer path name completion. It
    currently completes only files already known to the dirstate.

    Completion extends only to the next path segment unless
    --full is specified, in which case entire paths are used."""

    def complete(path, acceptable):
        dirstate = repo.dirstate
        spec = os.path.normpath(os.path.join(encoding.getcwd(), path))
        rootdir = repo.root + pycompat.ossep
        if spec != repo.root and not spec.startswith(rootdir):
            return [], []
        if os.path.isdir(spec):
            spec += b'/'
        spec = spec[len(rootdir) :]
        fixpaths = pycompat.ossep != b'/'
        if fixpaths:
            spec = spec.replace(pycompat.ossep, b'/')
        speclen = len(spec)
        fullpaths = opts['full']
        files, dirs = set(), set()
        adddir, addfile = dirs.add, files.add
        for f, st in pycompat.iteritems(dirstate):
            if f.startswith(spec) and st.state in acceptable:
                if fixpaths:
                    f = f.replace(b'/', pycompat.ossep)
                if fullpaths:
                    addfile(f)
                    continue
                s = f.find(pycompat.ossep, speclen)
                if s >= 0:
                    adddir(f[:s])
                else:
                    addfile(f)
        return files, dirs

    acceptable = b''
    if opts['normal']:
        acceptable += b'nm'
    if opts['added']:
        acceptable += b'a'
    if opts['removed']:
        acceptable += b'r'
    cwd = repo.getcwd()
    if not specs:
        specs = [b'.']

    files, dirs = set(), set()
    for spec in specs:
        f, d = complete(spec, acceptable or b'nmar')
        files.update(f)
        dirs.update(d)
    files.update(dirs)
    ui.write(b'\n'.join(repo.pathto(p, cwd) for p in sorted(files)))
    ui.write(b'\n')


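# A minimal sketch of the next-segment truncation done in complete()
# above (the path is made up):
#
#   >>> f, speclen = b'src/util/helpers.py', len(b'src/')
#   >>> s = f.find(b'/', speclen)
#   >>> f[:s] if s >= 0 else f  # complete only up to the next segment
#   b'src/util'

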
@command(
    b'debugpathcopies',
    cmdutil.walkopts,
    b'hg debugpathcopies REV1 REV2 [FILE]',
    inferrepo=True,
)
def debugpathcopies(ui, repo, rev1, rev2, *pats, **opts):
    """show copies between two revisions"""
    ctx1 = scmutil.revsingle(repo, rev1)
    ctx2 = scmutil.revsingle(repo, rev2)
    m = scmutil.match(ctx1, pats, opts)
    for dst, src in sorted(copies.pathcopies(ctx1, ctx2, m).items()):
        ui.write(b'%s -> %s\n' % (src, dst))


@command(b'debugpeer', [], _(b'PATH'), norepo=True)
def debugpeer(ui, path):
    """establish a connection to a peer repository"""
    # Always enable peer request logging. Requires --debug to display
    # though.
    overrides = {
        (b'devel', b'debug.peer-request'): True,
    }

    with ui.configoverride(overrides):
        peer = hg.peer(ui, {}, path)

        try:
            local = peer.local() is not None
            canpush = peer.canpush()

            ui.write(_(b'url: %s\n') % peer.url())
            ui.write(_(b'local: %s\n') % (_(b'yes') if local else _(b'no')))
            ui.write(
                _(b'pushable: %s\n') % (_(b'yes') if canpush else _(b'no'))
            )
        finally:
            peer.close()


2691 @command(
2710 @command(
2692 b'debugpickmergetool',
2711 b'debugpickmergetool',
2693 [
2712 [
2694 (b'r', b'rev', b'', _(b'check for files in this revision'), _(b'REV')),
2713 (b'r', b'rev', b'', _(b'check for files in this revision'), _(b'REV')),
2695 (b'', b'changedelete', None, _(b'emulate merging change and delete')),
2714 (b'', b'changedelete', None, _(b'emulate merging change and delete')),
2696 ]
2715 ]
2697 + cmdutil.walkopts
2716 + cmdutil.walkopts
2698 + cmdutil.mergetoolopts,
2717 + cmdutil.mergetoolopts,
2699 _(b'[PATTERN]...'),
2718 _(b'[PATTERN]...'),
2700 inferrepo=True,
2719 inferrepo=True,
2701 )
2720 )
2702 def debugpickmergetool(ui, repo, *pats, **opts):
2721 def debugpickmergetool(ui, repo, *pats, **opts):
2703 """examine which merge tool is chosen for specified file
2722 """examine which merge tool is chosen for specified file
2704
2723
2705 As described in :hg:`help merge-tools`, Mercurial examines
2724 As described in :hg:`help merge-tools`, Mercurial examines
2706 configurations below in this order to decide which merge tool is
2725 configurations below in this order to decide which merge tool is
2707 chosen for specified file.
2726 chosen for specified file.
2708
2727
2709 1. ``--tool`` option
2728 1. ``--tool`` option
2710 2. ``HGMERGE`` environment variable
2729 2. ``HGMERGE`` environment variable
2711 3. configurations in ``merge-patterns`` section
2730 3. configurations in ``merge-patterns`` section
2712 4. configuration of ``ui.merge``
2731 4. configuration of ``ui.merge``
2713 5. configurations in ``merge-tools`` section
2732 5. configurations in ``merge-tools`` section
2714 6. ``hgmerge`` tool (for historical reason only)
2733 6. ``hgmerge`` tool (for historical reason only)
2715 7. default tool for fallback (``:merge`` or ``:prompt``)
2734 7. default tool for fallback (``:merge`` or ``:prompt``)
2716
2735
2717 This command writes out examination result in the style below::
2736 This command writes out examination result in the style below::
2718
2737
2719 FILE = MERGETOOL
2738 FILE = MERGETOOL
2720
2739
2721 By default, all files known in the first parent context of the
2740 By default, all files known in the first parent context of the
2722 working directory are examined. Use file patterns and/or -I/-X
2741 working directory are examined. Use file patterns and/or -I/-X
2723 options to limit target files. -r/--rev is also useful to examine
2742 options to limit target files. -r/--rev is also useful to examine
2724 files in another context without actual updating to it.
2743 files in another context without actual updating to it.
2725
2744
2726 With --debug, this command shows warning messages while matching
2745 With --debug, this command shows warning messages while matching
2727 against ``merge-patterns`` and so on, too. It is recommended to
2746 against ``merge-patterns`` and so on, too. It is recommended to
2728 use this option with explicit file patterns and/or -I/-X options,
2747 use this option with explicit file patterns and/or -I/-X options,
2729 because this option increases amount of output per file according
2748 because this option increases amount of output per file according
2730 to configurations in hgrc.
2749 to configurations in hgrc.
2731
2750
2732 With -v/--verbose, this command shows configurations below at
2751 With -v/--verbose, this command shows configurations below at
2733 first (only if specified).
2752 first (only if specified).
2734
2753
2735 - ``--tool`` option
2754 - ``--tool`` option
2736 - ``HGMERGE`` environment variable
2755 - ``HGMERGE`` environment variable
2737 - configuration of ``ui.merge``
2756 - configuration of ``ui.merge``
2738
2757
2739 If merge tool is chosen before matching against
2758 If merge tool is chosen before matching against
2740 ``merge-patterns``, this command can't show any helpful
2759 ``merge-patterns``, this command can't show any helpful
2741 information, even with --debug. In such case, information above is
2760 information, even with --debug. In such case, information above is
2742 useful to know why a merge tool is chosen.
2761 useful to know why a merge tool is chosen.
2743 """
2762 """
2744 opts = pycompat.byteskwargs(opts)
2763 opts = pycompat.byteskwargs(opts)
2745 overrides = {}
2764 overrides = {}
2746 if opts[b'tool']:
2765 if opts[b'tool']:
2747 overrides[(b'ui', b'forcemerge')] = opts[b'tool']
2766 overrides[(b'ui', b'forcemerge')] = opts[b'tool']
2748 ui.notenoi18n(b'with --tool %r\n' % (pycompat.bytestr(opts[b'tool'])))
2767 ui.notenoi18n(b'with --tool %r\n' % (pycompat.bytestr(opts[b'tool'])))
2749
2768
2750 with ui.configoverride(overrides, b'debugmergepatterns'):
2769 with ui.configoverride(overrides, b'debugmergepatterns'):
2751 hgmerge = encoding.environ.get(b"HGMERGE")
2770 hgmerge = encoding.environ.get(b"HGMERGE")
2752 if hgmerge is not None:
2771 if hgmerge is not None:
2753 ui.notenoi18n(b'with HGMERGE=%r\n' % (pycompat.bytestr(hgmerge)))
2772 ui.notenoi18n(b'with HGMERGE=%r\n' % (pycompat.bytestr(hgmerge)))
2754 uimerge = ui.config(b"ui", b"merge")
2773 uimerge = ui.config(b"ui", b"merge")
2755 if uimerge:
2774 if uimerge:
2756 ui.notenoi18n(b'with ui.merge=%r\n' % (pycompat.bytestr(uimerge)))
2775 ui.notenoi18n(b'with ui.merge=%r\n' % (pycompat.bytestr(uimerge)))
2757
2776
2758 ctx = scmutil.revsingle(repo, opts.get(b'rev'))
2777 ctx = scmutil.revsingle(repo, opts.get(b'rev'))
2759 m = scmutil.match(ctx, pats, opts)
2778 m = scmutil.match(ctx, pats, opts)
2760 changedelete = opts[b'changedelete']
2779 changedelete = opts[b'changedelete']
2761 for path in ctx.walk(m):
2780 for path in ctx.walk(m):
2762 fctx = ctx[path]
2781 fctx = ctx[path]
2763 with ui.silent(
2782 with ui.silent(
2764 error=True
2783 error=True
2765 ) if not ui.debugflag else util.nullcontextmanager():
2784 ) if not ui.debugflag else util.nullcontextmanager():
2766 tool, toolpath = filemerge._picktool(
2785 tool, toolpath = filemerge._picktool(
2767 repo,
2786 repo,
2768 ui,
2787 ui,
2769 path,
2788 path,
2770 fctx.isbinary(),
2789 fctx.isbinary(),
2771 b'l' in fctx.flags(),
2790 b'l' in fctx.flags(),
2772 changedelete,
2791 changedelete,
2773 )
2792 )
2774 ui.write(b'%s = %s\n' % (path, tool))
2793 ui.write(b'%s = %s\n' % (path, tool))
2775
2794
2776
2795
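A minimal sketch of the override mechanism used above: --tool is mapped onto
the ui.forcemerge configuration for the duration of the command. The tool
name here is hypothetical, purely for illustration:

    overrides = {(b'ui', b'forcemerge'): b'mytool'}  # hypothetical tool name
    with ui.configoverride(overrides, b'debugmergepatterns'):
        # within this block, lookups of ('ui', 'forcemerge') return
        # b'mytool'; the previous value is restored when the block exits
        pass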
2777 @command(b'debugpushkey', [], _(b'REPO NAMESPACE [KEY OLD NEW]'), norepo=True)
2796 @command(b'debugpushkey', [], _(b'REPO NAMESPACE [KEY OLD NEW]'), norepo=True)
2778 def debugpushkey(ui, repopath, namespace, *keyinfo, **opts):
2797 def debugpushkey(ui, repopath, namespace, *keyinfo, **opts):
2779 """access the pushkey key/value protocol
2798 """access the pushkey key/value protocol
2780
2799
2781 With two args, list the keys in the given namespace.
2800 With two args, list the keys in the given namespace.
2782
2801
2783 With five args, set a key to new if it currently is set to old.
2802 With five args, set a key to new if it currently is set to old.
2784 Reports success or failure.
2803 Reports success or failure.
2785 """
2804 """
2786
2805
2787 target = hg.peer(ui, {}, repopath)
2806 target = hg.peer(ui, {}, repopath)
2788 try:
2807 try:
2789 if keyinfo:
2808 if keyinfo:
2790 key, old, new = keyinfo
2809 key, old, new = keyinfo
2791 with target.commandexecutor() as e:
2810 with target.commandexecutor() as e:
2792 r = e.callcommand(
2811 r = e.callcommand(
2793 b'pushkey',
2812 b'pushkey',
2794 {
2813 {
2795 b'namespace': namespace,
2814 b'namespace': namespace,
2796 b'key': key,
2815 b'key': key,
2797 b'old': old,
2816 b'old': old,
2798 b'new': new,
2817 b'new': new,
2799 },
2818 },
2800 ).result()
2819 ).result()
2801
2820
2802 ui.status(pycompat.bytestr(r) + b'\n')
2821 ui.status(pycompat.bytestr(r) + b'\n')
2803 return not r
2822 return not r
2804 else:
2823 else:
2805 for k, v in sorted(pycompat.iteritems(target.listkeys(namespace))):
2824 for k, v in sorted(pycompat.iteritems(target.listkeys(namespace))):
2806 ui.write(
2825 ui.write(
2807 b"%s\t%s\n"
2826 b"%s\t%s\n"
2808 % (stringutil.escapestr(k), stringutil.escapestr(v))
2827 % (stringutil.escapestr(k), stringutil.escapestr(v))
2809 )
2828 )
2810 finally:
2829 finally:
2811 target.close()
2830 target.close()
2812
2831
2813
2832
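A hedged usage sketch of the two calling conventions described in the
docstring (the repository path and key values are illustrative):

    $ hg debugpushkey /path/to/repo namespaces
    $ hg debugpushkey /path/to/repo bookmarks mybook <oldnode> <newnode>

The five-argument form performs the conditional update and, because the
function returns ``not r``, exits with 0 exactly when the peer reports
success.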
2814 @command(b'debugpvec', [], _(b'A B'))
2833 @command(b'debugpvec', [], _(b'A B'))
2815 def debugpvec(ui, repo, a, b=None):
2834 def debugpvec(ui, repo, a, b=None):
2816 ca = scmutil.revsingle(repo, a)
2835 ca = scmutil.revsingle(repo, a)
2817 cb = scmutil.revsingle(repo, b)
2836 cb = scmutil.revsingle(repo, b)
2818 pa = pvec.ctxpvec(ca)
2837 pa = pvec.ctxpvec(ca)
2819 pb = pvec.ctxpvec(cb)
2838 pb = pvec.ctxpvec(cb)
2820 if pa == pb:
2839 if pa == pb:
2821 rel = b"="
2840 rel = b"="
2822 elif pa > pb:
2841 elif pa > pb:
2823 rel = b">"
2842 rel = b">"
2824 elif pa < pb:
2843 elif pa < pb:
2825 rel = b"<"
2844 rel = b"<"
2826 elif pa | pb:
2845 elif pa | pb:
2827 rel = b"|"
2846 rel = b"|"
2828 ui.write(_(b"a: %s\n") % pa)
2847 ui.write(_(b"a: %s\n") % pa)
2829 ui.write(_(b"b: %s\n") % pb)
2848 ui.write(_(b"b: %s\n") % pb)
2830 ui.write(_(b"depth(a): %d depth(b): %d\n") % (pa._depth, pb._depth))
2849 ui.write(_(b"depth(a): %d depth(b): %d\n") % (pa._depth, pb._depth))
2831 ui.write(
2850 ui.write(
2832 _(b"delta: %d hdist: %d distance: %d relation: %s\n")
2851 _(b"delta: %d hdist: %d distance: %d relation: %s\n")
2833 % (
2852 % (
2834 abs(pa._depth - pb._depth),
2853 abs(pa._depth - pb._depth),
2835 pvec._hamming(pa._vec, pb._vec),
2854 pvec._hamming(pa._vec, pb._vec),
2836 pa.distance(pb),
2855 pa.distance(pb),
2837 rel,
2856 rel,
2838 )
2857 )
2839 )
2858 )
2840
2859
2841
2860
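The ``hdist`` figure above is a Hamming distance between the two parent
vectors; a toy sketch of the same idea on plain integers (not actual pvecs):

    def hamming(a, b):
        # count of differing bits between two bit patterns
        return bin(a ^ b).count('1')

    hamming(0b1010, 0b0011)  # 2: bits 1 and 3 differ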
2842 @command(
2861 @command(
2843 b'debugrebuilddirstate|debugrebuildstate',
2862 b'debugrebuilddirstate|debugrebuildstate',
2844 [
2863 [
2845 (b'r', b'rev', b'', _(b'revision to rebuild to'), _(b'REV')),
2864 (b'r', b'rev', b'', _(b'revision to rebuild to'), _(b'REV')),
2846 (
2865 (
2847 b'',
2866 b'',
2848 b'minimal',
2867 b'minimal',
2849 None,
2868 None,
2850 _(
2869 _(
2851 b'only rebuild files that are inconsistent with '
2870 b'only rebuild files that are inconsistent with '
2852 b'the working copy parent'
2871 b'the working copy parent'
2853 ),
2872 ),
2854 ),
2873 ),
2855 ],
2874 ],
2856 _(b'[-r REV]'),
2875 _(b'[-r REV]'),
2857 )
2876 )
2858 def debugrebuilddirstate(ui, repo, rev, **opts):
2877 def debugrebuilddirstate(ui, repo, rev, **opts):
2859 """rebuild the dirstate as it would look like for the given revision
2878 """rebuild the dirstate as it would look like for the given revision
2860
2879
2861 If no revision is specified the first current parent will be used.
2880 If no revision is specified the first current parent will be used.
2862
2881
2863 The dirstate will be set to the files of the given revision.
2882 The dirstate will be set to the files of the given revision.
2864 The actual working directory content or existing dirstate
2883 The actual working directory content or existing dirstate
2865 information such as adds or removes is not considered.
2884 information such as adds or removes is not considered.
2866
2885
2867 ``minimal`` will only rebuild the dirstate status for files that claim to be
2886 ``minimal`` will only rebuild the dirstate status for files that claim to be
2868 tracked but are not in the parent manifest, or that exist in the parent
2887 tracked but are not in the parent manifest, or that exist in the parent
2869 manifest but are not in the dirstate. It will not change adds, removes, or
2888 manifest but are not in the dirstate. It will not change adds, removes, or
2870 modified files that are in the working copy parent.
2889 modified files that are in the working copy parent.
2871
2890
2872 One use of this command is to make the next :hg:`status` invocation
2891 One use of this command is to make the next :hg:`status` invocation
2873 check the actual file content.
2892 check the actual file content.
2874 """
2893 """
2875 ctx = scmutil.revsingle(repo, rev)
2894 ctx = scmutil.revsingle(repo, rev)
2876 with repo.wlock():
2895 with repo.wlock():
2877 dirstate = repo.dirstate
2896 dirstate = repo.dirstate
2878 changedfiles = None
2897 changedfiles = None
2879 # See command doc for what minimal does.
2898 # See command doc for what minimal does.
2880 if opts.get('minimal'):
2899 if opts.get('minimal'):
2881 manifestfiles = set(ctx.manifest().keys())
2900 manifestfiles = set(ctx.manifest().keys())
2882 dirstatefiles = set(dirstate)
2901 dirstatefiles = set(dirstate)
2883 manifestonly = manifestfiles - dirstatefiles
2902 manifestonly = manifestfiles - dirstatefiles
2884 dsonly = dirstatefiles - manifestfiles
2903 dsonly = dirstatefiles - manifestfiles
2885 dsnotadded = {f for f in dsonly if dirstate[f] != b'a'}
2904 dsnotadded = {f for f in dsonly if dirstate[f] != b'a'}
2886 changedfiles = manifestonly | dsnotadded
2905 changedfiles = manifestonly | dsnotadded
2887
2906
2888 dirstate.rebuild(ctx.node(), ctx.manifest(), changedfiles)
2907 dirstate.rebuild(ctx.node(), ctx.manifest(), changedfiles)
2889
2908
2890
2909
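The --minimal selection above is plain set arithmetic; a toy illustration
with made-up file names:

    manifestfiles = {b'a', b'b', b'c'}        # files in the target revision
    dirstatefiles = {b'b', b'c', b'd', b'e'}  # files the dirstate knows
    manifestonly = manifestfiles - dirstatefiles  # {b'a'}
    dsonly = dirstatefiles - manifestfiles        # {b'd', b'e'}
    # of dsonly, only entries not in the 'a' (added) state are rebuilt;
    # manifestonly entries are always rebuilt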
2891 @command(b'debugrebuildfncache', [], b'')
2910 @command(b'debugrebuildfncache', [], b'')
2892 def debugrebuildfncache(ui, repo):
2911 def debugrebuildfncache(ui, repo):
2893 """rebuild the fncache file"""
2912 """rebuild the fncache file"""
2894 repair.rebuildfncache(ui, repo)
2913 repair.rebuildfncache(ui, repo)
2895
2914
2896
2915
2897 @command(
2916 @command(
2898 b'debugrename',
2917 b'debugrename',
2899 [(b'r', b'rev', b'', _(b'revision to debug'), _(b'REV'))],
2918 [(b'r', b'rev', b'', _(b'revision to debug'), _(b'REV'))],
2900 _(b'[-r REV] [FILE]...'),
2919 _(b'[-r REV] [FILE]...'),
2901 )
2920 )
2902 def debugrename(ui, repo, *pats, **opts):
2921 def debugrename(ui, repo, *pats, **opts):
2903 """dump rename information"""
2922 """dump rename information"""
2904
2923
2905 opts = pycompat.byteskwargs(opts)
2924 opts = pycompat.byteskwargs(opts)
2906 ctx = scmutil.revsingle(repo, opts.get(b'rev'))
2925 ctx = scmutil.revsingle(repo, opts.get(b'rev'))
2907 m = scmutil.match(ctx, pats, opts)
2926 m = scmutil.match(ctx, pats, opts)
2908 for abs in ctx.walk(m):
2927 for abs in ctx.walk(m):
2909 fctx = ctx[abs]
2928 fctx = ctx[abs]
2910 o = fctx.filelog().renamed(fctx.filenode())
2929 o = fctx.filelog().renamed(fctx.filenode())
2911 rel = repo.pathto(abs)
2930 rel = repo.pathto(abs)
2912 if o:
2931 if o:
2913 ui.write(_(b"%s renamed from %s:%s\n") % (rel, o[0], hex(o[1])))
2932 ui.write(_(b"%s renamed from %s:%s\n") % (rel, o[0], hex(o[1])))
2914 else:
2933 else:
2915 ui.write(_(b"%s not renamed\n") % rel)
2934 ui.write(_(b"%s not renamed\n") % rel)
2916
2935
2917
2936
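A hedged sketch of how the renamed() result used above is interpreted (the
return value is either falsy or a two-tuple):

    o = fctx.filelog().renamed(fctx.filenode())
    if o:
        oldpath, oldnode = o  # copy source path and source file node
    # a falsy result means this file revision carries no rename metadata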
2918 @command(b'debugrequires|debugrequirements', [], b'')
2937 @command(b'debugrequires|debugrequirements', [], b'')
2919 def debugrequirements(ui, repo):
2938 def debugrequirements(ui, repo):
2920 """print the current repo requirements"""
2939 """print the current repo requirements"""
2921 for r in sorted(repo.requirements):
2940 for r in sorted(repo.requirements):
2922 ui.write(b"%s\n" % r)
2941 ui.write(b"%s\n" % r)
2923
2942
2924
2943
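A hedged example of typical output on a modern repository (the exact entries
vary with the repository's format options):

    $ hg debugrequirements
    dotencode
    fncache
    generaldelta
    revlogv1
    sparserevlog
    store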
2925 @command(
2944 @command(
2926 b'debugrevlog',
2945 b'debugrevlog',
2927 cmdutil.debugrevlogopts + [(b'd', b'dump', False, _(b'dump index data'))],
2946 cmdutil.debugrevlogopts + [(b'd', b'dump', False, _(b'dump index data'))],
2928 _(b'-c|-m|FILE'),
2947 _(b'-c|-m|FILE'),
2929 optionalrepo=True,
2948 optionalrepo=True,
2930 )
2949 )
2931 def debugrevlog(ui, repo, file_=None, **opts):
2950 def debugrevlog(ui, repo, file_=None, **opts):
2932 """show data and statistics about a revlog"""
2951 """show data and statistics about a revlog"""
2933 opts = pycompat.byteskwargs(opts)
2952 opts = pycompat.byteskwargs(opts)
2934 r = cmdutil.openrevlog(repo, b'debugrevlog', file_, opts)
2953 r = cmdutil.openrevlog(repo, b'debugrevlog', file_, opts)
2935
2954
2936 if opts.get(b"dump"):
2955 if opts.get(b"dump"):
2937 numrevs = len(r)
2956 numrevs = len(r)
2938 ui.write(
2957 ui.write(
2939 (
2958 (
2940 b"# rev p1rev p2rev start end deltastart base p1 p2"
2959 b"# rev p1rev p2rev start end deltastart base p1 p2"
2941 b" rawsize totalsize compression heads chainlen\n"
2960 b" rawsize totalsize compression heads chainlen\n"
2942 )
2961 )
2943 )
2962 )
2944 ts = 0
2963 ts = 0
2945 heads = set()
2964 heads = set()
2946
2965
2947 for rev in pycompat.xrange(numrevs):
2966 for rev in pycompat.xrange(numrevs):
2948 dbase = r.deltaparent(rev)
2967 dbase = r.deltaparent(rev)
2949 if dbase == -1:
2968 if dbase == -1:
2950 dbase = rev
2969 dbase = rev
2951 cbase = r.chainbase(rev)
2970 cbase = r.chainbase(rev)
2952 clen = r.chainlen(rev)
2971 clen = r.chainlen(rev)
2953 p1, p2 = r.parentrevs(rev)
2972 p1, p2 = r.parentrevs(rev)
2954 rs = r.rawsize(rev)
2973 rs = r.rawsize(rev)
2955 ts = ts + rs
2974 ts = ts + rs
2956 heads -= set(r.parentrevs(rev))
2975 heads -= set(r.parentrevs(rev))
2957 heads.add(rev)
2976 heads.add(rev)
2958 try:
2977 try:
2959 compression = ts / r.end(rev)
2978 compression = ts / r.end(rev)
2960 except ZeroDivisionError:
2979 except ZeroDivisionError:
2961 compression = 0
2980 compression = 0
2962 ui.write(
2981 ui.write(
2963 b"%5d %5d %5d %5d %5d %10d %4d %4d %4d %7d %9d "
2982 b"%5d %5d %5d %5d %5d %10d %4d %4d %4d %7d %9d "
2964 b"%11d %5d %8d\n"
2983 b"%11d %5d %8d\n"
2965 % (
2984 % (
2966 rev,
2985 rev,
2967 p1,
2986 p1,
2968 p2,
2987 p2,
2969 r.start(rev),
2988 r.start(rev),
2970 r.end(rev),
2989 r.end(rev),
2971 r.start(dbase),
2990 r.start(dbase),
2972 r.start(cbase),
2991 r.start(cbase),
2973 r.start(p1),
2992 r.start(p1),
2974 r.start(p2),
2993 r.start(p2),
2975 rs,
2994 rs,
2976 ts,
2995 ts,
2977 compression,
2996 compression,
2978 len(heads),
2997 len(heads),
2979 clen,
2998 clen,
2980 )
2999 )
2981 )
3000 )
2982 return 0
3001 return 0
2983
3002
2984 format = r._format_version
3003 format = r._format_version
2985 v = r._format_flags
3004 v = r._format_flags
2986 flags = []
3005 flags = []
2987 gdelta = False
3006 gdelta = False
2988 if v & revlog.FLAG_INLINE_DATA:
3007 if v & revlog.FLAG_INLINE_DATA:
2989 flags.append(b'inline')
3008 flags.append(b'inline')
2990 if v & revlog.FLAG_GENERALDELTA:
3009 if v & revlog.FLAG_GENERALDELTA:
2991 gdelta = True
3010 gdelta = True
2992 flags.append(b'generaldelta')
3011 flags.append(b'generaldelta')
2993 if not flags:
3012 if not flags:
2994 flags = [b'(none)']
3013 flags = [b'(none)']
2995
3014
2996 ### tracks merge vs single parent
3015 ### tracks merge vs single parent
2997 nummerges = 0
3016 nummerges = 0
2998
3017
2999 ### tracks the way each "delta" is built
3018 ### tracks the way each "delta" is built
3000 # nodelta
3019 # nodelta
3001 numempty = 0
3020 numempty = 0
3002 numemptytext = 0
3021 numemptytext = 0
3003 numemptydelta = 0
3022 numemptydelta = 0
3004 # full file content
3023 # full file content
3005 numfull = 0
3024 numfull = 0
3006 # intermediate snapshot against a prior snapshot
3025 # intermediate snapshot against a prior snapshot
3007 numsemi = 0
3026 numsemi = 0
3008 # snapshot count per depth
3027 # snapshot count per depth
3009 numsnapdepth = collections.defaultdict(lambda: 0)
3028 numsnapdepth = collections.defaultdict(lambda: 0)
3010 # delta against previous revision
3029 # delta against previous revision
3011 numprev = 0
3030 numprev = 0
3012 # delta against first or second parent (not prev)
3031 # delta against first or second parent (not prev)
3013 nump1 = 0
3032 nump1 = 0
3014 nump2 = 0
3033 nump2 = 0
3015 # delta against neither prev nor parents
3034 # delta against neither prev nor parents
3016 numother = 0
3035 numother = 0
3017 # delta against prev that are also first or second parent
3036 # delta against prev that are also first or second parent
3018 # (details of `numprev`)
3037 # (details of `numprev`)
3019 nump1prev = 0
3038 nump1prev = 0
3020 nump2prev = 0
3039 nump2prev = 0
3021
3040
3022 # data about the delta chain of each rev
3041 # data about the delta chain of each rev
3023 chainlengths = []
3042 chainlengths = []
3024 chainbases = []
3043 chainbases = []
3025 chainspans = []
3044 chainspans = []
3026
3045
3027 # data about each revision
3046 # data about each revision
3028 datasize = [None, 0, 0]
3047 datasize = [None, 0, 0]
3029 fullsize = [None, 0, 0]
3048 fullsize = [None, 0, 0]
3030 semisize = [None, 0, 0]
3049 semisize = [None, 0, 0]
3031 # snapshot count per depth
3050 # snapshot count per depth
3032 snapsizedepth = collections.defaultdict(lambda: [None, 0, 0])
3051 snapsizedepth = collections.defaultdict(lambda: [None, 0, 0])
3033 deltasize = [None, 0, 0]
3052 deltasize = [None, 0, 0]
3034 chunktypecounts = {}
3053 chunktypecounts = {}
3035 chunktypesizes = {}
3054 chunktypesizes = {}
3036
3055
3037 def addsize(size, l):
3056 def addsize(size, l):
3038 if l[0] is None or size < l[0]:
3057 if l[0] is None or size < l[0]:
3039 l[0] = size
3058 l[0] = size
3040 if size > l[1]:
3059 if size > l[1]:
3041 l[1] = size
3060 l[1] = size
3042 l[2] += size
3061 l[2] += size
3043
3062
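# addsize() keeps a [min, max, running-total] triple in place; a toy trace
# with illustrative sizes: feeding 5, 2, 9 into l = [None, 0, 0] leaves
# l == [2, 9, 16].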
3044 numrevs = len(r)
3063 numrevs = len(r)
3045 for rev in pycompat.xrange(numrevs):
3064 for rev in pycompat.xrange(numrevs):
3046 p1, p2 = r.parentrevs(rev)
3065 p1, p2 = r.parentrevs(rev)
3047 delta = r.deltaparent(rev)
3066 delta = r.deltaparent(rev)
3048 if format > 0:
3067 if format > 0:
3049 addsize(r.rawsize(rev), datasize)
3068 addsize(r.rawsize(rev), datasize)
3050 if p2 != nullrev:
3069 if p2 != nullrev:
3051 nummerges += 1
3070 nummerges += 1
3052 size = r.length(rev)
3071 size = r.length(rev)
3053 if delta == nullrev:
3072 if delta == nullrev:
3054 chainlengths.append(0)
3073 chainlengths.append(0)
3055 chainbases.append(r.start(rev))
3074 chainbases.append(r.start(rev))
3056 chainspans.append(size)
3075 chainspans.append(size)
3057 if size == 0:
3076 if size == 0:
3058 numempty += 1
3077 numempty += 1
3059 numemptytext += 1
3078 numemptytext += 1
3060 else:
3079 else:
3061 numfull += 1
3080 numfull += 1
3062 numsnapdepth[0] += 1
3081 numsnapdepth[0] += 1
3063 addsize(size, fullsize)
3082 addsize(size, fullsize)
3064 addsize(size, snapsizedepth[0])
3083 addsize(size, snapsizedepth[0])
3065 else:
3084 else:
3066 chainlengths.append(chainlengths[delta] + 1)
3085 chainlengths.append(chainlengths[delta] + 1)
3067 baseaddr = chainbases[delta]
3086 baseaddr = chainbases[delta]
3068 revaddr = r.start(rev)
3087 revaddr = r.start(rev)
3069 chainbases.append(baseaddr)
3088 chainbases.append(baseaddr)
3070 chainspans.append((revaddr - baseaddr) + size)
3089 chainspans.append((revaddr - baseaddr) + size)
3071 if size == 0:
3090 if size == 0:
3072 numempty += 1
3091 numempty += 1
3073 numemptydelta += 1
3092 numemptydelta += 1
3074 elif r.issnapshot(rev):
3093 elif r.issnapshot(rev):
3075 addsize(size, semisize)
3094 addsize(size, semisize)
3076 numsemi += 1
3095 numsemi += 1
3077 depth = r.snapshotdepth(rev)
3096 depth = r.snapshotdepth(rev)
3078 numsnapdepth[depth] += 1
3097 numsnapdepth[depth] += 1
3079 addsize(size, snapsizedepth[depth])
3098 addsize(size, snapsizedepth[depth])
3080 else:
3099 else:
3081 addsize(size, deltasize)
3100 addsize(size, deltasize)
3082 if delta == rev - 1:
3101 if delta == rev - 1:
3083 numprev += 1
3102 numprev += 1
3084 if delta == p1:
3103 if delta == p1:
3085 nump1prev += 1
3104 nump1prev += 1
3086 elif delta == p2:
3105 elif delta == p2:
3087 nump2prev += 1
3106 nump2prev += 1
3088 elif delta == p1:
3107 elif delta == p1:
3089 nump1 += 1
3108 nump1 += 1
3090 elif delta == p2:
3109 elif delta == p2:
3091 nump2 += 1
3110 nump2 += 1
3092 elif delta != nullrev:
3111 elif delta != nullrev:
3093 numother += 1
3112 numother += 1
3094
3113
3095 # Obtain data on the raw chunks in the revlog.
3114 # Obtain data on the raw chunks in the revlog.
3096 if util.safehasattr(r, b'_getsegmentforrevs'):
3115 if util.safehasattr(r, b'_getsegmentforrevs'):
3097 segment = r._getsegmentforrevs(rev, rev)[1]
3116 segment = r._getsegmentforrevs(rev, rev)[1]
3098 else:
3117 else:
3099 segment = r._revlog._getsegmentforrevs(rev, rev)[1]
3118 segment = r._revlog._getsegmentforrevs(rev, rev)[1]
3100 if segment:
3119 if segment:
3101 chunktype = bytes(segment[0:1])
3120 chunktype = bytes(segment[0:1])
3102 else:
3121 else:
3103 chunktype = b'empty'
3122 chunktype = b'empty'
3104
3123
3105 if chunktype not in chunktypecounts:
3124 if chunktype not in chunktypecounts:
3106 chunktypecounts[chunktype] = 0
3125 chunktypecounts[chunktype] = 0
3107 chunktypesizes[chunktype] = 0
3126 chunktypesizes[chunktype] = 0
3108
3127
3109 chunktypecounts[chunktype] += 1
3128 chunktypecounts[chunktype] += 1
3110 chunktypesizes[chunktype] += size
3129 chunktypesizes[chunktype] += size
3111
3130
3131 # Adjust the minimum size value for the empty cases
3150 # Adjust the minimum size value for the empty cases
3113 for size in (datasize, fullsize, semisize, deltasize):
3132 for size in (datasize, fullsize, semisize, deltasize):
3114 if size[0] is None:
3133 if size[0] is None:
3115 size[0] = 0
3134 size[0] = 0
3116
3135
3117 numdeltas = numrevs - numfull - numempty - numsemi
3136 numdeltas = numrevs - numfull - numempty - numsemi
3118 numoprev = numprev - nump1prev - nump2prev
3137 numoprev = numprev - nump1prev - nump2prev
3119 totalrawsize = datasize[2]
3138 totalrawsize = datasize[2]
3120 datasize[2] /= numrevs
3139 datasize[2] /= numrevs
3121 fulltotal = fullsize[2]
3140 fulltotal = fullsize[2]
3122 if numfull == 0:
3141 if numfull == 0:
3123 fullsize[2] = 0
3142 fullsize[2] = 0
3124 else:
3143 else:
3125 fullsize[2] /= numfull
3144 fullsize[2] /= numfull
3126 semitotal = semisize[2]
3145 semitotal = semisize[2]
3127 snaptotal = {}
3146 snaptotal = {}
3128 if numsemi > 0:
3147 if numsemi > 0:
3129 semisize[2] /= numsemi
3148 semisize[2] /= numsemi
3130 for depth in snapsizedepth:
3149 for depth in snapsizedepth:
3131 snaptotal[depth] = snapsizedepth[depth][2]
3150 snaptotal[depth] = snapsizedepth[depth][2]
3132 snapsizedepth[depth][2] /= numsnapdepth[depth]
3151 snapsizedepth[depth][2] /= numsnapdepth[depth]
3133
3152
3134 deltatotal = deltasize[2]
3153 deltatotal = deltasize[2]
3135 if numdeltas > 0:
3154 if numdeltas > 0:
3136 deltasize[2] /= numdeltas
3155 deltasize[2] /= numdeltas
3137 totalsize = fulltotal + semitotal + deltatotal
3156 totalsize = fulltotal + semitotal + deltatotal
3138 avgchainlen = sum(chainlengths) / numrevs
3157 avgchainlen = sum(chainlengths) / numrevs
3139 maxchainlen = max(chainlengths)
3158 maxchainlen = max(chainlengths)
3140 maxchainspan = max(chainspans)
3159 maxchainspan = max(chainspans)
3141 compratio = 1
3160 compratio = 1
3142 if totalsize:
3161 if totalsize:
3143 compratio = totalrawsize / totalsize
3162 compratio = totalrawsize / totalsize
3144
3163
3145 basedfmtstr = b'%%%dd\n'
3164 basedfmtstr = b'%%%dd\n'
3146 basepcfmtstr = b'%%%dd %s(%%5.2f%%%%)\n'
3165 basepcfmtstr = b'%%%dd %s(%%5.2f%%%%)\n'
3147
3166
3148 def dfmtstr(max):
3167 def dfmtstr(max):
3149 return basedfmtstr % len(str(max))
3168 return basedfmtstr % len(str(max))
3150
3169
3151 def pcfmtstr(max, padding=0):
3170 def pcfmtstr(max, padding=0):
3152 return basepcfmtstr % (len(str(max)), b' ' * padding)
3171 return basepcfmtstr % (len(str(max)), b' ' * padding)
3153
3172
3154 def pcfmt(value, total):
3173 def pcfmt(value, total):
3155 if total:
3174 if total:
3156 return (value, 100 * float(value) / total)
3175 return (value, 100 * float(value) / total)
3157 else:
3176 else:
3158 return value, 100.0
3177 return value, 100.0
3159
3178
3160 ui.writenoi18n(b'format : %d\n' % format)
3179 ui.writenoi18n(b'format : %d\n' % format)
3161 ui.writenoi18n(b'flags : %s\n' % b', '.join(flags))
3180 ui.writenoi18n(b'flags : %s\n' % b', '.join(flags))
3162
3181
3163 ui.write(b'\n')
3182 ui.write(b'\n')
3164 fmt = pcfmtstr(totalsize)
3183 fmt = pcfmtstr(totalsize)
3165 fmt2 = dfmtstr(totalsize)
3184 fmt2 = dfmtstr(totalsize)
3166 ui.writenoi18n(b'revisions : ' + fmt2 % numrevs)
3185 ui.writenoi18n(b'revisions : ' + fmt2 % numrevs)
3167 ui.writenoi18n(b' merges : ' + fmt % pcfmt(nummerges, numrevs))
3186 ui.writenoi18n(b' merges : ' + fmt % pcfmt(nummerges, numrevs))
3168 ui.writenoi18n(
3187 ui.writenoi18n(
3169 b' normal : ' + fmt % pcfmt(numrevs - nummerges, numrevs)
3188 b' normal : ' + fmt % pcfmt(numrevs - nummerges, numrevs)
3170 )
3189 )
3171 ui.writenoi18n(b'revisions : ' + fmt2 % numrevs)
3190 ui.writenoi18n(b'revisions : ' + fmt2 % numrevs)
3172 ui.writenoi18n(b' empty : ' + fmt % pcfmt(numempty, numrevs))
3191 ui.writenoi18n(b' empty : ' + fmt % pcfmt(numempty, numrevs))
3173 ui.writenoi18n(
3192 ui.writenoi18n(
3174 b' text : '
3193 b' text : '
3175 + fmt % pcfmt(numemptytext, numemptytext + numemptydelta)
3194 + fmt % pcfmt(numemptytext, numemptytext + numemptydelta)
3176 )
3195 )
3177 ui.writenoi18n(
3196 ui.writenoi18n(
3178 b' delta : '
3197 b' delta : '
3179 + fmt % pcfmt(numemptydelta, numemptytext + numemptydelta)
3198 + fmt % pcfmt(numemptydelta, numemptytext + numemptydelta)
3180 )
3199 )
3181 ui.writenoi18n(
3200 ui.writenoi18n(
3182 b' snapshot : ' + fmt % pcfmt(numfull + numsemi, numrevs)
3201 b' snapshot : ' + fmt % pcfmt(numfull + numsemi, numrevs)
3183 )
3202 )
3184 for depth in sorted(numsnapdepth):
3203 for depth in sorted(numsnapdepth):
3185 ui.write(
3204 ui.write(
3186 (b' lvl-%-3d : ' % depth)
3205 (b' lvl-%-3d : ' % depth)
3187 + fmt % pcfmt(numsnapdepth[depth], numrevs)
3206 + fmt % pcfmt(numsnapdepth[depth], numrevs)
3188 )
3207 )
3189 ui.writenoi18n(b' deltas : ' + fmt % pcfmt(numdeltas, numrevs))
3208 ui.writenoi18n(b' deltas : ' + fmt % pcfmt(numdeltas, numrevs))
3190 ui.writenoi18n(b'revision size : ' + fmt2 % totalsize)
3209 ui.writenoi18n(b'revision size : ' + fmt2 % totalsize)
3191 ui.writenoi18n(
3210 ui.writenoi18n(
3192 b' snapshot : ' + fmt % pcfmt(fulltotal + semitotal, totalsize)
3211 b' snapshot : ' + fmt % pcfmt(fulltotal + semitotal, totalsize)
3193 )
3212 )
3194 for depth in sorted(numsnapdepth):
3213 for depth in sorted(numsnapdepth):
3195 ui.write(
3214 ui.write(
3196 (b' lvl-%-3d : ' % depth)
3215 (b' lvl-%-3d : ' % depth)
3197 + fmt % pcfmt(snaptotal[depth], totalsize)
3216 + fmt % pcfmt(snaptotal[depth], totalsize)
3198 )
3217 )
3199 ui.writenoi18n(b' deltas : ' + fmt % pcfmt(deltatotal, totalsize))
3218 ui.writenoi18n(b' deltas : ' + fmt % pcfmt(deltatotal, totalsize))
3200
3219
3201 def fmtchunktype(chunktype):
3220 def fmtchunktype(chunktype):
3202 if chunktype == b'empty':
3221 if chunktype == b'empty':
3203 return b' %s : ' % chunktype
3222 return b' %s : ' % chunktype
3204 elif chunktype in pycompat.bytestr(string.ascii_letters):
3223 elif chunktype in pycompat.bytestr(string.ascii_letters):
3205 return b' 0x%s (%s) : ' % (hex(chunktype), chunktype)
3224 return b' 0x%s (%s) : ' % (hex(chunktype), chunktype)
3206 else:
3225 else:
3207 return b' 0x%s : ' % hex(chunktype)
3226 return b' 0x%s : ' % hex(chunktype)
3208
3227
3209 ui.write(b'\n')
3228 ui.write(b'\n')
3210 ui.writenoi18n(b'chunks : ' + fmt2 % numrevs)
3229 ui.writenoi18n(b'chunks : ' + fmt2 % numrevs)
3211 for chunktype in sorted(chunktypecounts):
3230 for chunktype in sorted(chunktypecounts):
3212 ui.write(fmtchunktype(chunktype))
3231 ui.write(fmtchunktype(chunktype))
3213 ui.write(fmt % pcfmt(chunktypecounts[chunktype], numrevs))
3232 ui.write(fmt % pcfmt(chunktypecounts[chunktype], numrevs))
3214 ui.writenoi18n(b'chunks size : ' + fmt2 % totalsize)
3233 ui.writenoi18n(b'chunks size : ' + fmt2 % totalsize)
3215 for chunktype in sorted(chunktypecounts):
3234 for chunktype in sorted(chunktypecounts):
3216 ui.write(fmtchunktype(chunktype))
3235 ui.write(fmtchunktype(chunktype))
3217 ui.write(fmt % pcfmt(chunktypesizes[chunktype], totalsize))
3236 ui.write(fmt % pcfmt(chunktypesizes[chunktype], totalsize))
3218
3237
3219 ui.write(b'\n')
3238 ui.write(b'\n')
3220 fmt = dfmtstr(max(avgchainlen, maxchainlen, maxchainspan, compratio))
3239 fmt = dfmtstr(max(avgchainlen, maxchainlen, maxchainspan, compratio))
3221 ui.writenoi18n(b'avg chain length : ' + fmt % avgchainlen)
3240 ui.writenoi18n(b'avg chain length : ' + fmt % avgchainlen)
3222 ui.writenoi18n(b'max chain length : ' + fmt % maxchainlen)
3241 ui.writenoi18n(b'max chain length : ' + fmt % maxchainlen)
3223 ui.writenoi18n(b'max chain reach : ' + fmt % maxchainspan)
3242 ui.writenoi18n(b'max chain reach : ' + fmt % maxchainspan)
3224 ui.writenoi18n(b'compression ratio : ' + fmt % compratio)
3243 ui.writenoi18n(b'compression ratio : ' + fmt % compratio)
3225
3244
3226 if format > 0:
3245 if format > 0:
3227 ui.write(b'\n')
3246 ui.write(b'\n')
3228 ui.writenoi18n(
3247 ui.writenoi18n(
3229 b'uncompressed data size (min/max/avg) : %d / %d / %d\n'
3248 b'uncompressed data size (min/max/avg) : %d / %d / %d\n'
3230 % tuple(datasize)
3249 % tuple(datasize)
3231 )
3250 )
3232 ui.writenoi18n(
3251 ui.writenoi18n(
3233 b'full revision size (min/max/avg) : %d / %d / %d\n'
3252 b'full revision size (min/max/avg) : %d / %d / %d\n'
3234 % tuple(fullsize)
3253 % tuple(fullsize)
3235 )
3254 )
3236 ui.writenoi18n(
3255 ui.writenoi18n(
3237 b'inter-snapshot size (min/max/avg) : %d / %d / %d\n'
3256 b'inter-snapshot size (min/max/avg) : %d / %d / %d\n'
3238 % tuple(semisize)
3257 % tuple(semisize)
3239 )
3258 )
3240 for depth in sorted(snapsizedepth):
3259 for depth in sorted(snapsizedepth):
3241 if depth == 0:
3260 if depth == 0:
3242 continue
3261 continue
3243 ui.writenoi18n(
3262 ui.writenoi18n(
3244 b' level-%-3d (min/max/avg) : %d / %d / %d\n'
3263 b' level-%-3d (min/max/avg) : %d / %d / %d\n'
3245 % ((depth,) + tuple(snapsizedepth[depth]))
3264 % ((depth,) + tuple(snapsizedepth[depth]))
3246 )
3265 )
3247 ui.writenoi18n(
3266 ui.writenoi18n(
3248 b'delta size (min/max/avg) : %d / %d / %d\n'
3267 b'delta size (min/max/avg) : %d / %d / %d\n'
3249 % tuple(deltasize)
3268 % tuple(deltasize)
3250 )
3269 )
3251
3270
3252 if numdeltas > 0:
3271 if numdeltas > 0:
3253 ui.write(b'\n')
3272 ui.write(b'\n')
3254 fmt = pcfmtstr(numdeltas)
3273 fmt = pcfmtstr(numdeltas)
3255 fmt2 = pcfmtstr(numdeltas, 4)
3274 fmt2 = pcfmtstr(numdeltas, 4)
3256 ui.writenoi18n(
3275 ui.writenoi18n(
3257 b'deltas against prev : ' + fmt % pcfmt(numprev, numdeltas)
3276 b'deltas against prev : ' + fmt % pcfmt(numprev, numdeltas)
3258 )
3277 )
3259 if numprev > 0:
3278 if numprev > 0:
3260 ui.writenoi18n(
3279 ui.writenoi18n(
3261 b' where prev = p1 : ' + fmt2 % pcfmt(nump1prev, numprev)
3280 b' where prev = p1 : ' + fmt2 % pcfmt(nump1prev, numprev)
3262 )
3281 )
3263 ui.writenoi18n(
3282 ui.writenoi18n(
3264 b' where prev = p2 : ' + fmt2 % pcfmt(nump2prev, numprev)
3283 b' where prev = p2 : ' + fmt2 % pcfmt(nump2prev, numprev)
3265 )
3284 )
3266 ui.writenoi18n(
3285 ui.writenoi18n(
3267 b' other : ' + fmt2 % pcfmt(numoprev, numprev)
3286 b' other : ' + fmt2 % pcfmt(numoprev, numprev)
3268 )
3287 )
3269 if gdelta:
3288 if gdelta:
3270 ui.writenoi18n(
3289 ui.writenoi18n(
3271 b'deltas against p1 : ' + fmt % pcfmt(nump1, numdeltas)
3290 b'deltas against p1 : ' + fmt % pcfmt(nump1, numdeltas)
3272 )
3291 )
3273 ui.writenoi18n(
3292 ui.writenoi18n(
3274 b'deltas against p2 : ' + fmt % pcfmt(nump2, numdeltas)
3293 b'deltas against p2 : ' + fmt % pcfmt(nump2, numdeltas)
3275 )
3294 )
3276 ui.writenoi18n(
3295 ui.writenoi18n(
3277 b'deltas against other : ' + fmt % pcfmt(numother, numdeltas)
3296 b'deltas against other : ' + fmt % pcfmt(numother, numdeltas)
3278 )
3297 )
3279
3298
3280
3299
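The compression ratio printed above is total uncompressed bytes over total
stored bytes; a toy check of the same arithmetic:

    totalrawsize = 1000  # sum of uncompressed revision sizes
    totalsize = 250      # bytes of snapshots + deltas actually stored
    compratio = totalrawsize / totalsize  # 4.0, i.e. ~4x space saving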
3281 @command(
3300 @command(
3282 b'debugrevlogindex',
3301 b'debugrevlogindex',
3283 cmdutil.debugrevlogopts
3302 cmdutil.debugrevlogopts
3284 + [(b'f', b'format', 0, _(b'revlog format'), _(b'FORMAT'))],
3303 + [(b'f', b'format', 0, _(b'revlog format'), _(b'FORMAT'))],
3285 _(b'[-f FORMAT] -c|-m|FILE'),
3304 _(b'[-f FORMAT] -c|-m|FILE'),
3286 optionalrepo=True,
3305 optionalrepo=True,
3287 )
3306 )
3288 def debugrevlogindex(ui, repo, file_=None, **opts):
3307 def debugrevlogindex(ui, repo, file_=None, **opts):
3289 """dump the contents of a revlog index"""
3308 """dump the contents of a revlog index"""
3290 opts = pycompat.byteskwargs(opts)
3309 opts = pycompat.byteskwargs(opts)
3291 r = cmdutil.openrevlog(repo, b'debugrevlogindex', file_, opts)
3310 r = cmdutil.openrevlog(repo, b'debugrevlogindex', file_, opts)
3292 format = opts.get(b'format', 0)
3311 format = opts.get(b'format', 0)
3293 if format not in (0, 1):
3312 if format not in (0, 1):
3294 raise error.Abort(_(b"unknown format %d") % format)
3313 raise error.Abort(_(b"unknown format %d") % format)
3295
3314
3296 if ui.debugflag:
3315 if ui.debugflag:
3297 shortfn = hex
3316 shortfn = hex
3298 else:
3317 else:
3299 shortfn = short
3318 shortfn = short
3300
3319
3301 # There might not be anything in r, so have a sane default
3320 # There might not be anything in r, so have a sane default
3302 idlen = 12
3321 idlen = 12
3303 for i in r:
3322 for i in r:
3304 idlen = len(shortfn(r.node(i)))
3323 idlen = len(shortfn(r.node(i)))
3305 break
3324 break
3306
3325
3307 if format == 0:
3326 if format == 0:
3308 if ui.verbose:
3327 if ui.verbose:
3309 ui.writenoi18n(
3328 ui.writenoi18n(
3310 b" rev offset length linkrev %s %s p2\n"
3329 b" rev offset length linkrev %s %s p2\n"
3311 % (b"nodeid".ljust(idlen), b"p1".ljust(idlen))
3330 % (b"nodeid".ljust(idlen), b"p1".ljust(idlen))
3312 )
3331 )
3313 else:
3332 else:
3314 ui.writenoi18n(
3333 ui.writenoi18n(
3315 b" rev linkrev %s %s p2\n"
3334 b" rev linkrev %s %s p2\n"
3316 % (b"nodeid".ljust(idlen), b"p1".ljust(idlen))
3335 % (b"nodeid".ljust(idlen), b"p1".ljust(idlen))
3317 )
3336 )
3318 elif format == 1:
3337 elif format == 1:
3319 if ui.verbose:
3338 if ui.verbose:
3320 ui.writenoi18n(
3339 ui.writenoi18n(
3321 (
3340 (
3322 b" rev flag offset length size link p1"
3341 b" rev flag offset length size link p1"
3323 b" p2 %s\n"
3342 b" p2 %s\n"
3324 )
3343 )
3325 % b"nodeid".rjust(idlen)
3344 % b"nodeid".rjust(idlen)
3326 )
3345 )
3327 else:
3346 else:
3328 ui.writenoi18n(
3347 ui.writenoi18n(
3329 b" rev flag size link p1 p2 %s\n"
3348 b" rev flag size link p1 p2 %s\n"
3330 % b"nodeid".rjust(idlen)
3349 % b"nodeid".rjust(idlen)
3331 )
3350 )
3332
3351
3333 for i in r:
3352 for i in r:
3334 node = r.node(i)
3353 node = r.node(i)
3335 if format == 0:
3354 if format == 0:
3336 try:
3355 try:
3337 pp = r.parents(node)
3356 pp = r.parents(node)
3338 except Exception:
3357 except Exception:
3339 pp = [repo.nullid, repo.nullid]
3358 pp = [repo.nullid, repo.nullid]
3340 if ui.verbose:
3359 if ui.verbose:
3341 ui.write(
3360 ui.write(
3342 b"% 6d % 9d % 7d % 7d %s %s %s\n"
3361 b"% 6d % 9d % 7d % 7d %s %s %s\n"
3343 % (
3362 % (
3344 i,
3363 i,
3345 r.start(i),
3364 r.start(i),
3346 r.length(i),
3365 r.length(i),
3347 r.linkrev(i),
3366 r.linkrev(i),
3348 shortfn(node),
3367 shortfn(node),
3349 shortfn(pp[0]),
3368 shortfn(pp[0]),
3350 shortfn(pp[1]),
3369 shortfn(pp[1]),
3351 )
3370 )
3352 )
3371 )
3353 else:
3372 else:
3354 ui.write(
3373 ui.write(
3355 b"% 6d % 7d %s %s %s\n"
3374 b"% 6d % 7d %s %s %s\n"
3356 % (
3375 % (
3357 i,
3376 i,
3358 r.linkrev(i),
3377 r.linkrev(i),
3359 shortfn(node),
3378 shortfn(node),
3360 shortfn(pp[0]),
3379 shortfn(pp[0]),
3361 shortfn(pp[1]),
3380 shortfn(pp[1]),
3362 )
3381 )
3363 )
3382 )
3364 elif format == 1:
3383 elif format == 1:
3365 pr = r.parentrevs(i)
3384 pr = r.parentrevs(i)
3366 if ui.verbose:
3385 if ui.verbose:
3367 ui.write(
3386 ui.write(
3368 b"% 6d %04x % 8d % 8d % 8d % 6d % 6d % 6d %s\n"
3387 b"% 6d %04x % 8d % 8d % 8d % 6d % 6d % 6d %s\n"
3369 % (
3388 % (
3370 i,
3389 i,
3371 r.flags(i),
3390 r.flags(i),
3372 r.start(i),
3391 r.start(i),
3373 r.length(i),
3392 r.length(i),
3374 r.rawsize(i),
3393 r.rawsize(i),
3375 r.linkrev(i),
3394 r.linkrev(i),
3376 pr[0],
3395 pr[0],
3377 pr[1],
3396 pr[1],
3378 shortfn(node),
3397 shortfn(node),
3379 )
3398 )
3380 )
3399 )
3381 else:
3400 else:
3382 ui.write(
3401 ui.write(
3383 b"% 6d %04x % 8d % 6d % 6d % 6d %s\n"
3402 b"% 6d %04x % 8d % 6d % 6d % 6d %s\n"
3384 % (
3403 % (
3385 i,
3404 i,
3386 r.flags(i),
3405 r.flags(i),
3387 r.rawsize(i),
3406 r.rawsize(i),
3388 r.linkrev(i),
3407 r.linkrev(i),
3389 pr[0],
3408 pr[0],
3390 pr[1],
3409 pr[1],
3391 shortfn(node),
3410 shortfn(node),
3392 )
3411 )
3393 )
3412 )
3394
3413
3395
3414
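Hedged usage sketch (the flags are those declared above; output columns
follow the headers the command writes):

    $ hg debugrevlogindex -c          # changelog index, format 0
    $ hg debugrevlogindex -m -f 1     # manifest index, format 1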
3396 @command(
3415 @command(
3397 b'debugrevspec',
3416 b'debugrevspec',
3398 [
3417 [
3399 (
3418 (
3400 b'',
3419 b'',
3401 b'optimize',
3420 b'optimize',
3402 None,
3421 None,
3403 _(b'print parsed tree after optimizing (DEPRECATED)'),
3422 _(b'print parsed tree after optimizing (DEPRECATED)'),
3404 ),
3423 ),
3405 (
3424 (
3406 b'',
3425 b'',
3407 b'show-revs',
3426 b'show-revs',
3408 True,
3427 True,
3409 _(b'print list of result revisions (default)'),
3428 _(b'print list of result revisions (default)'),
3410 ),
3429 ),
3411 (
3430 (
3412 b's',
3431 b's',
3413 b'show-set',
3432 b'show-set',
3414 None,
3433 None,
3415 _(b'print internal representation of result set'),
3434 _(b'print internal representation of result set'),
3416 ),
3435 ),
3417 (
3436 (
3418 b'p',
3437 b'p',
3419 b'show-stage',
3438 b'show-stage',
3420 [],
3439 [],
3421 _(b'print parsed tree at the given stage'),
3440 _(b'print parsed tree at the given stage'),
3422 _(b'NAME'),
3441 _(b'NAME'),
3423 ),
3442 ),
3424 (b'', b'no-optimized', False, _(b'evaluate tree without optimization')),
3443 (b'', b'no-optimized', False, _(b'evaluate tree without optimization')),
3425 (b'', b'verify-optimized', False, _(b'verify optimized result')),
3444 (b'', b'verify-optimized', False, _(b'verify optimized result')),
3426 ],
3445 ],
3427 b'REVSPEC',
3446 b'REVSPEC',
3428 )
3447 )
3429 def debugrevspec(ui, repo, expr, **opts):
3448 def debugrevspec(ui, repo, expr, **opts):
3430 """parse and apply a revision specification
3449 """parse and apply a revision specification
3431
3450
3432 Use -p/--show-stage option to print the parsed tree at the given stages.
3451 Use -p/--show-stage option to print the parsed tree at the given stages.
3433 Use -p all to print tree at every stage.
3452 Use -p all to print tree at every stage.
3434
3453
3435 Use --no-show-revs option with -s or -p to print only the set
3454 Use --no-show-revs option with -s or -p to print only the set
3436 representation or the parsed tree respectively.
3455 representation or the parsed tree respectively.
3437
3456
3438 Use --verify-optimized to compare the optimized result with the unoptimized
3457 Use --verify-optimized to compare the optimized result with the unoptimized
3439 one. Returns 1 if the optimized result differs.
3458 one. Returns 1 if the optimized result differs.
3440 """
3459 """
3441 opts = pycompat.byteskwargs(opts)
3460 opts = pycompat.byteskwargs(opts)
3442 aliases = ui.configitems(b'revsetalias')
3461 aliases = ui.configitems(b'revsetalias')
3443 stages = [
3462 stages = [
3444 (b'parsed', lambda tree: tree),
3463 (b'parsed', lambda tree: tree),
3445 (
3464 (
3446 b'expanded',
3465 b'expanded',
3447 lambda tree: revsetlang.expandaliases(tree, aliases, ui.warn),
3466 lambda tree: revsetlang.expandaliases(tree, aliases, ui.warn),
3448 ),
3467 ),
3449 (b'concatenated', revsetlang.foldconcat),
3468 (b'concatenated', revsetlang.foldconcat),
3450 (b'analyzed', revsetlang.analyze),
3469 (b'analyzed', revsetlang.analyze),
3451 (b'optimized', revsetlang.optimize),
3470 (b'optimized', revsetlang.optimize),
3452 ]
3471 ]
3453 if opts[b'no_optimized']:
3472 if opts[b'no_optimized']:
3454 stages = stages[:-1]
3473 stages = stages[:-1]
3455 if opts[b'verify_optimized'] and opts[b'no_optimized']:
3474 if opts[b'verify_optimized'] and opts[b'no_optimized']:
3456 raise error.Abort(
3475 raise error.Abort(
3457 _(b'cannot use --verify-optimized with --no-optimized')
3476 _(b'cannot use --verify-optimized with --no-optimized')
3458 )
3477 )
3459 stagenames = {n for n, f in stages}
3478 stagenames = {n for n, f in stages}
3460
3479
3461 showalways = set()
3480 showalways = set()
3462 showchanged = set()
3481 showchanged = set()
3463 if ui.verbose and not opts[b'show_stage']:
3482 if ui.verbose and not opts[b'show_stage']:
3464 # show parsed tree by --verbose (deprecated)
3483 # show parsed tree by --verbose (deprecated)
3465 showalways.add(b'parsed')
3484 showalways.add(b'parsed')
3466 showchanged.update([b'expanded', b'concatenated'])
3485 showchanged.update([b'expanded', b'concatenated'])
3467 if opts[b'optimize']:
3486 if opts[b'optimize']:
3468 showalways.add(b'optimized')
3487 showalways.add(b'optimized')
3469 if opts[b'show_stage'] and opts[b'optimize']:
3488 if opts[b'show_stage'] and opts[b'optimize']:
3470 raise error.Abort(_(b'cannot use --optimize with --show-stage'))
3489 raise error.Abort(_(b'cannot use --optimize with --show-stage'))
3471 if opts[b'show_stage'] == [b'all']:
3490 if opts[b'show_stage'] == [b'all']:
3472 showalways.update(stagenames)
3491 showalways.update(stagenames)
3473 else:
3492 else:
3474 for n in opts[b'show_stage']:
3493 for n in opts[b'show_stage']:
3475 if n not in stagenames:
3494 if n not in stagenames:
3476 raise error.Abort(_(b'invalid stage name: %s') % n)
3495 raise error.Abort(_(b'invalid stage name: %s') % n)
3477 showalways.update(opts[b'show_stage'])
3496 showalways.update(opts[b'show_stage'])
3478
3497
3479 treebystage = {}
3498 treebystage = {}
3480 printedtree = None
3499 printedtree = None
3481 tree = revsetlang.parse(expr, lookup=revset.lookupfn(repo))
3500 tree = revsetlang.parse(expr, lookup=revset.lookupfn(repo))
3482 for n, f in stages:
3501 for n, f in stages:
3483 treebystage[n] = tree = f(tree)
3502 treebystage[n] = tree = f(tree)
3484 if n in showalways or (n in showchanged and tree != printedtree):
3503 if n in showalways or (n in showchanged and tree != printedtree):
3485 if opts[b'show_stage'] or n != b'parsed':
3504 if opts[b'show_stage'] or n != b'parsed':
3486 ui.write(b"* %s:\n" % n)
3505 ui.write(b"* %s:\n" % n)
3487 ui.write(revsetlang.prettyformat(tree), b"\n")
3506 ui.write(revsetlang.prettyformat(tree), b"\n")
3488 printedtree = tree
3507 printedtree = tree
3489
3508
3490 if opts[b'verify_optimized']:
3509 if opts[b'verify_optimized']:
3491 arevs = revset.makematcher(treebystage[b'analyzed'])(repo)
3510 arevs = revset.makematcher(treebystage[b'analyzed'])(repo)
3492 brevs = revset.makematcher(treebystage[b'optimized'])(repo)
3511 brevs = revset.makematcher(treebystage[b'optimized'])(repo)
3493 if opts[b'show_set'] or (opts[b'show_set'] is None and ui.verbose):
3512 if opts[b'show_set'] or (opts[b'show_set'] is None and ui.verbose):
3494 ui.writenoi18n(
3513 ui.writenoi18n(
3495 b"* analyzed set:\n", stringutil.prettyrepr(arevs), b"\n"
3514 b"* analyzed set:\n", stringutil.prettyrepr(arevs), b"\n"
3496 )
3515 )
3497 ui.writenoi18n(
3516 ui.writenoi18n(
3498 b"* optimized set:\n", stringutil.prettyrepr(brevs), b"\n"
3517 b"* optimized set:\n", stringutil.prettyrepr(brevs), b"\n"
3499 )
3518 )
3500 arevs = list(arevs)
3519 arevs = list(arevs)
3501 brevs = list(brevs)
3520 brevs = list(brevs)
3502 if arevs == brevs:
3521 if arevs == brevs:
3503 return 0
3522 return 0
3504 ui.writenoi18n(b'--- analyzed\n', label=b'diff.file_a')
3523 ui.writenoi18n(b'--- analyzed\n', label=b'diff.file_a')
3505 ui.writenoi18n(b'+++ optimized\n', label=b'diff.file_b')
3524 ui.writenoi18n(b'+++ optimized\n', label=b'diff.file_b')
3506 sm = difflib.SequenceMatcher(None, arevs, brevs)
3525 sm = difflib.SequenceMatcher(None, arevs, brevs)
3507 for tag, alo, ahi, blo, bhi in sm.get_opcodes():
3526 for tag, alo, ahi, blo, bhi in sm.get_opcodes():
3508 if tag in ('delete', 'replace'):
3527 if tag in ('delete', 'replace'):
3509 for c in arevs[alo:ahi]:
3528 for c in arevs[alo:ahi]:
3510 ui.write(b'-%d\n' % c, label=b'diff.deleted')
3529 ui.write(b'-%d\n' % c, label=b'diff.deleted')
3511 if tag in ('insert', 'replace'):
3530 if tag in ('insert', 'replace'):
3512 for c in brevs[blo:bhi]:
3531 for c in brevs[blo:bhi]:
3513 ui.write(b'+%d\n' % c, label=b'diff.inserted')
3532 ui.write(b'+%d\n' % c, label=b'diff.inserted')
3514 if tag == 'equal':
3533 if tag == 'equal':
3515 for c in arevs[alo:ahi]:
3534 for c in arevs[alo:ahi]:
3516 ui.write(b' %d\n' % c)
3535 ui.write(b' %d\n' % c)
3517 return 1
3536 return 1
3518
3537
3519 func = revset.makematcher(tree)
3538 func = revset.makematcher(tree)
3520 revs = func(repo)
3539 revs = func(repo)
3521 if opts[b'show_set'] or (opts[b'show_set'] is None and ui.verbose):
3540 if opts[b'show_set'] or (opts[b'show_set'] is None and ui.verbose):
3522 ui.writenoi18n(b"* set:\n", stringutil.prettyrepr(revs), b"\n")
3541 ui.writenoi18n(b"* set:\n", stringutil.prettyrepr(revs), b"\n")
3523 if not opts[b'show_revs']:
3542 if not opts[b'show_revs']:
3524 return
3543 return
3525 for c in revs:
3544 for c in revs:
3526 ui.write(b"%d\n" % c)
3545 ui.write(b"%d\n" % c)
3527
3546
3528
3547
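Hedged usage sketch combining the options described in the docstring (the
revset itself is illustrative):

    $ hg debugrevspec -p all -s 'heads(all())'

This prints the parsed tree after every stage in ``stages``, then the
internal set representation, then the member revisions.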
3529 @command(
3548 @command(
3530 b'debugserve',
3549 b'debugserve',
3531 [
3550 [
3532 (
3551 (
3533 b'',
3552 b'',
3534 b'sshstdio',
3553 b'sshstdio',
3535 False,
3554 False,
3536 _(b'run an SSH server bound to process handles'),
3555 _(b'run an SSH server bound to process handles'),
3537 ),
3556 ),
3538 (b'', b'logiofd', b'', _(b'file descriptor to log server I/O to')),
3557 (b'', b'logiofd', b'', _(b'file descriptor to log server I/O to')),
3539 (b'', b'logiofile', b'', _(b'file to log server I/O to')),
3558 (b'', b'logiofile', b'', _(b'file to log server I/O to')),
3540 ],
3559 ],
3541 b'',
3560 b'',
3542 )
3561 )
3543 def debugserve(ui, repo, **opts):
3562 def debugserve(ui, repo, **opts):
3544 """run a server with advanced settings
3563 """run a server with advanced settings
3545
3564
3546 This command is similar to :hg:`serve`. It exists partially as a
3565 This command is similar to :hg:`serve`. It exists partially as a
3547 workaround for the fact that ``hg serve --stdio`` must have specific
3566 workaround for the fact that ``hg serve --stdio`` must have specific
3548 arguments for security reasons.
3567 arguments for security reasons.
3549 """
3568 """
3550 opts = pycompat.byteskwargs(opts)
3569 opts = pycompat.byteskwargs(opts)
3551
3570
3552 if not opts[b'sshstdio']:
3571 if not opts[b'sshstdio']:
3553 raise error.Abort(_(b'only --sshstdio is currently supported'))
3572 raise error.Abort(_(b'only --sshstdio is currently supported'))
3554
3573
3555 logfh = None
3574 logfh = None
3556
3575
3557 if opts[b'logiofd'] and opts[b'logiofile']:
3576 if opts[b'logiofd'] and opts[b'logiofile']:
3558 raise error.Abort(_(b'cannot use both --logiofd and --logiofile'))
3577 raise error.Abort(_(b'cannot use both --logiofd and --logiofile'))
3559
3578
3560 if opts[b'logiofd']:
3579 if opts[b'logiofd']:
3561 # Ideally we would be line buffered. But line buffering in binary
3580 # Ideally we would be line buffered. But line buffering in binary
3562 # mode isn't supported and emits a warning in Python 3.8+. Disabling
3581 # mode isn't supported and emits a warning in Python 3.8+. Disabling
3563 # buffering could have performance impacts. But since this isn't
3582 # buffering could have performance impacts. But since this isn't
3564 # performance critical code, it should be fine.
3583 # performance critical code, it should be fine.
3565 try:
3584 try:
3566 logfh = os.fdopen(int(opts[b'logiofd']), 'ab', 0)
3585 logfh = os.fdopen(int(opts[b'logiofd']), 'ab', 0)
3567 except OSError as e:
3586 except OSError as e:
3568 if e.errno != errno.ESPIPE:
3587 if e.errno != errno.ESPIPE:
3569 raise
3588 raise
3570 # can't seek a pipe, so `ab` mode fails on py3
3589 # can't seek a pipe, so `ab` mode fails on py3
3571 logfh = os.fdopen(int(opts[b'logiofd']), 'wb', 0)
3590 logfh = os.fdopen(int(opts[b'logiofd']), 'wb', 0)
3572 elif opts[b'logiofile']:
3591 elif opts[b'logiofile']:
3573 logfh = open(opts[b'logiofile'], b'ab', 0)
3592 logfh = open(opts[b'logiofile'], b'ab', 0)
3574
3593
3575 s = wireprotoserver.sshserver(ui, repo, logfh=logfh)
3594 s = wireprotoserver.sshserver(ui, repo, logfh=logfh)
3576 s.serve_forever()
3595 s.serve_forever()
3577
3596
3578
3597
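Hedged usage sketch (the log path is illustrative):

    $ hg debugserve --sshstdio --logiofile /tmp/hg-server-io.log

Only the --sshstdio mode is accepted, and at most one of --logiofd and
--logiofile may be given, as enforced above.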
3579 @command(b'debugsetparents', [], _(b'REV1 [REV2]'))
3598 @command(b'debugsetparents', [], _(b'REV1 [REV2]'))
3580 def debugsetparents(ui, repo, rev1, rev2=None):
3599 def debugsetparents(ui, repo, rev1, rev2=None):
3581 """manually set the parents of the current working directory (DANGEROUS)
3600 """manually set the parents of the current working directory (DANGEROUS)
3582
3601
3583 This command is not what you are looking for and should not be used. Using
3602 This command is not what you are looking for and should not be used. Using
3584 this command will most certainly result in slight corruption of the file
3603 this command will most certainly result in slight corruption of the file
3585 level histories within your repository. DO NOT USE THIS COMMAND.
3604 level histories within your repository. DO NOT USE THIS COMMAND.
3586
3605
3587 The command updates the p1 and p2 fields in the dirstate, touching
3606 The command updates the p1 and p2 fields in the dirstate, touching
3588 nothing else. This is useful for writing repository conversion tools, but
3607 nothing else. This is useful for writing repository conversion tools, but
3589 it should be used with extreme care. For example, neither the working
3608 it should be used with extreme care. For example, neither the working
3590 directory nor the dirstate is updated, so file status may be incorrect
3609 directory nor the dirstate is updated, so file status may be incorrect
3591 after running this command. Only use it if you are one of the few people
3610 after running this command. Only use it if you are one of the few people
3592 who deeply understand both conversion tools and file-level histories. If
3611 who deeply understand both conversion tools and file-level histories. If
3593 you are reading this help, you are not one of those people (most of them
3612 you are reading this help, you are not one of those people (most of them
3594 sailed west from Mithlond anyway).
3613 sailed west from Mithlond anyway).
3595
3614
3596 So, one last time: DO NOT USE THIS COMMAND.
3615 So, one last time: DO NOT USE THIS COMMAND.
3597
3616
3598 Returns 0 on success.
3617 Returns 0 on success.
3599 """
3618 """
3600
3619
3601 node1 = scmutil.revsingle(repo, rev1).node()
3620 node1 = scmutil.revsingle(repo, rev1).node()
3602 node2 = scmutil.revsingle(repo, rev2, b'null').node()
3621 node2 = scmutil.revsingle(repo, rev2, b'null').node()
3603
3622
3604 with repo.wlock():
3623 with repo.wlock():
3605 repo.setparents(node1, node2)
3624 repo.setparents(node1, node2)
3606
3625
3607
3626
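For the rare legitimate use, a hedged sketch (the revisions are
illustrative; rev2 defaults to null when omitted, per revsingle above):

    $ hg debugsetparents .^          # p1 becomes the working parent's parent, p2 null
    $ hg debugsetparents rev1 rev2   # set both dirstate parents explicitly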
3608 @command(b'debugsidedata', cmdutil.debugrevlogopts, _(b'-c|-m|FILE REV'))
3627 @command(b'debugsidedata', cmdutil.debugrevlogopts, _(b'-c|-m|FILE REV'))
3609 def debugsidedata(ui, repo, file_, rev=None, **opts):
3628 def debugsidedata(ui, repo, file_, rev=None, **opts):
3610 """dump the side data for a cl/manifest/file revision
3629 """dump the side data for a cl/manifest/file revision
3611
3630
3612 Use --verbose to dump the sidedata content."""
3631 Use --verbose to dump the sidedata content."""
3613 opts = pycompat.byteskwargs(opts)
3632 opts = pycompat.byteskwargs(opts)
3614 if opts.get(b'changelog') or opts.get(b'manifest') or opts.get(b'dir'):
3633 if opts.get(b'changelog') or opts.get(b'manifest') or opts.get(b'dir'):
3615 if rev is not None:
3634 if rev is not None:
3616 raise error.CommandError(b'debugdata', _(b'invalid arguments'))
3635 raise error.CommandError(b'debugdata', _(b'invalid arguments'))
3617 file_, rev = None, file_
3636 file_, rev = None, file_
3618 elif rev is None:
3637 elif rev is None:
3619 raise error.CommandError(b'debugdata', _(b'invalid arguments'))
3638 raise error.CommandError(b'debugdata', _(b'invalid arguments'))
3620 r = cmdutil.openstorage(repo, b'debugdata', file_, opts)
3639 r = cmdutil.openstorage(repo, b'debugdata', file_, opts)
3621 r = getattr(r, '_revlog', r)
3640 r = getattr(r, '_revlog', r)
3622 try:
3641 try:
3623 sidedata = r.sidedata(r.lookup(rev))
3642 sidedata = r.sidedata(r.lookup(rev))
3624 except KeyError:
3643 except KeyError:
3625 raise error.Abort(_(b'invalid revision identifier %s') % rev)
3644 raise error.Abort(_(b'invalid revision identifier %s') % rev)
3626 if sidedata:
3645 if sidedata:
3627 sidedata = list(sidedata.items())
3646 sidedata = list(sidedata.items())
3628 sidedata.sort()
3647 sidedata.sort()
3629 ui.writenoi18n(b'%d sidedata entries\n' % len(sidedata))
3648 ui.writenoi18n(b'%d sidedata entries\n' % len(sidedata))
3630 for key, value in sidedata:
3649 for key, value in sidedata:
3631 ui.writenoi18n(b' entry-%04o size %d\n' % (key, len(value)))
3650 ui.writenoi18n(b' entry-%04o size %d\n' % (key, len(value)))
3632 if ui.verbose:
3651 if ui.verbose:
3633 ui.writenoi18n(b' %s\n' % stringutil.pprint(value))
3652 ui.writenoi18n(b' %s\n' % stringutil.pprint(value))
3634
3653
3635
3654
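Hedged usage sketch (the revision numbers are illustrative):

    $ hg debugsidedata -c 0       # sidedata entries of changelog rev 0
    $ hg debugsidedata -v -c 0    # additionally dump each entry's payload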
3636 @command(b'debugssl', [], b'[SOURCE]', optionalrepo=True)
3655 @command(b'debugssl', [], b'[SOURCE]', optionalrepo=True)
3637 def debugssl(ui, repo, source=None, **opts):
3656 def debugssl(ui, repo, source=None, **opts):
3638 """test a secure connection to a server
3657 """test a secure connection to a server
3639
3658
3640 This builds the certificate chain for the server on Windows, installing the
3659 This builds the certificate chain for the server on Windows, installing the
3641 missing intermediates and trusted root via Windows Update if necessary. It
3660 missing intermediates and trusted root via Windows Update if necessary. It
3642 does nothing on other platforms.
3661 does nothing on other platforms.
3643
3662
3644 If SOURCE is omitted, the 'default' path will be used. If a URL is given,
3663 If SOURCE is omitted, the 'default' path will be used. If a URL is given,
3645 that server is used. See :hg:`help urls` for more information.
3664 that server is used. See :hg:`help urls` for more information.
3646
3665
3647 If the update succeeds, retry the original operation. Otherwise, the cause
3666 If the update succeeds, retry the original operation. Otherwise, the cause
3648 of the SSL error is likely another issue.
3667 of the SSL error is likely another issue.
3649 """
3668 """
3650 if not pycompat.iswindows:
3669 if not pycompat.iswindows:
3651 raise error.Abort(
3670 raise error.Abort(
3652 _(b'certificate chain building is only possible on Windows')
3671 _(b'certificate chain building is only possible on Windows')
3653 )
3672 )
3654
3673
3655 if not source:
3674 if not source:
3656 if not repo:
3675 if not repo:
3657 raise error.Abort(
3676 raise error.Abort(
3658 _(
3677 _(
3659 b"there is no Mercurial repository here, and no "
3678 b"there is no Mercurial repository here, and no "
3660 b"server specified"
3679 b"server specified"
3661 )
3680 )
3662 )
3681 )
3663 source = b"default"
3682 source = b"default"
3664
3683
3665 source, branches = urlutil.get_unique_pull_path(
3684 source, branches = urlutil.get_unique_pull_path(
3666 b'debugssl', repo, ui, source
3685 b'debugssl', repo, ui, source
3667 )
3686 )
3668 url = urlutil.url(source)
3687 url = urlutil.url(source)
3669
3688
3670 defaultport = {b'https': 443, b'ssh': 22}
3689 defaultport = {b'https': 443, b'ssh': 22}
3671 if url.scheme in defaultport:
3690 if url.scheme in defaultport:
3672 try:
3691 try:
3673 addr = (url.host, int(url.port or defaultport[url.scheme]))
3692 addr = (url.host, int(url.port or defaultport[url.scheme]))
3674 except ValueError:
3693 except ValueError:
3675 raise error.Abort(_(b"malformed port number in URL"))
3694 raise error.Abort(_(b"malformed port number in URL"))
3676 else:
3695 else:
3677 raise error.Abort(_(b"only https and ssh connections are supported"))
3696 raise error.Abort(_(b"only https and ssh connections are supported"))
3678
3697
3679 from . import win32
3698 from . import win32
3680
3699
3681 s = ssl.wrap_socket(
3700 s = ssl.wrap_socket(
3682 socket.socket(),
3701 socket.socket(),
3683 ssl_version=ssl.PROTOCOL_TLS,
3702 ssl_version=ssl.PROTOCOL_TLS,
3684 cert_reqs=ssl.CERT_NONE,
3703 cert_reqs=ssl.CERT_NONE,
3685 ca_certs=None,
3704 ca_certs=None,
3686 )
3705 )
3687
3706
3688 try:
3707 try:
3689 s.connect(addr)
3708 s.connect(addr)
3690 cert = s.getpeercert(True)
3709 cert = s.getpeercert(True)
3691
3710
3692 ui.status(_(b'checking the certificate chain for %s\n') % url.host)
3711 ui.status(_(b'checking the certificate chain for %s\n') % url.host)
3693
3712
3694 complete = win32.checkcertificatechain(cert, build=False)
3713 complete = win32.checkcertificatechain(cert, build=False)
3695
3714
3696 if not complete:
3715 if not complete:
3697 ui.status(_(b'certificate chain is incomplete, updating... '))
3716 ui.status(_(b'certificate chain is incomplete, updating... '))
3698
3717
3699 if not win32.checkcertificatechain(cert):
3718 if not win32.checkcertificatechain(cert):
3700 ui.status(_(b'failed.\n'))
3719 ui.status(_(b'failed.\n'))
3701 else:
3720 else:
3702 ui.status(_(b'done.\n'))
3721 ui.status(_(b'done.\n'))
3703 else:
3722 else:
3704 ui.status(_(b'full certificate chain is available\n'))
3723 ui.status(_(b'full certificate chain is available\n'))
3705 finally:
3724 finally:
3706 s.close()
3725 s.close()
3707
3726
3708
3727
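Hedged usage sketch (the URL is illustrative; the command only runs on
Windows, as enforced above):

    $ hg debugssl https://example.com/repo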
@command(
    b"debugbackupbundle",
    [
        (
            b"",
            b"recover",
            b"",
            b"brings the specified changeset back into the repository",
        )
    ]
    + cmdutil.logopts,
    _(b"hg debugbackupbundle [--recover HASH]"),
)
def debugbackupbundle(ui, repo, *pats, **opts):
    """lists the changesets available in backup bundles

    Without any arguments, this command prints a list of the changesets in each
    backup bundle.

    --recover takes a changeset hash and unbundles the first bundle that
    contains that hash, which puts that changeset back in your repository.

    --verbose will print the entire commit message and the bundle path for that
    backup.
    """
    backups = list(
        filter(
            os.path.isfile, glob.glob(repo.vfs.join(b"strip-backup") + b"/*.hg")
        )
    )
    backups.sort(key=lambda x: os.path.getmtime(x), reverse=True)

    opts = pycompat.byteskwargs(opts)
    opts[b"bundle"] = b""
    opts[b"force"] = None
    limit = logcmdutil.getlimit(opts)

    def display(other, chlist, displayer):
        if opts.get(b"newest_first"):
            chlist.reverse()
        count = 0
        for n in chlist:
            if limit is not None and count >= limit:
                break
            parents = [
                True for p in other.changelog.parents(n) if p != repo.nullid
            ]
            if opts.get(b"no_merges") and len(parents) == 2:
                continue
            count += 1
            displayer.show(other[n])

    recovernode = opts.get(b"recover")
    if recovernode:
        if scmutil.isrevsymbol(repo, recovernode):
            ui.warn(_(b"%s already exists in the repo\n") % recovernode)
            return
    elif backups:
        msg = _(
            b"Recover changesets using: hg debugbackupbundle --recover "
            b"<changeset hash>\n\nAvailable backup changesets:"
        )
        ui.status(msg, label=b"status.removed")
    else:
        ui.status(_(b"no backup changesets found\n"))
        return

    for backup in backups:
        # Much of this is copied from the hg incoming logic
        source = os.path.relpath(backup, encoding.getcwd())
        source, branches = urlutil.get_unique_pull_path(
            b'debugbackupbundle',
            repo,
            ui,
            source,
            default_branches=opts.get(b'branch'),
        )
        try:
            other = hg.peer(repo, opts, source)
        except error.LookupError as ex:
            msg = _(b"\nwarning: unable to open bundle %s") % source
            hint = _(b"\n(missing parent rev %s)\n") % short(ex.name)
            ui.warn(msg, hint=hint)
            continue
        revs, checkout = hg.addbranchrevs(
            repo, other, branches, opts.get(b"rev")
        )

        if revs:
            revs = [other.lookup(rev) for rev in revs]

        with ui.silent():
            try:
                other, chlist, cleanupfn = bundlerepo.getremotechanges(
                    ui, repo, other, revs, opts[b"bundle"], opts[b"force"]
                )
            except error.LookupError:
                continue

        try:
            if not chlist:
                continue
            if recovernode:
                with repo.lock(), repo.transaction(b"unbundle") as tr:
                    if scmutil.isrevsymbol(other, recovernode):
                        ui.status(_(b"Unbundling %s\n") % (recovernode))
                        f = hg.openpath(ui, source)
                        gen = exchange.readbundle(ui, f, source)
                        if isinstance(gen, bundle2.unbundle20):
                            bundle2.applybundle(
                                repo,
                                gen,
                                tr,
                                source=b"unbundle",
                                url=b"bundle:" + source,
                            )
                        else:
                            gen.apply(repo, b"unbundle", b"bundle:" + source)
                        break
            else:
                backupdate = encoding.strtolocal(
                    time.strftime(
                        "%a %H:%M, %Y-%m-%d",
                        time.localtime(os.path.getmtime(source)),
                    )
                )
                ui.status(b"\n%s\n" % (backupdate.ljust(50)))
                if ui.verbose:
                    ui.status(b"%s%s\n" % (b"bundle:".ljust(13), source))
                else:
                    opts[
                        b"template"
                    ] = b"{label('status.modified', node|short)} {desc|firstline}\n"
                    displayer = logcmdutil.changesetdisplayer(
                        ui, other, opts, False
                    )
                    display(other, chlist, displayer)
                    displayer.close()
        finally:
            cleanupfn()


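# Illustrative only, not part of the original change: a hypothetical session
# with the command above, assuming a strip backup exists under
# .hg/strip-backup (the changeset hash is made up):
#
#   $ hg debugbackupbundle
#   Recover changesets using: hg debugbackupbundle --recover <changeset hash>
#
#   Available backup changesets:
#   ...
#   $ hg debugbackupbundle --recover 1a2b3c4d5e6f
#   Unbundling 1a2b3c4d5e6f

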
@command(
    b'debugsub',
    [(b'r', b'rev', b'', _(b'revision to check'), _(b'REV'))],
    _(b'[-r REV] [REV]'),
)
def debugsub(ui, repo, rev=None):
    ctx = scmutil.revsingle(repo, rev, None)
    for k, v in sorted(ctx.substate.items()):
        ui.writenoi18n(b'path %s\n' % k)
        ui.writenoi18n(b' source   %s\n' % v[0])
        ui.writenoi18n(b' revision %s\n' % v[1])


@command(b'debugshell', optionalrepo=True)
def debugshell(ui, repo):
    """run an interactive Python interpreter

    The local namespace is provided with a reference to the ui and
    the repo instance (if available).
    """
    import code

    imported_objects = {
        'ui': ui,
        'repo': repo,
    }

    code.interact(local=imported_objects)


@command(
    b'debugsuccessorssets',
    [(b'', b'closest', False, _(b'return closest successors sets only'))],
    _(b'[REV]'),
)
def debugsuccessorssets(ui, repo, *revs, **opts):
    """show set of successors for revision

    A successors set of changeset A is a consistent group of revisions that
    succeed A. It contains non-obsolete changesets only, unless the closest
    successors sets are requested (see ``--closest``).

    In most cases a changeset A has a single successors set containing a single
    successor (changeset A replaced by A').

    A changeset that is made obsolete with no successors is called "pruned".
    Such changesets have no successors sets at all.

    A changeset that has been "split" will have a successors set containing
    more than one successor.

    A changeset that has been rewritten in multiple different ways is called
    "divergent". Such changesets have multiple successor sets (each of which
    may also be split, i.e. have multiple successors).

    Results are displayed as follows::

        <rev1>
            <successors-1A>
        <rev2>
            <successors-2A>
            <successors-2B1> <successors-2B2> <successors-2B3>

    Here rev2 has two possible (i.e. divergent) successors sets. The first
    holds one element, whereas the second holds three (i.e. the changeset has
    been split).
    """
    # passed to successorssets caching computation from one call to another
    cache = {}
    ctx2str = bytes
    node2str = short
    for rev in scmutil.revrange(repo, revs):
        ctx = repo[rev]
        ui.write(b'%s\n' % ctx2str(ctx))
        for succsset in obsutil.successorssets(
            repo, ctx.node(), closest=opts['closest'], cache=cache
        ):
            if succsset:
                ui.write(b'    ')
                ui.write(node2str(succsset[0]))
                for node in succsset[1:]:
                    ui.write(b' ')
                    ui.write(node2str(node))
            ui.write(b'\n')


@command(b'debugtagscache', [])
def debugtagscache(ui, repo):
    """display the contents of .hg/cache/hgtagsfnodes1"""
    cache = tagsmod.hgtagsfnodescache(repo.unfiltered())
    flog = repo.file(b'.hgtags')
    for r in repo:
        node = repo[r].node()
        tagsnode = cache.getfnode(node, computemissing=False)
        if tagsnode:
            tagsnodedisplay = hex(tagsnode)
            if not flog.hasnode(tagsnode):
                tagsnodedisplay += b' (unknown node)'
        elif tagsnode is None:
            tagsnodedisplay = b'missing'
        else:
            tagsnodedisplay = b'invalid'

        ui.write(b'%d %s %s\n' % (r, hex(node), tagsnodedisplay))


@command(
    b'debugtemplate',
    [
        (b'r', b'rev', [], _(b'apply template on changesets'), _(b'REV')),
        (b'D', b'define', [], _(b'define template keyword'), _(b'KEY=VALUE')),
    ],
    _(b'[-r REV]... [-D KEY=VALUE]... TEMPLATE'),
    optionalrepo=True,
)
def debugtemplate(ui, repo, tmpl, **opts):
    """parse and apply a template

    If -r/--rev is given, the template is processed as a log template and
    applied to the given changesets. Otherwise, it is processed as a generic
    template.

    Use --verbose to print the parsed tree.
    """
    revs = None
    if opts['rev']:
        if repo is None:
            raise error.RepoError(
                _(b'there is no Mercurial repository here (.hg not found)')
            )
        revs = scmutil.revrange(repo, opts['rev'])

    props = {}
    for d in opts['define']:
        try:
            k, v = (e.strip() for e in d.split(b'=', 1))
            if not k or k == b'ui':
                raise ValueError
            props[k] = v
        except ValueError:
            raise error.Abort(_(b'malformed keyword definition: %s') % d)

    if ui.verbose:
        aliases = ui.configitems(b'templatealias')
        tree = templater.parse(tmpl)
        ui.note(templater.prettyformat(tree), b'\n')
        newtree = templater.expandaliases(tree, aliases)
        if newtree != tree:
            ui.notenoi18n(
                b"* expanded:\n", templater.prettyformat(newtree), b'\n'
            )

    if revs is None:
        tres = formatter.templateresources(ui, repo)
        t = formatter.maketemplater(ui, tmpl, resources=tres)
        if ui.verbose:
            kwds, funcs = t.symbolsuseddefault()
            ui.writenoi18n(b"* keywords: %s\n" % b', '.join(sorted(kwds)))
            ui.writenoi18n(b"* functions: %s\n" % b', '.join(sorted(funcs)))
        ui.write(t.renderdefault(props))
    else:
        displayer = logcmdutil.maketemplater(ui, repo, tmpl)
        if ui.verbose:
            kwds, funcs = displayer.t.symbolsuseddefault()
            ui.writenoi18n(b"* keywords: %s\n" % b', '.join(sorted(kwds)))
            ui.writenoi18n(b"* functions: %s\n" % b', '.join(sorted(funcs)))
        for r in revs:
            displayer.show(repo[r], **pycompat.strkwargs(props))
        displayer.close()


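# Illustrative only, not part of the original change: hypothetical
# invocations of the command above. -D feeds extra keywords into the
# template via the props dict built in the loop.
#
#   $ hg debugtemplate -D word=hello '{word}\n'
#   hello
#   $ hg debugtemplate -r 0 '{node|short}: {desc|firstline}\n'

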
@command(
    b'debuguigetpass',
    [
        (b'p', b'prompt', b'', _(b'prompt text'), _(b'TEXT')),
    ],
    _(b'[-p TEXT]'),
    norepo=True,
)
def debuguigetpass(ui, prompt=b''):
    """show prompt to type password"""
    r = ui.getpass(prompt)
    if r is None:
        r = b"<default response>"
    ui.writenoi18n(b'response: %s\n' % r)


@command(
    b'debuguiprompt',
    [
        (b'p', b'prompt', b'', _(b'prompt text'), _(b'TEXT')),
    ],
    _(b'[-p TEXT]'),
    norepo=True,
)
def debuguiprompt(ui, prompt=b''):
    """show plain prompt"""
    r = ui.prompt(prompt)
    ui.writenoi18n(b'response: %s\n' % r)


@command(b'debugupdatecaches', [])
def debugupdatecaches(ui, repo, *pats, **opts):
    """warm all known caches in the repository"""
    with repo.wlock(), repo.lock():
        repo.updatecaches(caches=repository.CACHES_ALL)


@command(
    b'debugupgraderepo',
    [
        (
            b'o',
            b'optimize',
            [],
            _(b'extra optimization to perform'),
            _(b'NAME'),
        ),
        (b'', b'run', False, _(b'performs an upgrade')),
        (b'', b'backup', True, _(b'keep the old repository content around')),
        (b'', b'changelog', None, _(b'select the changelog for upgrade')),
        (b'', b'manifest', None, _(b'select the manifest for upgrade')),
        (b'', b'filelogs', None, _(b'select all filelogs for upgrade')),
    ],
)
def debugupgraderepo(ui, repo, run=False, optimize=None, backup=True, **opts):
    """upgrade a repository to use different features

    If no arguments are specified, the repository is evaluated for upgrade
    and a list of problems and potential optimizations is printed.

    With ``--run``, a repository upgrade is performed. Behavior of the upgrade
    can be influenced via additional arguments. More details will be provided
    by the command output when run without ``--run``.

    During the upgrade, the repository will be locked and no writes will be
    allowed.

    At the end of the upgrade, the repository may not be readable while new
    repository data is swapped in. This window will be as long as it takes to
    rename some directories inside the ``.hg`` directory. On most machines, this
    should complete almost instantaneously and the chances of a consumer being
    unable to access the repository should be low.

    By default, all revlogs will be upgraded. You can restrict this using flags
    such as `--manifest`:

    * `--manifest`: only optimize the manifest
    * `--no-manifest`: optimize all revlogs except the manifest
    * `--changelog`: optimize the changelog only
    * `--no-changelog --no-manifest`: optimize filelogs only
    * `--filelogs`: optimize the filelogs only
    * `--no-changelog --no-manifest --no-filelogs`: skip all revlog optimizations
    """
    return upgrade.upgraderepo(
        ui, repo, run=run, optimize=set(optimize), backup=backup, **opts
    )


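# Illustrative only, not part of the original change: hypothetical
# invocations of the command above.
#
#   $ hg debugupgraderepo                    # report-only evaluation
#   $ hg debugupgraderepo --run --no-backup  # upgrade, discarding the backup

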
@command(
    b'debugwalk', cmdutil.walkopts, _(b'[OPTION]... [FILE]...'), inferrepo=True
)
def debugwalk(ui, repo, *pats, **opts):
    """show how files match on given patterns"""
    opts = pycompat.byteskwargs(opts)
    m = scmutil.match(repo[None], pats, opts)
    if ui.verbose:
        ui.writenoi18n(b'* matcher:\n', stringutil.prettyrepr(m), b'\n')
    items = list(repo[None].walk(m))
    if not items:
        return
    f = lambda fn: fn
    if ui.configbool(b'ui', b'slash') and pycompat.ossep != b'/':
        f = lambda fn: util.normpath(fn)
    fmt = b'f  %%-%ds  %%-%ds  %%s' % (
        max([len(abs) for abs in items]),
        max([len(repo.pathto(abs)) for abs in items]),
    )
    for abs in items:
        line = fmt % (
            abs,
            f(repo.pathto(abs)),
            m.exact(abs) and b'exact' or b'',
        )
        ui.write(b"%s\n" % line.rstrip())


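# Illustrative only, not part of the original change: given the fmt string
# above, output lines look like the following (hypothetical file names; the
# third column is empty for non-exact matches):
#
#   f  a      a      exact
#   f  dir/b  dir/b  exact

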
@command(b'debugwhyunstable', [], _(b'REV'))
def debugwhyunstable(ui, repo, rev):
    """explain instabilities of a changeset"""
    for entry in obsutil.whyunstable(repo, scmutil.revsingle(repo, rev)):
        dnodes = b''
        if entry.get(b'divergentnodes'):
            dnodes = (
                b' '.join(
                    b'%s (%s)' % (ctx.hex(), ctx.phasestr())
                    for ctx in entry[b'divergentnodes']
                )
                + b' '
            )
        ui.write(
            b'%s: %s%s %s\n'
            % (entry[b'instability'], dnodes, entry[b'reason'], entry[b'node'])
        )


@command(
    b'debugwireargs',
    [
        (b'', b'three', b'', b'three'),
        (b'', b'four', b'', b'four'),
        (b'', b'five', b'', b'five'),
    ]
    + cmdutil.remoteopts,
    _(b'REPO [OPTIONS]... [ONE [TWO]]'),
    norepo=True,
)
def debugwireargs(ui, repopath, *vals, **opts):
    opts = pycompat.byteskwargs(opts)
    repo = hg.peer(ui, opts, repopath)
    try:
        for opt in cmdutil.remoteopts:
            del opts[opt[1]]
        args = {}
        for k, v in pycompat.iteritems(opts):
            if v:
                args[k] = v
        args = pycompat.strkwargs(args)
        # run twice to check that we don't mess up the stream for the next command
        res1 = repo.debugwireargs(*vals, **args)
        res2 = repo.debugwireargs(*vals, **args)
        ui.write(b"%s\n" % res1)
        if res1 != res2:
            ui.warn(b"%s\n" % res2)
    finally:
        repo.close()


def _parsewirelangblocks(fh):
    activeaction = None
    blocklines = []
    lastindent = 0

    for line in fh:
        line = line.rstrip()
        if not line:
            continue

        if line.startswith(b'#'):
            continue

        if not line.startswith(b' '):
            # New block. Flush previous one.
            if activeaction:
                yield activeaction, blocklines

            activeaction = line
            blocklines = []
            lastindent = 0
            continue

        # Else we start with an indent.

        if not activeaction:
            raise error.Abort(_(b'indented line outside of block'))

        indent = len(line) - len(line.lstrip())

        # If this line is indented more than the last line, concatenate it.
        if indent > lastindent and blocklines:
            blocklines[-1] += line.lstrip()
        else:
            blocklines.append(line)
            lastindent = indent

    # Flush last block.
    if activeaction:
        yield activeaction, blocklines


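# Illustrative only, not part of the original change: a sketch of how the
# parser above groups its input. Feeding it these two lines:
#
#     import io
#     blocks = list(_parsewirelangblocks(io.BytesIO(
#         b'command listkeys\n'
#         b'    namespace bookmarks\n'
#     )))
#
# yields [(b'command listkeys', [b'    namespace bookmarks'])]: the
# unindented line opens a block and the indented line is kept (with its
# leading whitespace) as the block payload.

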
@command(
    b'debugwireproto',
    [
        (b'', b'localssh', False, _(b'start an SSH server for this repo')),
        (b'', b'peer', b'', _(b'construct a specific version of the peer')),
        (
            b'',
            b'noreadstderr',
            False,
            _(b'do not read from stderr of the remote'),
        ),
        (
            b'',
            b'nologhandshake',
            False,
            _(b'do not log I/O related to the peer handshake'),
        ),
    ]
    + cmdutil.remoteopts,
    _(b'[PATH]'),
    optionalrepo=True,
)
def debugwireproto(ui, repo, path=None, **opts):
    """send wire protocol commands to a server

    This command can be used to issue wire protocol commands to remote
    peers and to debug the raw data being exchanged.

    ``--localssh`` will start an SSH server against the current repository
    and connect to that. By default, the connection will perform a handshake
    and establish an appropriate peer instance.

    ``--peer`` can be used to bypass the handshake protocol and construct a
    peer instance using the specified class type. Valid values are ``raw``,
    ``http2``, ``ssh1``, and ``ssh2``. ``raw`` instances only allow sending
    raw data payloads and don't support higher-level command actions.

    ``--noreadstderr`` can be used to disable automatic reading from stderr
    of the peer (for SSH connections only). Disabling automatic reading of
    stderr is useful for making output more deterministic.

    Commands are issued via a mini language which is specified on stdin.
    The language consists of individual actions to perform. An action is
    defined by a block. A block is defined as a line with no leading
    space followed by 0 or more lines with leading space. Blocks are
    effectively a high-level command with additional metadata.

    Lines beginning with ``#`` are ignored.

    The following sections denote available actions.

    raw
    ---

    Send raw data to the server.

    The block payload contains the raw data to send as one atomic send
    operation. The data may not actually be delivered in a single system
    call: it depends on the abilities of the transport being used.

    Each line in the block is de-indented and concatenated. Then, that
    value is evaluated as a Python b'' literal. This allows the use of
    backslash escaping, etc.

    raw+
    ----

    Behaves like ``raw`` except flushes output afterwards.

    command <X>
    -----------

    Send a request to run a named command, whose name follows the ``command``
    string.

    Arguments to the command are defined as lines in this block. The format of
    each line is ``<key> <value>``. e.g.::

        command listkeys
            namespace bookmarks

    If the value begins with ``eval:``, it will be interpreted as a Python
    literal expression. Otherwise values are interpreted as Python b'' literals.
    This allows sending complex types and encoding special byte sequences via
    backslash escaping.

    The following arguments have special meaning:

    ``PUSHFILE``
        When defined, the *push* mechanism of the peer will be used instead
        of the static request-response mechanism and the content of the
        file specified in the value of this argument will be sent as the
        command payload.

        This can be used to submit a local bundle file to the remote.

    batchbegin
    ----------

    Instruct the peer to begin a batched send.

    All ``command`` blocks are queued for execution until the next
    ``batchsubmit`` block.

    batchsubmit
    -----------

    Submit previously queued ``command`` blocks as a batch request.

    This action MUST be paired with a ``batchbegin`` action.

    httprequest <method> <path>
    ---------------------------

    (HTTP peer only)

    Send an HTTP request to the peer.

    The HTTP request line follows the ``httprequest`` action. e.g. ``GET /foo``.

    Arguments of the form ``<key>: <value>`` are interpreted as HTTP request
    headers to add to the request. e.g. ``Accept: foo``.

    The following arguments are special:

    ``BODYFILE``
        The content of the file defined as the value to this argument will be
        transferred verbatim as the HTTP request body.

    ``frame <type> <flags> <payload>``
        Send a unified protocol frame as part of the request body.

        All frames will be collected and sent as the body to the HTTP
        request.

    close
    -----

    Close the connection to the server.

    flush
    -----

    Flush data written to the server.

    readavailable
    -------------

    Close the write end of the connection and read all available data from
    the server.

    If the connection to the server encompasses multiple pipes, we poll both
    pipes and read available data.

    readline
    --------

    Read a line of output from the server. If there are multiple output
    pipes, reads only the main pipe.

    ereadline
    ---------

    Like ``readline``, but read from the stderr pipe, if available.

    read <X>
    --------

    ``read()`` X bytes from the server's main output pipe.

    eread <X>
    ---------

    ``read()`` X bytes from the server's stderr pipe, if available.

    Specifying Unified Frame-Based Protocol Frames
    ----------------------------------------------

    It is possible to emit *Unified Frame-Based Protocol* frames by using
    special syntax.

    A frame is composed of a type, flags, and payload. These can be parsed
    from a string of the form::

        <request-id> <stream-id> <stream-flags> <type> <flags> <payload>

    ``request-id`` and ``stream-id`` are integers defining the request and
    stream identifiers.

    ``type`` can be an integer value for the frame type or the string name
    of the type. The strings are defined in ``wireprotoframing.py``. e.g.
    ``command-name``.

    ``stream-flags`` and ``flags`` are a ``|`` delimited list of flag
    components. Each component (and there can be just one) can be an integer
    or a flag name for stream flags or frame flags, respectively. Values are
    resolved to integers and then bitwise OR'd together.

    ``payload`` represents the raw frame payload. If it begins with
    ``cbor:``, the following string is evaluated as Python code and the
    resulting object is fed into a CBOR encoder. Otherwise it is interpreted
    as a Python byte string literal.
    """
4434 opts = pycompat.byteskwargs(opts)
4453 opts = pycompat.byteskwargs(opts)
4435
4454
4436 if opts[b'localssh'] and not repo:
4455 if opts[b'localssh'] and not repo:
4437 raise error.Abort(_(b'--localssh requires a repository'))
4456 raise error.Abort(_(b'--localssh requires a repository'))
4438
4457
4439 if opts[b'peer'] and opts[b'peer'] not in (
4458 if opts[b'peer'] and opts[b'peer'] not in (
4440 b'raw',
4459 b'raw',
4441 b'http2',
4460 b'http2',
4442 b'ssh1',
4461 b'ssh1',
4443 b'ssh2',
4462 b'ssh2',
4444 ):
4463 ):
4445 raise error.Abort(
4464 raise error.Abort(
4446 _(b'invalid value for --peer'),
4465 _(b'invalid value for --peer'),
4447 hint=_(b'valid values are "raw", "ssh1", and "ssh2"'),
4466 hint=_(b'valid values are "raw", "ssh1", and "ssh2"'),
4448 )
4467 )
4449
4468
4450 if path and opts[b'localssh']:
4469 if path and opts[b'localssh']:
4451 raise error.Abort(_(b'cannot specify --localssh with an explicit path'))
4470 raise error.Abort(_(b'cannot specify --localssh with an explicit path'))
4452
4471
4453 if ui.interactive():
4472 if ui.interactive():
4454 ui.write(_(b'(waiting for commands on stdin)\n'))
4473 ui.write(_(b'(waiting for commands on stdin)\n'))
4455
4474
4456 blocks = list(_parsewirelangblocks(ui.fin))
4475 blocks = list(_parsewirelangblocks(ui.fin))
4457
4476
4458 proc = None
4477 proc = None
4459 stdin = None
4478 stdin = None
4460 stdout = None
4479 stdout = None
4461 stderr = None
4480 stderr = None
4462 opener = None
4481 opener = None
4463
4482
4464 if opts[b'localssh']:
4483 if opts[b'localssh']:
4465 # We start the SSH server in its own process so there is process
4484 # We start the SSH server in its own process so there is process
4466 # separation. This prevents a whole class of potential bugs around
4485 # separation. This prevents a whole class of potential bugs around
4467 # shared state from interfering with server operation.
4486 # shared state from interfering with server operation.
4468 args = procutil.hgcmd() + [
4487 args = procutil.hgcmd() + [
4469 b'-R',
4488 b'-R',
4470 repo.root,
4489 repo.root,
4471 b'debugserve',
4490 b'debugserve',
4472 b'--sshstdio',
4491 b'--sshstdio',
4473 ]
4492 ]
4474 proc = subprocess.Popen(
4493 proc = subprocess.Popen(
4475 pycompat.rapply(procutil.tonativestr, args),
4494 pycompat.rapply(procutil.tonativestr, args),
4476 stdin=subprocess.PIPE,
4495 stdin=subprocess.PIPE,
4477 stdout=subprocess.PIPE,
4496 stdout=subprocess.PIPE,
4478 stderr=subprocess.PIPE,
4497 stderr=subprocess.PIPE,
4479 bufsize=0,
4498 bufsize=0,
4480 )
4499 )
4481
4500
4482 stdin = proc.stdin
4501 stdin = proc.stdin
4483 stdout = proc.stdout
4502 stdout = proc.stdout
4484 stderr = proc.stderr
4503 stderr = proc.stderr
4485
4504
4486 # We turn the pipes into observers so we can log I/O.
4505 # We turn the pipes into observers so we can log I/O.
4487 if ui.verbose or opts[b'peer'] == b'raw':
4506 if ui.verbose or opts[b'peer'] == b'raw':
4488 stdin = util.makeloggingfileobject(
4507 stdin = util.makeloggingfileobject(
4489 ui, proc.stdin, b'i', logdata=True
4508 ui, proc.stdin, b'i', logdata=True
4490 )
4509 )
4491 stdout = util.makeloggingfileobject(
4510 stdout = util.makeloggingfileobject(
4492 ui, proc.stdout, b'o', logdata=True
4511 ui, proc.stdout, b'o', logdata=True
4493 )
4512 )
4494 stderr = util.makeloggingfileobject(
4513 stderr = util.makeloggingfileobject(
4495 ui, proc.stderr, b'e', logdata=True
4514 ui, proc.stderr, b'e', logdata=True
4496 )
4515 )
4497
4516
4498 # --localssh also implies the peer connection settings.
4517 # --localssh also implies the peer connection settings.
4499
4518
4500 url = b'ssh://localserver'
4519 url = b'ssh://localserver'
4501 autoreadstderr = not opts[b'noreadstderr']
4520 autoreadstderr = not opts[b'noreadstderr']
4502
4521
4503 if opts[b'peer'] == b'ssh1':
4522 if opts[b'peer'] == b'ssh1':
4504 ui.write(_(b'creating ssh peer for wire protocol version 1\n'))
4523 ui.write(_(b'creating ssh peer for wire protocol version 1\n'))
4505 peer = sshpeer.sshv1peer(
4524 peer = sshpeer.sshv1peer(
4506 ui,
4525 ui,
4507 url,
4526 url,
4508 proc,
4527 proc,
4509 stdin,
4528 stdin,
4510 stdout,
4529 stdout,
4511 stderr,
4530 stderr,
4512 None,
4531 None,
4513 autoreadstderr=autoreadstderr,
4532 autoreadstderr=autoreadstderr,
4514 )
4533 )
4515 elif opts[b'peer'] == b'ssh2':
4534 elif opts[b'peer'] == b'ssh2':
4516 ui.write(_(b'creating ssh peer for wire protocol version 2\n'))
4535 ui.write(_(b'creating ssh peer for wire protocol version 2\n'))
4517 peer = sshpeer.sshv2peer(
4536 peer = sshpeer.sshv2peer(
4518 ui,
4537 ui,
4519 url,
4538 url,
4520 proc,
4539 proc,
4521 stdin,
4540 stdin,
4522 stdout,
4541 stdout,
4523 stderr,
4542 stderr,
4524 None,
4543 None,
4525 autoreadstderr=autoreadstderr,
4544 autoreadstderr=autoreadstderr,
4526 )
4545 )
4527 elif opts[b'peer'] == b'raw':
4546 elif opts[b'peer'] == b'raw':
4528 ui.write(_(b'using raw connection to peer\n'))
4547 ui.write(_(b'using raw connection to peer\n'))
4529 peer = None
4548 peer = None
4530 else:
4549 else:
4531 ui.write(_(b'creating ssh peer from handshake results\n'))
4550 ui.write(_(b'creating ssh peer from handshake results\n'))
4532 peer = sshpeer.makepeer(
4551 peer = sshpeer.makepeer(
4533 ui,
4552 ui,
4534 url,
4553 url,
4535 proc,
4554 proc,
4536 stdin,
4555 stdin,
4537 stdout,
4556 stdout,
4538 stderr,
4557 stderr,
4539 autoreadstderr=autoreadstderr,
4558 autoreadstderr=autoreadstderr,
4540 )
4559 )
4541
4560
4542 elif path:
4561 elif path:
4543 # We bypass hg.peer() so we can proxy the sockets.
4562 # We bypass hg.peer() so we can proxy the sockets.
4544 # TODO consider not doing this because we skip
4563 # TODO consider not doing this because we skip
4545 # ``hg.wirepeersetupfuncs`` and potentially other useful functionality.
4564 # ``hg.wirepeersetupfuncs`` and potentially other useful functionality.
4546 u = urlutil.url(path)
4565 u = urlutil.url(path)
4547 if u.scheme != b'http':
4566 if u.scheme != b'http':
4548 raise error.Abort(_(b'only http:// paths are currently supported'))
4567 raise error.Abort(_(b'only http:// paths are currently supported'))
4549
4568
4550 url, authinfo = u.authinfo()
4569 url, authinfo = u.authinfo()
4551 openerargs = {
4570 openerargs = {
4552 'useragent': b'Mercurial debugwireproto',
4571 'useragent': b'Mercurial debugwireproto',
4553 }
4572 }
4554
4573
4555 # Turn pipes/sockets into observers so we can log I/O.
4574 # Turn pipes/sockets into observers so we can log I/O.
4556 if ui.verbose:
4575 if ui.verbose:
4557 openerargs.update(
4576 openerargs.update(
4558 {
4577 {
4559 'loggingfh': ui,
4578 'loggingfh': ui,
4560 'loggingname': b's',
4579 'loggingname': b's',
4561 'loggingopts': {
4580 'loggingopts': {
4562 'logdata': True,
4581 'logdata': True,
4563 'logdataapis': False,
4582 'logdataapis': False,
4564 },
4583 },
4565 }
4584 }
4566 )
4585 )
4567
4586
4568 if ui.debugflag:
4587 if ui.debugflag:
4569 openerargs['loggingopts']['logdataapis'] = True
4588 openerargs['loggingopts']['logdataapis'] = True
4570
4589
4571 # Don't send default headers when in raw mode. This allows us to
4590 # Don't send default headers when in raw mode. This allows us to
4572 # bypass most of the behavior of our URL handling code so we can
4591 # bypass most of the behavior of our URL handling code so we can
4573 # have near complete control over what's sent on the wire.
4592 # have near complete control over what's sent on the wire.
4574 if opts[b'peer'] == b'raw':
4593 if opts[b'peer'] == b'raw':
4575 openerargs['sendaccept'] = False
4594 openerargs['sendaccept'] = False
4576
4595
4577 opener = urlmod.opener(ui, authinfo, **openerargs)
4596 opener = urlmod.opener(ui, authinfo, **openerargs)
4578
4597
4579 if opts[b'peer'] == b'http2':
4598 if opts[b'peer'] == b'http2':
4580 ui.write(_(b'creating http peer for wire protocol version 2\n'))
4599 ui.write(_(b'creating http peer for wire protocol version 2\n'))
4581 # We go through makepeer() because we need an API descriptor for
4600 # We go through makepeer() because we need an API descriptor for
4582 # the peer instance to be useful.
4601 # the peer instance to be useful.
4583 maybe_silent = (
4602 maybe_silent = (
4584 ui.silent()
4603 ui.silent()
4585 if opts[b'nologhandshake']
4604 if opts[b'nologhandshake']
4586 else util.nullcontextmanager()
4605 else util.nullcontextmanager()
4587 )
4606 )
4588 with maybe_silent, ui.configoverride(
4607 with maybe_silent, ui.configoverride(
4589 {(b'experimental', b'httppeer.advertise-v2'): True}
4608 {(b'experimental', b'httppeer.advertise-v2'): True}
4590 ):
4609 ):
4591 peer = httppeer.makepeer(ui, path, opener=opener)
4610 peer = httppeer.makepeer(ui, path, opener=opener)
4592
4611
4593 if not isinstance(peer, httppeer.httpv2peer):
4612 if not isinstance(peer, httppeer.httpv2peer):
4594 raise error.Abort(
4613 raise error.Abort(
4595 _(
4614 _(
4596 b'could not instantiate HTTP peer for '
4615 b'could not instantiate HTTP peer for '
4597 b'wire protocol version 2'
4616 b'wire protocol version 2'
4598 ),
4617 ),
4599 hint=_(
4618 hint=_(
4600 b'the server may not have the feature '
4619 b'the server may not have the feature '
4601 b'enabled or is not allowing this '
4620 b'enabled or is not allowing this '
4602 b'client version'
4621 b'client version'
4603 ),
4622 ),
4604 )
4623 )
4605
4624
4606 elif opts[b'peer'] == b'raw':
4625 elif opts[b'peer'] == b'raw':
4607 ui.write(_(b'using raw connection to peer\n'))
4626 ui.write(_(b'using raw connection to peer\n'))
4608 peer = None
4627 peer = None
4609 elif opts[b'peer']:
4628 elif opts[b'peer']:
4610 raise error.Abort(
4629 raise error.Abort(
4611 _(b'--peer %s not supported with HTTP peers') % opts[b'peer']
4630 _(b'--peer %s not supported with HTTP peers') % opts[b'peer']
4612 )
4631 )
4613 else:
4632 else:
4614 peer = httppeer.makepeer(ui, path, opener=opener)
4633 peer = httppeer.makepeer(ui, path, opener=opener)
4615
4634
4616 # We /could/ populate stdin/stdout with sock.makefile()...
4635 # We /could/ populate stdin/stdout with sock.makefile()...
4617 else:
4636 else:
4618 raise error.Abort(_(b'unsupported connection configuration'))
4637 raise error.Abort(_(b'unsupported connection configuration'))
4619
4638
4620 batchedcommands = None
4639 batchedcommands = None
4621
4640
4622 # Now perform actions based on the parsed wire language instructions.
4641 # Now perform actions based on the parsed wire language instructions.
4623 for action, lines in blocks:
4642 for action, lines in blocks:
4624 if action in (b'raw', b'raw+'):
4643 if action in (b'raw', b'raw+'):
4625 if not stdin:
4644 if not stdin:
4626 raise error.Abort(_(b'cannot call raw/raw+ on this peer'))
4645 raise error.Abort(_(b'cannot call raw/raw+ on this peer'))
4627
4646
4628 # Concatenate the data together.
4647 # Concatenate the data together.
4629 data = b''.join(l.lstrip() for l in lines)
4648 data = b''.join(l.lstrip() for l in lines)
4630 data = stringutil.unescapestr(data)
4649 data = stringutil.unescapestr(data)
4631 stdin.write(data)
4650 stdin.write(data)
4632
4651
4633 if action == b'raw+':
4652 if action == b'raw+':
4634 stdin.flush()
4653 stdin.flush()
4635 elif action == b'flush':
4654 elif action == b'flush':
4636 if not stdin:
4655 if not stdin:
4637 raise error.Abort(_(b'cannot call flush on this peer'))
4656 raise error.Abort(_(b'cannot call flush on this peer'))
4638 stdin.flush()
4657 stdin.flush()
4639 elif action.startswith(b'command'):
4658 elif action.startswith(b'command'):
4640 if not peer:
4659 if not peer:
4641 raise error.Abort(
4660 raise error.Abort(
4642 _(
4661 _(
4643 b'cannot send commands unless peer instance '
4662 b'cannot send commands unless peer instance '
4644 b'is available'
4663 b'is available'
4645 )
4664 )
4646 )
4665 )
4647
4666
4648 command = action.split(b' ', 1)[1]
4667 command = action.split(b' ', 1)[1]
4649
4668
4650 args = {}
4669 args = {}
4651 for line in lines:
4670 for line in lines:
4652 # We need to allow empty values.
4671 # We need to allow empty values.
4653 fields = line.lstrip().split(b' ', 1)
4672 fields = line.lstrip().split(b' ', 1)
4654 if len(fields) == 1:
4673 if len(fields) == 1:
4655 key = fields[0]
4674 key = fields[0]
4656 value = b''
4675 value = b''
4657 else:
4676 else:
4658 key, value = fields
4677 key, value = fields
4659
4678
4660 if value.startswith(b'eval:'):
4679 if value.startswith(b'eval:'):
4661 value = stringutil.evalpythonliteral(value[5:])
4680 value = stringutil.evalpythonliteral(value[5:])
4662 else:
4681 else:
4663 value = stringutil.unescapestr(value)
4682 value = stringutil.unescapestr(value)
4664
4683
4665 args[key] = value
4684 args[key] = value
4666
4685
4667 if batchedcommands is not None:
4686 if batchedcommands is not None:
4668 batchedcommands.append((command, args))
4687 batchedcommands.append((command, args))
4669 continue
4688 continue
4670
4689
4671 ui.status(_(b'sending %s command\n') % command)
4690 ui.status(_(b'sending %s command\n') % command)
4672
4691
4673 if b'PUSHFILE' in args:
4692 if b'PUSHFILE' in args:
4674 with open(args[b'PUSHFILE'], 'rb') as fh:
4693 with open(args[b'PUSHFILE'], 'rb') as fh:
4675 del args[b'PUSHFILE']
4694 del args[b'PUSHFILE']
4676 res, output = peer._callpush(
4695 res, output = peer._callpush(
4677 command, fh, **pycompat.strkwargs(args)
4696 command, fh, **pycompat.strkwargs(args)
4678 )
4697 )
4679 ui.status(_(b'result: %s\n') % stringutil.escapestr(res))
4698 ui.status(_(b'result: %s\n') % stringutil.escapestr(res))
4680 ui.status(
4699 ui.status(
4681 _(b'remote output: %s\n') % stringutil.escapestr(output)
4700 _(b'remote output: %s\n') % stringutil.escapestr(output)
4682 )
4701 )
4683 else:
4702 else:
4684 with peer.commandexecutor() as e:
4703 with peer.commandexecutor() as e:
4685 res = e.callcommand(command, args).result()
4704 res = e.callcommand(command, args).result()
4686
4705
4687 if isinstance(res, wireprotov2peer.commandresponse):
4706 if isinstance(res, wireprotov2peer.commandresponse):
4688 val = res.objects()
4707 val = res.objects()
4689 ui.status(
4708 ui.status(
4690 _(b'response: %s\n')
4709 _(b'response: %s\n')
4691 % stringutil.pprint(val, bprefix=True, indent=2)
4710 % stringutil.pprint(val, bprefix=True, indent=2)
4692 )
4711 )
4693 else:
4712 else:
4694 ui.status(
4713 ui.status(
4695 _(b'response: %s\n')
4714 _(b'response: %s\n')
4696 % stringutil.pprint(res, bprefix=True, indent=2)
4715 % stringutil.pprint(res, bprefix=True, indent=2)
4697 )
4716 )
4698
4717
4699 elif action == b'batchbegin':
4718 elif action == b'batchbegin':
4700 if batchedcommands is not None:
4719 if batchedcommands is not None:
4701 raise error.Abort(_(b'nested batchbegin not allowed'))
4720 raise error.Abort(_(b'nested batchbegin not allowed'))
4702
4721
4703 batchedcommands = []
4722 batchedcommands = []
4704 elif action == b'batchsubmit':
4723 elif action == b'batchsubmit':
4705 # There is a batching API we could go through. But it would be
4724 # There is a batching API we could go through. But it would be
4706 # difficult to normalize requests into function calls. It is easier
4725 # difficult to normalize requests into function calls. It is easier
4707 # to bypass this layer and normalize to commands + args.
4726 # to bypass this layer and normalize to commands + args.
4708 ui.status(
4727 ui.status(
4709 _(b'sending batch with %d sub-commands\n')
4728 _(b'sending batch with %d sub-commands\n')
4710 % len(batchedcommands)
4729 % len(batchedcommands)
4711 )
4730 )
4712 assert peer is not None
4731 assert peer is not None
4713 for i, chunk in enumerate(peer._submitbatch(batchedcommands)):
4732 for i, chunk in enumerate(peer._submitbatch(batchedcommands)):
4714 ui.status(
4733 ui.status(
4715 _(b'response #%d: %s\n') % (i, stringutil.escapestr(chunk))
4734 _(b'response #%d: %s\n') % (i, stringutil.escapestr(chunk))
4716 )
4735 )
4717
4736
4718 batchedcommands = None
4737 batchedcommands = None
4719
4738
4720 elif action.startswith(b'httprequest '):
4739 elif action.startswith(b'httprequest '):
4721 if not opener:
4740 if not opener:
4722 raise error.Abort(
4741 raise error.Abort(
4723 _(b'cannot use httprequest without an HTTP peer')
4742 _(b'cannot use httprequest without an HTTP peer')
4724 )
4743 )
4725
4744
4726 request = action.split(b' ', 2)
4745 request = action.split(b' ', 2)
4727 if len(request) != 3:
4746 if len(request) != 3:
4728 raise error.Abort(
4747 raise error.Abort(
4729 _(
4748 _(
4730 b'invalid httprequest: expected format is '
4749 b'invalid httprequest: expected format is '
4731 b'"httprequest <method> <path>'
4750 b'"httprequest <method> <path>'
4732 )
4751 )
4733 )
4752 )
4734
4753
4735 method, httppath = request[1:]
4754 method, httppath = request[1:]
4736 headers = {}
4755 headers = {}
4737 body = None
4756 body = None
4738 frames = []
4757 frames = []
4739 for line in lines:
4758 for line in lines:
4740 line = line.lstrip()
4759 line = line.lstrip()
4741 m = re.match(b'^([a-zA-Z0-9_-]+): (.*)$', line)
4760 m = re.match(b'^([a-zA-Z0-9_-]+): (.*)$', line)
4742 if m:
4761 if m:
4743 # Headers need to use native strings.
4762 # Headers need to use native strings.
4744 key = pycompat.strurl(m.group(1))
4763 key = pycompat.strurl(m.group(1))
4745 value = pycompat.strurl(m.group(2))
4764 value = pycompat.strurl(m.group(2))
4746 headers[key] = value
4765 headers[key] = value
4747 continue
4766 continue
4748
4767
4749 if line.startswith(b'BODYFILE '):
4768 if line.startswith(b'BODYFILE '):
4750 with open(line.split(b' ', 1)[1], 'rb') as fh:
4769 with open(line.split(b' ', 1)[1], 'rb') as fh:
4751 body = fh.read()
4770 body = fh.read()
4752 elif line.startswith(b'frame '):
4771 elif line.startswith(b'frame '):
4753 frame = wireprotoframing.makeframefromhumanstring(
4772 frame = wireprotoframing.makeframefromhumanstring(
4754 line[len(b'frame ') :]
4773 line[len(b'frame ') :]
4755 )
4774 )
4756
4775
4757 frames.append(frame)
4776 frames.append(frame)
4758 else:
4777 else:
4759 raise error.Abort(
4778 raise error.Abort(
4760 _(b'unknown argument to httprequest: %s') % line
4779 _(b'unknown argument to httprequest: %s') % line
4761 )
4780 )
4762
4781
4763 url = path + httppath
4782 url = path + httppath
4764
4783
4765 if frames:
4784 if frames:
4766 body = b''.join(bytes(f) for f in frames)
4785 body = b''.join(bytes(f) for f in frames)
4767
4786
4768 req = urlmod.urlreq.request(pycompat.strurl(url), body, headers)
4787 req = urlmod.urlreq.request(pycompat.strurl(url), body, headers)
4769
4788
4770 # urllib.Request insists on using has_data() as a proxy for
4789 # urllib.Request insists on using has_data() as a proxy for
4771 # determining the request method. Override that to use our
4790 # determining the request method. Override that to use our
4772 # explicitly requested method.
4791 # explicitly requested method.
4773 req.get_method = lambda: pycompat.sysstr(method)
4792 req.get_method = lambda: pycompat.sysstr(method)
4774
4793
4775 try:
4794 try:
4776 res = opener.open(req)
4795 res = opener.open(req)
4777 body = res.read()
4796 body = res.read()
4778 except util.urlerr.urlerror as e:
4797 except util.urlerr.urlerror as e:
4779 # read() method must be called, but only exists in Python 2
4798 # read() method must be called, but only exists in Python 2
4780 getattr(e, 'read', lambda: None)()
4799 getattr(e, 'read', lambda: None)()
4781 continue
4800 continue
4782
4801
4783 ct = res.headers.get('Content-Type')
4802 ct = res.headers.get('Content-Type')
4784 if ct == 'application/mercurial-cbor':
4803 if ct == 'application/mercurial-cbor':
4785 ui.write(
4804 ui.write(
4786 _(b'cbor> %s\n')
4805 _(b'cbor> %s\n')
4787 % stringutil.pprint(
4806 % stringutil.pprint(
4788 cborutil.decodeall(body), bprefix=True, indent=2
4807 cborutil.decodeall(body), bprefix=True, indent=2
4789 )
4808 )
4790 )
4809 )
4791
4810
4792 elif action == b'close':
4811 elif action == b'close':
4793 assert peer is not None
4812 assert peer is not None
4794 peer.close()
4813 peer.close()
4795 elif action == b'readavailable':
4814 elif action == b'readavailable':
4796 if not stdout or not stderr:
4815 if not stdout or not stderr:
4797 raise error.Abort(
4816 raise error.Abort(
4798 _(b'readavailable not available on this peer')
4817 _(b'readavailable not available on this peer')
4799 )
4818 )
4800
4819
4801 stdin.close()
4820 stdin.close()
4802 stdout.read()
4821 stdout.read()
4803 stderr.read()
4822 stderr.read()
4804
4823
4805 elif action == b'readline':
4824 elif action == b'readline':
4806 if not stdout:
4825 if not stdout:
4807 raise error.Abort(_(b'readline not available on this peer'))
4826 raise error.Abort(_(b'readline not available on this peer'))
4808 stdout.readline()
4827 stdout.readline()
4809 elif action == b'ereadline':
4828 elif action == b'ereadline':
4810 if not stderr:
4829 if not stderr:
4811 raise error.Abort(_(b'ereadline not available on this peer'))
4830 raise error.Abort(_(b'ereadline not available on this peer'))
4812 stderr.readline()
4831 stderr.readline()
4813 elif action.startswith(b'read '):
4832 elif action.startswith(b'read '):
4814 count = int(action.split(b' ', 1)[1])
4833 count = int(action.split(b' ', 1)[1])
4815 if not stdout:
4834 if not stdout:
4816 raise error.Abort(_(b'read not available on this peer'))
4835 raise error.Abort(_(b'read not available on this peer'))
4817 stdout.read(count)
4836 stdout.read(count)
4818 elif action.startswith(b'eread '):
4837 elif action.startswith(b'eread '):
4819 count = int(action.split(b' ', 1)[1])
4838 count = int(action.split(b' ', 1)[1])
4820 if not stderr:
4839 if not stderr:
4821 raise error.Abort(_(b'eread not available on this peer'))
4840 raise error.Abort(_(b'eread not available on this peer'))
4822 stderr.read(count)
4841 stderr.read(count)
4823 else:
4842 else:
4824 raise error.Abort(_(b'unknown action: %s') % action)
4843 raise error.Abort(_(b'unknown action: %s') % action)
4825
4844
4826 if batchedcommands is not None:
4845 if batchedcommands is not None:
4827 raise error.Abort(_(b'unclosed "batchbegin" request'))
4846 raise error.Abort(_(b'unclosed "batchbegin" request'))
4828
4847
4829 if peer:
4848 if peer:
4830 peer.close()
4849 peer.close()
4831
4850
4832 if proc:
4851 if proc:
4833 proc.kill()
4852 proc.kill()
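Everything the action loop above executes comes from a small script language read off stdin. A minimal sketch of such a script (the command names and argument values are illustrative, not part of this change):

    command heads
    command known
        nodes eval:[b'0123456789abcdef0123456789abcdef01234567']
    batchbegin
    command heads
    command listkeys
        namespace namespaces
    batchsubmit
    httprequest GET api/
        accept: application/mercurial-0.1

Indented lines under `command` become the `args` mapping (values prefixed with `eval:` go through `stringutil.evalpythonliteral()`, everything else is unescaped); `batchbegin`/`batchsubmit` queue the intervening commands for `peer._submitbatch()`; `Name: value` lines under `httprequest` turn into HTTP headers.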
@@ -1,1625 +1,1626 b''
1 # dirstate.py - working directory tracking for mercurial
1 # dirstate.py - working directory tracking for mercurial
2 #
2 #
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import collections
10 import collections
11 import contextlib
11 import contextlib
12 import errno
12 import errno
13 import os
13 import os
14 import stat
14 import stat
15
15
16 from .i18n import _
16 from .i18n import _
17 from .pycompat import delattr
17 from .pycompat import delattr
18
18
19 from hgdemandimport import tracing
19 from hgdemandimport import tracing
20
20
21 from . import (
21 from . import (
22 dirstatemap,
22 dirstatemap,
23 encoding,
23 encoding,
24 error,
24 error,
25 match as matchmod,
25 match as matchmod,
26 pathutil,
26 pathutil,
27 policy,
27 policy,
28 pycompat,
28 pycompat,
29 scmutil,
29 scmutil,
30 sparse,
30 sparse,
31 util,
31 util,
32 )
32 )
33
33
34 from .interfaces import (
34 from .interfaces import (
35 dirstate as intdirstate,
35 dirstate as intdirstate,
36 util as interfaceutil,
36 util as interfaceutil,
37 )
37 )
38
38
39 parsers = policy.importmod('parsers')
39 parsers = policy.importmod('parsers')
40 rustmod = policy.importrust('dirstate')
40 rustmod = policy.importrust('dirstate')
41
41
42 SUPPORTS_DIRSTATE_V2 = rustmod is not None
42 SUPPORTS_DIRSTATE_V2 = rustmod is not None
43
43
44 propertycache = util.propertycache
44 propertycache = util.propertycache
45 filecache = scmutil.filecache
45 filecache = scmutil.filecache
46 _rangemask = dirstatemap.rangemask
46 _rangemask = dirstatemap.rangemask
47
47
48 DirstateItem = parsers.DirstateItem
48 DirstateItem = parsers.DirstateItem
49
49
50
50
51 class repocache(filecache):
51 class repocache(filecache):
52 """filecache for files in .hg/"""
52 """filecache for files in .hg/"""
53
53
54 def join(self, obj, fname):
54 def join(self, obj, fname):
55 return obj._opener.join(fname)
55 return obj._opener.join(fname)
56
56
57
57
58 class rootcache(filecache):
58 class rootcache(filecache):
59 """filecache for files in the repository root"""
59 """filecache for files in the repository root"""
60
60
61 def join(self, obj, fname):
61 def join(self, obj, fname):
62 return obj._join(fname)
62 return obj._join(fname)
63
63
64
64
65 def _getfsnow(vfs):
65 def _getfsnow(vfs):
66 '''Get "now" timestamp on filesystem'''
66 '''Get "now" timestamp on filesystem'''
67 tmpfd, tmpname = vfs.mkstemp()
67 tmpfd, tmpname = vfs.mkstemp()
68 try:
68 try:
69 return os.fstat(tmpfd)[stat.ST_MTIME]
69 return os.fstat(tmpfd)[stat.ST_MTIME]
70 finally:
70 finally:
71 os.close(tmpfd)
71 os.close(tmpfd)
72 vfs.unlink(tmpname)
72 vfs.unlink(tmpname)
73
73
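A short sketch of why callers want the filesystem's idea of "now" rather than the local clock (the helper name below is hypothetical): a file whose mtime is not strictly older than this timestamp may still change within the same second, so its cached stat data cannot prove it clean.

    def mtime_is_ambiguous(vfs, st_mtime):
        # Hypothetical helper: True means the file must be re-examined
        # on the next status run instead of being trusted as clean.
        return st_mtime >= _getfsnow(vfs)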
74
74
75 def requires_parents_change(func):
75 def requires_parents_change(func):
76 def wrap(self, *args, **kwargs):
76 def wrap(self, *args, **kwargs):
77 if not self.pendingparentchange():
77 if not self.pendingparentchange():
78 msg = 'calling `%s` outside of a parentchange context'
78 msg = 'calling `%s` outside of a parentchange context'
79 msg %= func.__name__
79 msg %= func.__name__
80 raise error.ProgrammingError(msg)
80 raise error.ProgrammingError(msg)
81 return func(self, *args, **kwargs)
81 return func(self, *args, **kwargs)
82
82
83 return wrap
83 return wrap
84
84
85
85
86 def requires_no_parents_change(func):
86 def requires_no_parents_change(func):
87 def wrap(self, *args, **kwargs):
87 def wrap(self, *args, **kwargs):
88 if self.pendingparentchange():
88 if self.pendingparentchange():
89 msg = 'calling `%s` inside of a parentchange context'
89 msg = 'calling `%s` inside of a parentchange context'
90 msg %= func.__name__
90 msg %= func.__name__
91 raise error.ProgrammingError(msg)
91 raise error.ProgrammingError(msg)
92 return func(self, *args, **kwargs)
92 return func(self, *args, **kwargs)
93
93
94 return wrap
94 return wrap
95
95
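A minimal sketch of how these two guards are meant to be applied to dirstate methods (the method names are invented for illustration):

    class guarded(object):
        def pendingparentchange(self):
            return self._parentwriters > 0

        @requires_parents_change
        def rewrite_time_api(self):
            # only callable inside `with dirstate.parentchange():`
            pass

        @requires_no_parents_change
        def normal_time_api(self):
            # only callable *outside* a parentchange context
            pass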
96
96
97 @interfaceutil.implementer(intdirstate.idirstate)
97 @interfaceutil.implementer(intdirstate.idirstate)
98 class dirstate(object):
98 class dirstate(object):
99 def __init__(
99 def __init__(
100 self,
100 self,
101 opener,
101 opener,
102 ui,
102 ui,
103 root,
103 root,
104 validate,
104 validate,
105 sparsematchfn,
105 sparsematchfn,
106 nodeconstants,
106 nodeconstants,
107 use_dirstate_v2,
107 use_dirstate_v2,
108 ):
108 ):
109 """Create a new dirstate object.
109 """Create a new dirstate object.
110
110
111 opener is an open()-like callable that can be used to open the
111 opener is an open()-like callable that can be used to open the
112 dirstate file; root is the root of the directory tracked by
112 dirstate file; root is the root of the directory tracked by
113 the dirstate.
113 the dirstate.
114 """
114 """
115 self._use_dirstate_v2 = use_dirstate_v2
115 self._use_dirstate_v2 = use_dirstate_v2
116 self._nodeconstants = nodeconstants
116 self._nodeconstants = nodeconstants
117 self._opener = opener
117 self._opener = opener
118 self._validate = validate
118 self._validate = validate
119 self._root = root
119 self._root = root
120 self._sparsematchfn = sparsematchfn
120 self._sparsematchfn = sparsematchfn
121 # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is
121 # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is
122 # a UNC path pointing to a root share (issue4557)
122 # a UNC path pointing to a root share (issue4557)

123 self._rootdir = pathutil.normasprefix(root)
123 self._rootdir = pathutil.normasprefix(root)
124 self._dirty = False
124 self._dirty = False
125 self._lastnormaltime = 0
125 self._lastnormaltime = 0
126 self._ui = ui
126 self._ui = ui
127 self._filecache = {}
127 self._filecache = {}
128 self._parentwriters = 0
128 self._parentwriters = 0
129 self._filename = b'dirstate'
129 self._filename = b'dirstate'
130 self._pendingfilename = b'%s.pending' % self._filename
130 self._pendingfilename = b'%s.pending' % self._filename
131 self._plchangecallbacks = {}
131 self._plchangecallbacks = {}
132 self._origpl = None
132 self._origpl = None
133 self._updatedfiles = set()
133 self._updatedfiles = set()
134 self._mapcls = dirstatemap.dirstatemap
134 self._mapcls = dirstatemap.dirstatemap
135 # Access and cache cwd early, so we don't access it for the first time
135 # Access and cache cwd early, so we don't access it for the first time
136 # after a working-copy update caused it to not exist (accessing it then
136 # after a working-copy update caused it to not exist (accessing it then
137 # raises an exception).
137 # raises an exception).
138 self._cwd
138 self._cwd
139
139
140 def prefetch_parents(self):
140 def prefetch_parents(self):
141 """make sure the parents are loaded
141 """make sure the parents are loaded
142
142
143 Used to avoid a race condition.
143 Used to avoid a race condition.
144 """
144 """
145 self._pl
145 self._pl
146
146
147 @contextlib.contextmanager
147 @contextlib.contextmanager
148 def parentchange(self):
148 def parentchange(self):
149 """Context manager for handling dirstate parents.
149 """Context manager for handling dirstate parents.
150
150
151 If an exception occurs in the scope of the context manager,
151 If an exception occurs in the scope of the context manager,
152 the incoherent dirstate won't be written when wlock is
152 the incoherent dirstate won't be written when wlock is
153 released.
153 released.
154 """
154 """
155 self._parentwriters += 1
155 self._parentwriters += 1
156 yield
156 yield
157 # Typically we want the "undo" step of a context manager in a
157 # Typically we want the "undo" step of a context manager in a
158 # finally block so it happens even when an exception
158 # finally block so it happens even when an exception
159 # occurs. In this case, however, we only want to decrement
159 # occurs. In this case, however, we only want to decrement
160 # parentwriters if the code in the with statement exits
160 # parentwriters if the code in the with statement exits
161 # normally, so we don't have a try/finally here on purpose.
161 # normally, so we don't have a try/finally here on purpose.
162 self._parentwriters -= 1
162 self._parentwriters -= 1
163
163
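A minimal usage sketch (assumes the caller already holds the wlock):

    with repo.dirstate.parentchange():
        # legal here: _parentwriters > 0
        repo.dirstate.setparents(newnode)
    # outside the block, setparents() raises ValueError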
164 def pendingparentchange(self):
164 def pendingparentchange(self):
165 """Returns true if the dirstate is in the middle of a set of changes
165 """Returns true if the dirstate is in the middle of a set of changes
166 that modify the dirstate parent.
166 that modify the dirstate parent.
167 """
167 """
168 return self._parentwriters > 0
168 return self._parentwriters > 0
169
169
170 @propertycache
170 @propertycache
171 def _map(self):
171 def _map(self):
172 """Return the dirstate contents (see documentation for dirstatemap)."""
172 """Return the dirstate contents (see documentation for dirstatemap)."""
173 self._map = self._mapcls(
173 self._map = self._mapcls(
174 self._ui,
174 self._ui,
175 self._opener,
175 self._opener,
176 self._root,
176 self._root,
177 self._nodeconstants,
177 self._nodeconstants,
178 self._use_dirstate_v2,
178 self._use_dirstate_v2,
179 )
179 )
180 return self._map
180 return self._map
181
181
182 @property
182 @property
183 def _sparsematcher(self):
183 def _sparsematcher(self):
184 """The matcher for the sparse checkout.
184 """The matcher for the sparse checkout.
185
185
186 The working directory may not include every file from a manifest. The
186 The working directory may not include every file from a manifest. The
187 matcher obtained by this property will match a path if it is to be
187 matcher obtained by this property will match a path if it is to be
188 included in the working directory.
188 included in the working directory.
189 """
189 """
190 # TODO there is potential to cache this property. For now, the matcher
190 # TODO there is potential to cache this property. For now, the matcher
191 # is resolved on every access. (But the called function does use a
191 # is resolved on every access. (But the called function does use a
192 # cache to keep the lookup fast.)
192 # cache to keep the lookup fast.)
193 return self._sparsematchfn()
193 return self._sparsematchfn()
194
194
195 @repocache(b'branch')
195 @repocache(b'branch')
196 def _branch(self):
196 def _branch(self):
197 try:
197 try:
198 return self._opener.read(b"branch").strip() or b"default"
198 return self._opener.read(b"branch").strip() or b"default"
199 except IOError as inst:
199 except IOError as inst:
200 if inst.errno != errno.ENOENT:
200 if inst.errno != errno.ENOENT:
201 raise
201 raise
202 return b"default"
202 return b"default"
203
203
204 @property
204 @property
205 def _pl(self):
205 def _pl(self):
206 return self._map.parents()
206 return self._map.parents()
207
207
208 def hasdir(self, d):
208 def hasdir(self, d):
209 return self._map.hastrackeddir(d)
209 return self._map.hastrackeddir(d)
210
210
211 @rootcache(b'.hgignore')
211 @rootcache(b'.hgignore')
212 def _ignore(self):
212 def _ignore(self):
213 files = self._ignorefiles()
213 files = self._ignorefiles()
214 if not files:
214 if not files:
215 return matchmod.never()
215 return matchmod.never()
216
216
217 pats = [b'include:%s' % f for f in files]
217 pats = [b'include:%s' % f for f in files]
218 return matchmod.match(self._root, b'', [], pats, warn=self._ui.warn)
218 return matchmod.match(self._root, b'', [], pats, warn=self._ui.warn)
219
219
220 @propertycache
220 @propertycache
221 def _slash(self):
221 def _slash(self):
222 return self._ui.configbool(b'ui', b'slash') and pycompat.ossep != b'/'
222 return self._ui.configbool(b'ui', b'slash') and pycompat.ossep != b'/'
223
223
224 @propertycache
224 @propertycache
225 def _checklink(self):
225 def _checklink(self):
226 return util.checklink(self._root)
226 return util.checklink(self._root)
227
227
228 @propertycache
228 @propertycache
229 def _checkexec(self):
229 def _checkexec(self):
230 return bool(util.checkexec(self._root))
230 return bool(util.checkexec(self._root))
231
231
232 @propertycache
232 @propertycache
233 def _checkcase(self):
233 def _checkcase(self):
234 return not util.fscasesensitive(self._join(b'.hg'))
234 return not util.fscasesensitive(self._join(b'.hg'))
235
235
236 def _join(self, f):
236 def _join(self, f):
237 # much faster than os.path.join()
237 # much faster than os.path.join()
238 # it's safe because f is always a relative path
238 # it's safe because f is always a relative path
239 return self._rootdir + f
239 return self._rootdir + f
240
240
241 def flagfunc(self, buildfallback):
241 def flagfunc(self, buildfallback):
242 if self._checklink and self._checkexec:
242 if self._checklink and self._checkexec:
243
243
244 def f(x):
244 def f(x):
245 try:
245 try:
246 st = os.lstat(self._join(x))
246 st = os.lstat(self._join(x))
247 if util.statislink(st):
247 if util.statislink(st):
248 return b'l'
248 return b'l'
249 if util.statisexec(st):
249 if util.statisexec(st):
250 return b'x'
250 return b'x'
251 except OSError:
251 except OSError:
252 pass
252 pass
253 return b''
253 return b''
254
254
255 return f
255 return f
256
256
257 fallback = buildfallback()
257 fallback = buildfallback()
258 if self._checklink:
258 if self._checklink:
259
259
260 def f(x):
260 def f(x):
261 if os.path.islink(self._join(x)):
261 if os.path.islink(self._join(x)):
262 return b'l'
262 return b'l'
263 if b'x' in fallback(x):
263 if b'x' in fallback(x):
264 return b'x'
264 return b'x'
265 return b''
265 return b''
266
266
267 return f
267 return f
268 if self._checkexec:
268 if self._checkexec:
269
269
270 def f(x):
270 def f(x):
271 if b'l' in fallback(x):
271 if b'l' in fallback(x):
272 return b'l'
272 return b'l'
273 if util.isexec(self._join(x)):
273 if util.isexec(self._join(x)):
274 return b'x'
274 return b'x'
275 return b''
275 return b''
276
276
277 return f
277 return f
278 else:
278 else:
279 return fallback
279 return fallback
280
280
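A sketch of what the returned callable reports, whichever branch built it (paths are illustrative):

    f = dirstate.flagfunc(buildfallback)
    f(b'tools/run.sh')  # b'x' for an executable file
    f(b'current')       # b'l' for a symlink
    f(b'README')        # b'' for a plain file

When the filesystem cannot express symlinks or the exec bit, the missing answer is taken from `fallback(x)`, typically backed by manifest flags.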
281 @propertycache
281 @propertycache
282 def _cwd(self):
282 def _cwd(self):
283 # internal config: ui.forcecwd
283 # internal config: ui.forcecwd
284 forcecwd = self._ui.config(b'ui', b'forcecwd')
284 forcecwd = self._ui.config(b'ui', b'forcecwd')
285 if forcecwd:
285 if forcecwd:
286 return forcecwd
286 return forcecwd
287 return encoding.getcwd()
287 return encoding.getcwd()
288
288
289 def getcwd(self):
289 def getcwd(self):
290 """Return the path from which a canonical path is calculated.
290 """Return the path from which a canonical path is calculated.
291
291
292 This path should be used to resolve file patterns or to convert
292 This path should be used to resolve file patterns or to convert
293 canonical paths back to file paths for display. It shouldn't be
293 canonical paths back to file paths for display. It shouldn't be
294 used to get real file paths. Use vfs functions instead.
294 used to get real file paths. Use vfs functions instead.
295 """
295 """
296 cwd = self._cwd
296 cwd = self._cwd
297 if cwd == self._root:
297 if cwd == self._root:
298 return b''
298 return b''
299 # self._root ends with a path separator if self._root is '/' or 'C:\'
299 # self._root ends with a path separator if self._root is '/' or 'C:\'
300 rootsep = self._root
300 rootsep = self._root
301 if not util.endswithsep(rootsep):
301 if not util.endswithsep(rootsep):
302 rootsep += pycompat.ossep
302 rootsep += pycompat.ossep
303 if cwd.startswith(rootsep):
303 if cwd.startswith(rootsep):
304 return cwd[len(rootsep) :]
304 return cwd[len(rootsep) :]
305 else:
305 else:
306 # we're outside the repo. return an absolute path.
306 # we're outside the repo. return an absolute path.
307 return cwd
307 return cwd
308
308
309 def pathto(self, f, cwd=None):
309 def pathto(self, f, cwd=None):
310 if cwd is None:
310 if cwd is None:
311 cwd = self.getcwd()
311 cwd = self.getcwd()
312 path = util.pathto(self._root, cwd, f)
312 path = util.pathto(self._root, cwd, f)
313 if self._slash:
313 if self._slash:
314 return util.pconvert(path)
314 return util.pconvert(path)
315 return path
315 return path
316
316
317 def __getitem__(self, key):
317 def __getitem__(self, key):
318 """Return the current state of key (a filename) in the dirstate.
318 """Return the current state of key (a filename) in the dirstate.
319
319
320 States are:
320 States are:
321 n normal
321 n normal
322 m needs merging
322 m needs merging
323 r marked for removal
323 r marked for removal
324 a marked for addition
324 a marked for addition
325 ? not tracked
325 ? not tracked
326
326
327 XXX The "state" is a bit obscure to be in the "public" API. we should
327 XXX The "state" is a bit obscure to be in the "public" API. we should
328 consider migrating all user of this to going through the dirstate entry
328 consider migrating all user of this to going through the dirstate entry
329 instead.
329 instead.
330 """
330 """
331 entry = self._map.get(key)
331 entry = self._map.get(key)
332 if entry is not None:
332 if entry is not None:
333 return entry.state
333 return entry.state
334 return b'?'
334 return b'?'
335
335
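For example (filenames are illustrative):

    ds[b'clean-tracked-file']    # b'n'
    ds[b'file-just-hg-added']    # b'a'
    ds[b'file-just-hg-removed']  # b'r'
    ds[b'file-never-tracked']    # b'?'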
336 def __contains__(self, key):
336 def __contains__(self, key):
337 return key in self._map
337 return key in self._map
338
338
339 def __iter__(self):
339 def __iter__(self):
340 return iter(sorted(self._map))
340 return iter(sorted(self._map))
341
341
342 def items(self):
342 def items(self):
343 return pycompat.iteritems(self._map)
343 return pycompat.iteritems(self._map)
344
344
345 iteritems = items
345 iteritems = items
346
346
347 def directories(self):
347 def directories(self):
348 return self._map.directories()
348 return self._map.directories()
349
349
350 def parents(self):
350 def parents(self):
351 return [self._validate(p) for p in self._pl]
351 return [self._validate(p) for p in self._pl]
352
352
353 def p1(self):
353 def p1(self):
354 return self._validate(self._pl[0])
354 return self._validate(self._pl[0])
355
355
356 def p2(self):
356 def p2(self):
357 return self._validate(self._pl[1])
357 return self._validate(self._pl[1])
358
358
359 @property
359 @property
360 def in_merge(self):
360 def in_merge(self):
361 """True if a merge is in progress"""
361 """True if a merge is in progress"""
362 return self._pl[1] != self._nodeconstants.nullid
362 return self._pl[1] != self._nodeconstants.nullid
363
363
364 def branch(self):
364 def branch(self):
365 return encoding.tolocal(self._branch)
365 return encoding.tolocal(self._branch)
366
366
367 def setparents(self, p1, p2=None):
367 def setparents(self, p1, p2=None):
368 """Set dirstate parents to p1 and p2.
368 """Set dirstate parents to p1 and p2.
369
369
370 When moving from two parents to one, "merged" entries are
370 When moving from two parents to one, "merged" entries are
371 adjusted to normal and previous copy records discarded and
371 adjusted to normal and previous copy records discarded and
372 returned by the call.
372 returned by the call.
373
373
374 See localrepo.setparents()
374 See localrepo.setparents()
375 """
375 """
376 if p2 is None:
376 if p2 is None:
377 p2 = self._nodeconstants.nullid
377 p2 = self._nodeconstants.nullid
378 if self._parentwriters == 0:
378 if self._parentwriters == 0:
379 raise ValueError(
379 raise ValueError(
380 b"cannot set dirstate parent outside of "
380 b"cannot set dirstate parent outside of "
381 b"dirstate.parentchange context manager"
381 b"dirstate.parentchange context manager"
382 )
382 )
383
383
384 self._dirty = True
384 self._dirty = True
385 oldp2 = self._pl[1]
385 oldp2 = self._pl[1]
386 if self._origpl is None:
386 if self._origpl is None:
387 self._origpl = self._pl
387 self._origpl = self._pl
388 self._map.setparents(p1, p2)
388 self._map.setparents(p1, p2)
389 copies = {}
389 copies = {}
390 if (
390 if (
391 oldp2 != self._nodeconstants.nullid
391 oldp2 != self._nodeconstants.nullid
392 and p2 == self._nodeconstants.nullid
392 and p2 == self._nodeconstants.nullid
393 ):
393 ):
394 candidatefiles = self._map.non_normal_or_other_parent_paths()
394 candidatefiles = self._map.non_normal_or_other_parent_paths()
395
395
396 for f in candidatefiles:
396 for f in candidatefiles:
397 s = self._map.get(f)
397 s = self._map.get(f)
398 if s is None:
398 if s is None:
399 continue
399 continue
400
400
401 # Discard "merged" markers when moving away from a merge state
401 # Discard "merged" markers when moving away from a merge state
402 if s.merged:
402 if s.merged:
403 source = self._map.copymap.get(f)
403 source = self._map.copymap.get(f)
404 if source:
404 if source:
405 copies[f] = source
405 copies[f] = source
406 self.normallookup(f)
406 self.normallookup(f)
407 # Also fix up otherparent markers
407 # Also fix up otherparent markers
408 elif s.from_p2:
408 elif s.from_p2:
409 source = self._map.copymap.get(f)
409 source = self._map.copymap.get(f)
410 if source:
410 if source:
411 copies[f] = source
411 copies[f] = source
412 self._add(f)
412 self._add(f)
413 return copies
413 return copies
414
414
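A sketch of the merge-to-linear transition handled above: when p2 is dropped, "merged" and "from_p2" markers are downgraded and recorded copy sources are handed back so the caller can decide what to re-record.

    with ds.parentchange():
        copies = ds.setparents(p1)  # p2 defaults to nullid
    for dest, source in copies.items():
        ds.copy(source, dest)       # e.g. keep the copy records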
415 def setbranch(self, branch):
415 def setbranch(self, branch):
416 self.__class__._branch.set(self, encoding.fromlocal(branch))
416 self.__class__._branch.set(self, encoding.fromlocal(branch))
417 f = self._opener(b'branch', b'w', atomictemp=True, checkambig=True)
417 f = self._opener(b'branch', b'w', atomictemp=True, checkambig=True)
418 try:
418 try:
419 f.write(self._branch + b'\n')
419 f.write(self._branch + b'\n')
420 f.close()
420 f.close()
421
421
422 # make sure filecache has the correct stat info for _branch after
422 # make sure filecache has the correct stat info for _branch after
423 # replacing the underlying file
423 # replacing the underlying file
424 ce = self._filecache[b'_branch']
424 ce = self._filecache[b'_branch']
425 if ce:
425 if ce:
426 ce.refresh()
426 ce.refresh()
427 except: # re-raises
427 except: # re-raises
428 f.discard()
428 f.discard()
429 raise
429 raise
430
430
431 def invalidate(self):
431 def invalidate(self):
432 """Causes the next access to reread the dirstate.
432 """Causes the next access to reread the dirstate.
433
433
434 This is different from localrepo.invalidatedirstate() because it always
434 This is different from localrepo.invalidatedirstate() because it always
435 rereads the dirstate. Use localrepo.invalidatedirstate() if you want to
435 rereads the dirstate. Use localrepo.invalidatedirstate() if you want to
436 check whether the dirstate has changed before rereading it."""
436 check whether the dirstate has changed before rereading it."""
437
437
438 for a in ("_map", "_branch", "_ignore"):
438 for a in ("_map", "_branch", "_ignore"):
439 if a in self.__dict__:
439 if a in self.__dict__:
440 delattr(self, a)
440 delattr(self, a)
441 self._lastnormaltime = 0
441 self._lastnormaltime = 0
442 self._dirty = False
442 self._dirty = False
443 self._updatedfiles.clear()
443 self._updatedfiles.clear()
444 self._parentwriters = 0
444 self._parentwriters = 0
445 self._origpl = None
445 self._origpl = None
446
446
447 def copy(self, source, dest):
447 def copy(self, source, dest):
448 """Mark dest as a copy of source. Unmark dest if source is None."""
448 """Mark dest as a copy of source. Unmark dest if source is None."""
449 if source == dest:
449 if source == dest:
450 return
450 return
451 self._dirty = True
451 self._dirty = True
452 if source is not None:
452 if source is not None:
453 self._map.copymap[dest] = source
453 self._map.copymap[dest] = source
454 self._updatedfiles.add(source)
454 self._updatedfiles.add(source)
455 self._updatedfiles.add(dest)
455 self._updatedfiles.add(dest)
456 elif self._map.copymap.pop(dest, None):
456 elif self._map.copymap.pop(dest, None):
457 self._updatedfiles.add(dest)
457 self._updatedfiles.add(dest)
458
458
459 def copied(self, file):
459 def copied(self, file):
460 return self._map.copymap.get(file, None)
460 return self._map.copymap.get(file, None)
461
461
462 def copies(self):
462 def copies(self):
463 return self._map.copymap
463 return self._map.copymap
464
464
465 @requires_no_parents_change
465 @requires_no_parents_change
466 def set_tracked(self, filename):
466 def set_tracked(self, filename):
467 """a "public" method for generic code to mark a file as tracked
467 """a "public" method for generic code to mark a file as tracked
468
468
469 This function is to be called outside of "update/merge" case. For
469 This function is to be called outside of "update/merge" case. For
470 example by a command like `hg add X`.
470 example by a command like `hg add X`.
471
471
472 return True if the file was previously untracked, False otherwise.
472 return True if the file was previously untracked, False otherwise.
473 """
473 """
474 entry = self._map.get(filename)
474 entry = self._map.get(filename)
475 if entry is None:
475 if entry is None:
476 self._add(filename)
476 self._add(filename)
477 return True
477 return True
478 elif not entry.tracked:
478 elif not entry.tracked:
479 self.normallookup(filename)
479 self.normallookup(filename)
480 return True
480 return True
481 return False
481 return False
482
482
483 @requires_no_parents_change
483 @requires_no_parents_change
484 def set_untracked(self, filename):
484 def set_untracked(self, filename):
485 """a "public" method for generic code to mark a file as untracked
485 """a "public" method for generic code to mark a file as untracked
486
486
487 This function is to be called outside of "update/merge" case. For
487 This function is to be called outside of "update/merge" case. For
488 example by a command like `hg remove X`.
488 example by a command like `hg remove X`.
489
489
490 return True if the file was previously tracked, False otherwise.
490 return True if the file was previously tracked, False otherwise.
491 """
491 """
492 entry = self._map.get(filename)
492 entry = self._map.get(filename)
493 if entry is None:
493 if entry is None:
494 return False
494 return False
495 elif entry.added:
495 elif entry.added:
496 self._drop(filename)
496 self._drop(filename)
497 return True
497 return True
498 else:
498 else:
499 self._remove(filename)
499 self._remove(filename)
500 return True
500 return True
501
501
502 @requires_parents_change
502 @requires_parents_change
503 def update_file_reference(
503 def update_file_reference(
504 self,
504 self,
505 filename,
505 filename,
506 p1_tracked,
506 p1_tracked,
507 ):
507 ):
508 """Set a file as tracked in the parent (or not)
508 """Set a file as tracked in the parent (or not)
509
509
510 This is to be called when adjusting the dirstate to a new parent after a
510 This is to be called when adjusting the dirstate to a new parent after a
511 history rewriting operation.
511 history rewriting operation.
512
512
513 It should not be called during a merge (p2 != nullid) and only within
513 It should not be called during a merge (p2 != nullid) and only within
514 a `with dirstate.parentchange():` context.
514 a `with dirstate.parentchange():` context.
515 """
515 """
516 if self.in_merge:
516 if self.in_merge:
517 msg = b'update_file_reference should not be called when merging'
517 msg = b'update_file_reference should not be called when merging'
518 raise error.ProgrammingError(msg)
518 raise error.ProgrammingError(msg)
519 entry = self._map.get(filename)
519 entry = self._map.get(filename)
520 if entry is None:
520 if entry is None:
521 wc_tracked = False
521 wc_tracked = False
522 else:
522 else:
523 wc_tracked = entry.tracked
523 wc_tracked = entry.tracked
524 if p1_tracked and wc_tracked:
524 if p1_tracked and wc_tracked:
525 # the underlying reference might have changed, we will have to
525 # the underlying reference might have changed, we will have to
526 # check it.
526 # check it.
527 self.normallookup(filename)
527 self.normallookup(filename)
528 elif not (p1_tracked or wc_tracked):
528 elif not (p1_tracked or wc_tracked):
529 # the file is no longer relevant to anyone
529 # the file is no longer relevant to anyone
530 self._drop(filename)
530 self._drop(filename)
531 elif (not p1_tracked) and wc_tracked:
531 elif (not p1_tracked) and wc_tracked:
532 if not entry.added:
532 if not entry.added:
533 self._add(filename)
533 self._add(filename)
534 elif p1_tracked and not wc_tracked:
534 elif p1_tracked and not wc_tracked:
535 if entry is None or not entry.removed:
535 if entry is None or not entry.removed:
536 self._remove(filename)
536 self._remove(filename)
537 else:
537 else:
538 assert False, 'unreachable'
538 assert False, 'unreachable'
539
539
540 @requires_parents_change
540 @requires_parents_change
541 def update_file(
541 def update_file(
542 self,
542 self,
543 filename,
543 filename,
544 wc_tracked,
544 wc_tracked,
545 p1_tracked,
545 p1_tracked,
546 p2_tracked=False,
546 p2_tracked=False,
547 merged=False,
547 merged=False,
548 clean_p1=False,
548 clean_p1=False,
549 clean_p2=False,
549 clean_p2=False,
550 possibly_dirty=False,
550 possibly_dirty=False,
551 ):
551 ):
552 """update the information about a file in the dirstate
552 """update the information about a file in the dirstate
553
553
554 This is to be called when the dirstate's parent changes, to keep track
554 This is to be called when the dirstate's parent changes, to keep track
555 of the file's situation with regard to the working copy and its parent.
555 of the file's situation with regard to the working copy and its parent.
556
556
557 This function must be called within a `dirstate.parentchange` context.
557 This function must be called within a `dirstate.parentchange` context.
558
558
559 note: the API is at an early stage and we might need to adjust it
559 note: the API is at an early stage and we might need to adjust it
560 depending on what information ends up being relevant and useful to
560 depending on what information ends up being relevant and useful to
561 other processing.
561 other processing.
562 """
562 """
563 if merged and (clean_p1 or clean_p2):
563 if merged and (clean_p1 or clean_p2):
564 msg = b'`merged` argument incompatible with `clean_p1`/`clean_p2`'
564 msg = b'`merged` argument incompatible with `clean_p1`/`clean_p2`'
565 raise error.ProgrammingError(msg)
565 raise error.ProgrammingError(msg)
566 if not (p1_tracked or p2_tracked or wc_tracked):
566 if not (p1_tracked or p2_tracked or wc_tracked):
567 self._drop(filename)
567 self._drop(filename)
568 elif merged:
568 elif merged:
569 assert wc_tracked
569 assert wc_tracked
570 assert self.in_merge # we are never in the "normallookup" case
570 assert self.in_merge # we are never in the "normallookup" case
571 self.otherparent(filename)
571 self.otherparent(filename)
572 elif not (p1_tracked or p2_tracked) and wc_tracked:
572 elif not (p1_tracked or p2_tracked) and wc_tracked:
573 self._addpath(filename, added=True, possibly_dirty=possibly_dirty)
573 self._addpath(filename, added=True, possibly_dirty=possibly_dirty)
574 self._map.copymap.pop(filename, None)
574 self._map.copymap.pop(filename, None)
575 elif (p1_tracked or p2_tracked) and not wc_tracked:
575 elif (p1_tracked or p2_tracked) and not wc_tracked:
576 self._remove(filename)
576 self._remove(filename)
577 elif clean_p2 and wc_tracked:
577 elif clean_p2 and wc_tracked:
578 assert p2_tracked
578 assert p2_tracked
579 self.otherparent(filename)
579 self.otherparent(filename)
580 elif not p1_tracked and p2_tracked and wc_tracked:
580 elif not p1_tracked and p2_tracked and wc_tracked:
581 self._addpath(filename, from_p2=True, possibly_dirty=possibly_dirty)
581 self._addpath(filename, from_p2=True, possibly_dirty=possibly_dirty)
582 self._map.copymap.pop(filename, None)
582 self._map.copymap.pop(filename, None)
583 elif possibly_dirty:
583 elif possibly_dirty:
584 self._addpath(filename, possibly_dirty=possibly_dirty)
584 self._addpath(filename, possibly_dirty=possibly_dirty)
585 elif wc_tracked:
585 elif wc_tracked:
586 self.normal(filename)
586 self.normal(filename)
587 # XXX We need something for files that are dirty after an update
587 # XXX We need something for files that are dirty after an update
588 else:
588 else:
589 assert False, 'unreachable'
589 assert False, 'unreachable'
590
590
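A few illustrative calls covering the main branches above, all inside a `parentchange` context:

    with ds.parentchange():
        # known to p1 and present in the working copy, content unverified
        ds.update_file(b'f', wc_tracked=True, p1_tracked=True,
                       possibly_dirty=True)
        # only the working copy knows it: recorded as added
        ds.update_file(b'g', wc_tracked=True, p1_tracked=False)
        # gone from every side: dropped from the dirstate
        ds.update_file(b'h', wc_tracked=False, p1_tracked=False)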
591 @requires_parents_change
591 @requires_parents_change
592 def update_parent_file_data(self, f, filedata):
592 def update_parent_file_data(self, f, filedata):
593 """update the information about the content of a file
593 """update the information about the content of a file
594
594
595 This function should be called within a `dirstate.parentchange` context.
595 This function should be called within a `dirstate.parentchange` context.
596 """
596 """
597 self.normal(f, parentfiledata=filedata)
597 self.normal(f, parentfiledata=filedata)
598
598
599 def _addpath(
599 def _addpath(
600 self,
600 self,
601 f,
601 f,
602 mode=0,
602 mode=0,
603 size=None,
603 size=None,
604 mtime=None,
604 mtime=None,
605 added=False,
605 added=False,
606 merged=False,
606 merged=False,
607 from_p2=False,
607 from_p2=False,
608 possibly_dirty=False,
608 possibly_dirty=False,
609 ):
609 ):
610 entry = self._map.get(f)
610 entry = self._map.get(f)
611 if added or entry is not None and entry.removed:
611 if added or entry is not None and entry.removed:
612 scmutil.checkfilename(f)
612 scmutil.checkfilename(f)
613 if self._map.hastrackeddir(f):
613 if self._map.hastrackeddir(f):
614 msg = _(b'directory %r already in dirstate')
614 msg = _(b'directory %r already in dirstate')
615 msg %= pycompat.bytestr(f)
615 msg %= pycompat.bytestr(f)
616 raise error.Abort(msg)
616 raise error.Abort(msg)
617 # shadows
617 # shadows
618 for d in pathutil.finddirs(f):
618 for d in pathutil.finddirs(f):
619 if self._map.hastrackeddir(d):
619 if self._map.hastrackeddir(d):
620 break
620 break
621 entry = self._map.get(d)
621 entry = self._map.get(d)
622 if entry is not None and not entry.removed:
622 if entry is not None and not entry.removed:
623 msg = _(b'file %r in dirstate clashes with %r')
623 msg = _(b'file %r in dirstate clashes with %r')
624 msg %= (pycompat.bytestr(d), pycompat.bytestr(f))
624 msg %= (pycompat.bytestr(d), pycompat.bytestr(f))
625 raise error.Abort(msg)
625 raise error.Abort(msg)
626 self._dirty = True
626 self._dirty = True
627 self._updatedfiles.add(f)
627 self._updatedfiles.add(f)
628 self._map.addfile(
628 self._map.addfile(
629 f,
629 f,
630 mode=mode,
630 mode=mode,
631 size=size,
631 size=size,
632 mtime=mtime,
632 mtime=mtime,
633 added=added,
633 added=added,
634 merged=merged,
634 merged=merged,
635 from_p2=from_p2,
635 from_p2=from_p2,
636 possibly_dirty=possibly_dirty,
636 possibly_dirty=possibly_dirty,
637 )
637 )
638
638
639 def normal(self, f, parentfiledata=None):
639 def normal(self, f, parentfiledata=None):
640 """Mark a file normal and clean.
640 """Mark a file normal and clean.
641
641
642 parentfiledata: (mode, size, mtime) of the clean file
642 parentfiledata: (mode, size, mtime) of the clean file
643
643
644 parentfiledata should be computed from memory (for mode,
644 parentfiledata should be computed from memory (for mode,
645 size), at or as close as possible to the point where we
645 size), at or as close as possible to the point where we
646 determined the file was clean, to limit the risk of the
646 determined the file was clean, to limit the risk of the
647 file having been changed by an external process between the
647 file having been changed by an external process between the
648 moment where the file was determined to be clean and now."""
648 moment where the file was determined to be clean and now."""
649 if parentfiledata:
649 if parentfiledata:
650 (mode, size, mtime) = parentfiledata
650 (mode, size, mtime) = parentfiledata
651 else:
651 else:
652 s = os.lstat(self._join(f))
652 s = os.lstat(self._join(f))
653 mode = s.st_mode
653 mode = s.st_mode
654 size = s.st_size
654 size = s.st_size
655 mtime = s[stat.ST_MTIME]
655 mtime = s[stat.ST_MTIME]
656 self._addpath(f, mode=mode, size=size, mtime=mtime)
656 self._addpath(f, mode=mode, size=size, mtime=mtime)
657 self._map.copymap.pop(f, None)
657 self._map.copymap.pop(f, None)
658 if f in self._map.nonnormalset:
658 if f in self._map.nonnormalset:
659 self._map.nonnormalset.remove(f)
659 self._map.nonnormalset.remove(f)
660 if mtime > self._lastnormaltime:
660 if mtime > self._lastnormaltime:
661 # Remember the most recent modification timeslot for status(),
661 # Remember the most recent modification timeslot for status(),
662 # to make sure we won't miss future size-preserving file content
662 # to make sure we won't miss future size-preserving file content
663 # modifications that happen within the same timeslot.
663 # modifications that happen within the same timeslot.
664 self._lastnormaltime = mtime
664 self._lastnormaltime = mtime
665
665
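A sketch of the preferred call pattern, capturing the stat data at the moment the file was verified clean rather than letting normal() re-stat later:

    s = os.lstat(ds._join(f))
    ds.normal(f, parentfiledata=(s.st_mode, s.st_size, s[stat.ST_MTIME]))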
666 def normallookup(self, f):
666 def normallookup(self, f):
667 '''Mark a file normal, but possibly dirty.'''
667 '''Mark a file normal, but possibly dirty.'''
668 if self.in_merge:
668 if self.in_merge:
669 # if there is a merge going on and the file was either
669 # if there is a merge going on and the file was either
670 # "merged" or coming from other parent (-2) before
670 # "merged" or coming from other parent (-2) before
671 # being removed, restore that state.
671 # being removed, restore that state.
672 entry = self._map.get(f)
672 entry = self._map.get(f)
673 if entry is not None:
673 if entry is not None:
674 # XXX this should probably be dealt with at a lower level
674 # XXX this should probably be dealt with at a lower level
675 # (see `merged_removed` and `from_p2_removed`)
675 # (see `merged_removed` and `from_p2_removed`)
676 if entry.merged_removed or entry.from_p2_removed:
676 if entry.merged_removed or entry.from_p2_removed:
677 source = self._map.copymap.get(f)
677 source = self._map.copymap.get(f)
678 if entry.merged_removed:
678 if entry.merged_removed:
679 self.merge(f)
679 self.merge(f)
680 elif entry.from_p2_removed:
680 elif entry.from_p2_removed:
681 self.otherparent(f)
681 self.otherparent(f)
682 if source is not None:
682 if source is not None:
683 self.copy(source, f)
683 self.copy(source, f)
684 return
684 return
685 elif entry.merged or entry.from_p2:
685 elif entry.merged or entry.from_p2:
686 return
686 return
687 self._addpath(f, possibly_dirty=True)
687 self._addpath(f, possibly_dirty=True)
688 self._map.copymap.pop(f, None)
688 self._map.copymap.pop(f, None)
689
689
690 def otherparent(self, f):
690 def otherparent(self, f):
691 '''Mark as coming from the other parent, always dirty.'''
691 '''Mark as coming from the other parent, always dirty.'''
692 if not self.in_merge:
692 if not self.in_merge:
693 msg = _(b"setting %r to other parent only allowed in merges") % f
693 msg = _(b"setting %r to other parent only allowed in merges") % f
694 raise error.Abort(msg)
694 raise error.Abort(msg)
695 entry = self._map.get(f)
695 entry = self._map.get(f)
696 if entry is not None and entry.tracked:
696 if entry is not None and entry.tracked:
697 # merge-like
697 # merge-like
698 self._addpath(f, merged=True)
698 self._addpath(f, merged=True)
699 else:
699 else:
700 # add-like
700 # add-like
701 self._addpath(f, from_p2=True)
701 self._addpath(f, from_p2=True)
702 self._map.copymap.pop(f, None)
702 self._map.copymap.pop(f, None)
703
703
704 def add(self, f):
704 def add(self, f):
705 '''Mark a file added.'''
705 '''Mark a file added.'''
706 if not self.pendingparentchange():
706 if not self.pendingparentchange():
707 util.nouideprecwarn(
707 util.nouideprecwarn(
708 b"do not use `add` outside of update/merge context."
708 b"do not use `add` outside of update/merge context."
709 b" Use `set_tracked`",
709 b" Use `set_tracked`",
710 b'6.0',
710 b'6.0',
711 stacklevel=2,
711 stacklevel=2,
712 )
712 )
713 self._add(f)
713 self._add(f)
714
714
715 def _add(self, filename):
715 def _add(self, filename):
716 """internal function to mark a file as added"""
716 """internal function to mark a file as added"""
717 self._addpath(filename, added=True)
717 self._addpath(filename, added=True)
718 self._map.copymap.pop(filename, None)
718 self._map.copymap.pop(filename, None)
719
719
720 def remove(self, f):
720 def remove(self, f):
721 '''Mark a file removed'''
721 '''Mark a file removed'''
722 if not self.pendingparentchange():
722 if not self.pendingparentchange():
723 util.nouideprecwarn(
723 util.nouideprecwarn(
724 b"do not use `remove` outside of update/merge context."
724 b"do not use `remove` outside of update/merge context."
725 b" Use `set_untracked`",
725 b" Use `set_untracked`",
726 b'6.0',
726 b'6.0',
727 stacklevel=2,
727 stacklevel=2,
728 )
728 )
729 self._remove(f)
729 self._remove(f)
730
730
731 def _remove(self, filename):
731 def _remove(self, filename):
732 """internal function to mark a file removed"""
732 """internal function to mark a file removed"""
733 self._dirty = True
733 self._dirty = True
734 self._updatedfiles.add(filename)
734 self._updatedfiles.add(filename)
735 self._map.removefile(filename, in_merge=self.in_merge)
735 self._map.removefile(filename, in_merge=self.in_merge)
736
736
737 def merge(self, f):
737 def merge(self, f):
738 '''Mark a file merged.'''
738 '''Mark a file merged.'''
739 if not self.in_merge:
739 if not self.in_merge:
740 return self.normallookup(f)
740 return self.normallookup(f)
741 return self.otherparent(f)
741 return self.otherparent(f)
742
742
743 def drop(self, f):
743 def drop(self, f):
744 '''Drop a file from the dirstate'''
744 '''Drop a file from the dirstate'''
745 if not self.pendingparentchange():
745 if not self.pendingparentchange():
746 util.nouideprecwarn(
746 util.nouideprecwarn(
747 b"do not use `drop` outside of update/merge context."
747 b"do not use `drop` outside of update/merge context."
748 b" Use `set_untracked`",
748 b" Use `set_untracked`",
749 b'6.0',
749 b'6.0',
750 stacklevel=2,
750 stacklevel=2,
751 )
751 )
752 self._drop(f)
752 self._drop(f)
753
753
754 def _drop(self, filename):
754 def _drop(self, filename):
755 """internal function to drop a file from the dirstate"""
755 """internal function to drop a file from the dirstate"""
756 if self._map.dropfile(filename):
756 if self._map.dropfile(filename):
757 self._dirty = True
757 self._dirty = True
758 self._updatedfiles.add(filename)
758 self._updatedfiles.add(filename)
759 self._map.copymap.pop(filename, None)
759 self._map.copymap.pop(filename, None)
760
760
761 def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
761 def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
762 if exists is None:
762 if exists is None:
763 exists = os.path.lexists(os.path.join(self._root, path))
763 exists = os.path.lexists(os.path.join(self._root, path))
764 if not exists:
764 if not exists:
765 # Maybe a path component exists
765 # Maybe a path component exists
766 if not ignoremissing and b'/' in path:
766 if not ignoremissing and b'/' in path:
767 d, f = path.rsplit(b'/', 1)
767 d, f = path.rsplit(b'/', 1)
768 d = self._normalize(d, False, ignoremissing, None)
768 d = self._normalize(d, False, ignoremissing, None)
769 folded = d + b"/" + f
769 folded = d + b"/" + f
770 else:
770 else:
771 # No path components, preserve original case
771 # No path components, preserve original case
772 folded = path
772 folded = path
773 else:
773 else:
774 # recursively normalize leading directory components
774 # recursively normalize leading directory components
775 # against dirstate
775 # against dirstate
776 if b'/' in normed:
776 if b'/' in normed:
777 d, f = normed.rsplit(b'/', 1)
777 d, f = normed.rsplit(b'/', 1)
778 d = self._normalize(d, False, ignoremissing, True)
778 d = self._normalize(d, False, ignoremissing, True)
779 r = self._root + b"/" + d
779 r = self._root + b"/" + d
780 folded = d + b"/" + util.fspath(f, r)
780 folded = d + b"/" + util.fspath(f, r)
781 else:
781 else:
782 folded = util.fspath(normed, self._root)
782 folded = util.fspath(normed, self._root)
783 storemap[normed] = folded
783 storemap[normed] = folded
784
784
785 return folded
785 return folded
786
786
787 def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
787 def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
788 normed = util.normcase(path)
788 normed = util.normcase(path)
789 folded = self._map.filefoldmap.get(normed, None)
789 folded = self._map.filefoldmap.get(normed, None)
790 if folded is None:
790 if folded is None:
791 if isknown:
791 if isknown:
792 folded = path
792 folded = path
793 else:
793 else:
794 folded = self._discoverpath(
794 folded = self._discoverpath(
795 path, normed, ignoremissing, exists, self._map.filefoldmap
795 path, normed, ignoremissing, exists, self._map.filefoldmap
796 )
796 )
797 return folded
797 return folded
798
798
799 def _normalize(self, path, isknown, ignoremissing=False, exists=None):
799 def _normalize(self, path, isknown, ignoremissing=False, exists=None):
800 normed = util.normcase(path)
800 normed = util.normcase(path)
801 folded = self._map.filefoldmap.get(normed, None)
801 folded = self._map.filefoldmap.get(normed, None)
802 if folded is None:
802 if folded is None:
803 folded = self._map.dirfoldmap.get(normed, None)
803 folded = self._map.dirfoldmap.get(normed, None)
804 if folded is None:
804 if folded is None:
805 if isknown:
805 if isknown:
806 folded = path
806 folded = path
807 else:
807 else:
808 # store discovered result in dirfoldmap so that future
808 # store discovered result in dirfoldmap so that future
809 # normalizefile calls don't start matching directories
809 # normalizefile calls don't start matching directories
810 folded = self._discoverpath(
810 folded = self._discoverpath(
811 path, normed, ignoremissing, exists, self._map.dirfoldmap
811 path, normed, ignoremissing, exists, self._map.dirfoldmap
812 )
812 )
813 return folded
813 return folded
814
814
815 def normalize(self, path, isknown=False, ignoremissing=False):
815 def normalize(self, path, isknown=False, ignoremissing=False):
816 """
816 """
817 normalize the case of a pathname when on a casefolding filesystem
817 normalize the case of a pathname when on a casefolding filesystem
818
818
819 isknown specifies whether the filename came from walking the
819 isknown specifies whether the filename came from walking the
820 disk, to avoid extra filesystem access.
820 disk, to avoid extra filesystem access.
821
821
822 If ignoremissing is True, missing paths are returned
822 If ignoremissing is True, missing paths are returned
823 unchanged. Otherwise, we try harder to normalize possibly
823 unchanged. Otherwise, we try harder to normalize possibly
824 existing path components.
824 existing path components.
825
825
826 The normalized case is determined based on the following precedence:
826 The normalized case is determined based on the following precedence:
827
827
828 - version of name already stored in the dirstate
828 - version of name already stored in the dirstate
829 - version of name stored on disk
829 - version of name stored on disk
830 - version provided via command arguments
830 - version provided via command arguments
831 """
831 """
832
832
833 if self._checkcase:
833 if self._checkcase:
834 return self._normalize(path, isknown, ignoremissing)
834 return self._normalize(path, isknown, ignoremissing)
835 return path
835 return path
836
836
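On a case-folding filesystem this maps user-supplied spellings onto the stored ones, e.g. (illustrative names):

    # dirstate already tracks b'README.txt'
    ds.normalize(b'readme.TXT')             # -> b'README.txt'
    ds.normalize(b'new.txt', isknown=True)  # name came from a disk walk:
                                            # returned as-is, no extra fs access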
837 def clear(self):
837 def clear(self):
838 self._map.clear()
838 self._map.clear()
839 self._lastnormaltime = 0
839 self._lastnormaltime = 0
840 self._updatedfiles.clear()
840 self._updatedfiles.clear()
841 self._dirty = True
841 self._dirty = True
842
842
843 def rebuild(self, parent, allfiles, changedfiles=None):
843 def rebuild(self, parent, allfiles, changedfiles=None):
844 if changedfiles is None:
844 if changedfiles is None:
845 # Rebuild entire dirstate
845 # Rebuild entire dirstate
846 to_lookup = allfiles
846 to_lookup = allfiles
847 to_drop = []
847 to_drop = []
848 lastnormaltime = self._lastnormaltime
848 lastnormaltime = self._lastnormaltime
849 self.clear()
849 self.clear()
850 self._lastnormaltime = lastnormaltime
850 self._lastnormaltime = lastnormaltime
851 elif len(changedfiles) < 10:
851 elif len(changedfiles) < 10:
852 # Avoid turning allfiles into a set, which can be expensive if it's
852 # Avoid turning allfiles into a set, which can be expensive if it's
853 # large.
853 # large.
854 to_lookup = []
854 to_lookup = []
855 to_drop = []
855 to_drop = []
856 for f in changedfiles:
856 for f in changedfiles:
857 if f in allfiles:
857 if f in allfiles:
858 to_lookup.append(f)
858 to_lookup.append(f)
859 else:
859 else:
860 to_drop.append(f)
860 to_drop.append(f)
861 else:
861 else:
862 changedfilesset = set(changedfiles)
862 changedfilesset = set(changedfiles)
863 to_lookup = changedfilesset & set(allfiles)
863 to_lookup = changedfilesset & set(allfiles)
864 to_drop = changedfilesset - to_lookup
864 to_drop = changedfilesset - to_lookup
865
865
866 if self._origpl is None:
866 if self._origpl is None:
867 self._origpl = self._pl
867 self._origpl = self._pl
868 self._map.setparents(parent, self._nodeconstants.nullid)
868 self._map.setparents(parent, self._nodeconstants.nullid)
869
869
870 for f in to_lookup:
870 for f in to_lookup:
871 self.normallookup(f)
871 self.normallookup(f)
872 for f in to_drop:
872 for f in to_drop:
873 self._drop(f)
873 self._drop(f)
874
874
875 self._dirty = True
875 self._dirty = True
876
876
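
A hedged sketch of rebuild()'s three-way partitioning above; `threshold` is an illustrative name for the hard-coded 10:

def partition_changes(allfiles, changedfiles, threshold=10):
    # files still present need a lookup; vanished ones are dropped
    if changedfiles is None:
        return list(allfiles), []  # full rebuild: look everything up
    if len(changedfiles) < threshold:
        # linear membership tests beat building set(allfiles) when
        # only a handful of files changed
        to_lookup = [f for f in changedfiles if f in allfiles]
        to_drop = [f for f in changedfiles if f not in allfiles]
        return to_lookup, to_drop
    changed = set(changedfiles)
    to_lookup = changed & set(allfiles)
    return to_lookup, changed - to_lookup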
877 def identity(self):
877 def identity(self):
878 """Return identity of dirstate itself to detect changing in storage
878 """Return identity of dirstate itself to detect changing in storage
879
879
880 If identity of previous dirstate is equal to this, writing
880 If identity of previous dirstate is equal to this, writing
881 changes based on the former dirstate out can keep consistency.
881 changes based on the former dirstate out can keep consistency.
882 """
882 """
883 return self._map.identity
883 return self._map.identity
884
884
885 def write(self, tr):
885 def write(self, tr):
886 if not self._dirty:
886 if not self._dirty:
887 return
887 return
888
888
889 filename = self._filename
889 filename = self._filename
890 if tr:
890 if tr:
891 # 'dirstate.write()' is not only for writing in-memory
891 # 'dirstate.write()' is not only for writing in-memory
892 # changes out, but also for dropping ambiguous timestamps.
892 # changes out, but also for dropping ambiguous timestamps.
893 # delayed writing re-raises the "ambiguous timestamp issue".
893 # delayed writing re-raises the "ambiguous timestamp issue".
894 # See also the wiki page below for detail:
894 # See also the wiki page below for detail:
895 # https://www.mercurial-scm.org/wiki/DirstateTransactionPlan
895 # https://www.mercurial-scm.org/wiki/DirstateTransactionPlan
896
896
897 # emulate dropping timestamp in 'parsers.pack_dirstate'
897 # emulate dropping timestamp in 'parsers.pack_dirstate'
898 now = _getfsnow(self._opener)
898 now = _getfsnow(self._opener)
899 self._map.clearambiguoustimes(self._updatedfiles, now)
899 self._map.clearambiguoustimes(self._updatedfiles, now)
900
900
901 # emulate that all 'dirstate.normal' results are written out
901 # emulate that all 'dirstate.normal' results are written out
902 self._lastnormaltime = 0
902 self._lastnormaltime = 0
903 self._updatedfiles.clear()
903 self._updatedfiles.clear()
904
904
905 # delay writing in-memory changes out
905 # delay writing in-memory changes out
906 tr.addfilegenerator(
906 tr.addfilegenerator(
907 b'dirstate',
907 b'dirstate',
908 (self._filename,),
908 (self._filename,),
909 self._writedirstate,
909 lambda f: self._writedirstate(tr, f),
910 location=b'plain',
910 location=b'plain',
911 )
911 )
912 return
912 return
913
913
914 st = self._opener(filename, b"w", atomictemp=True, checkambig=True)
914 st = self._opener(filename, b"w", atomictemp=True, checkambig=True)
915 self._writedirstate(st)
915 self._writedirstate(tr, st)
916
916
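
The change above threads the open transaction into _writedirstate() (and, further down, into self._map.write()), which a dirstate-v2 map presumably needs in order to register its separate data file with the transaction. Since tr.addfilegenerator() calls back with only the open file object, the transaction must be captured in a closure; a sketch:

def make_dirstate_writer(dirstate, tr):
    # the file generator API passes just the file object, so 'tr' is
    # bound here, exactly as the lambda in the diff above does
    return lambda f: dirstate._writedirstate(tr, f)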
917 def addparentchangecallback(self, category, callback):
917 def addparentchangecallback(self, category, callback):
918 """add a callback to be called when the wd parents are changed
918 """add a callback to be called when the wd parents are changed
919
919
920 Callback will be called with the following arguments:
920 Callback will be called with the following arguments:
921 dirstate, (oldp1, oldp2), (newp1, newp2)
921 dirstate, (oldp1, oldp2), (newp1, newp2)
922
922
923 Category is a unique identifier to allow overwriting an old callback
923 Category is a unique identifier to allow overwriting an old callback
924 with a newer callback.
924 with a newer callback.
925 """
925 """
926 self._plchangecallbacks[category] = callback
926 self._plchangecallbacks[category] = callback
927
927
928 def _writedirstate(self, st):
928 def _writedirstate(self, tr, st):
929 # notify callbacks about parents change
929 # notify callbacks about parents change
930 if self._origpl is not None and self._origpl != self._pl:
930 if self._origpl is not None and self._origpl != self._pl:
931 for c, callback in sorted(
931 for c, callback in sorted(
932 pycompat.iteritems(self._plchangecallbacks)
932 pycompat.iteritems(self._plchangecallbacks)
933 ):
933 ):
934 callback(self, self._origpl, self._pl)
934 callback(self, self._origpl, self._pl)
935 self._origpl = None
935 self._origpl = None
936 # use the modification time of the newly created temporary file as the
936 # use the modification time of the newly created temporary file as the
937 # filesystem's notion of 'now'
937 # filesystem's notion of 'now'
938 now = util.fstat(st)[stat.ST_MTIME] & _rangemask
938 now = util.fstat(st)[stat.ST_MTIME] & _rangemask
939
939
940 # a large enough 'delaywrite' prevents 'pack_dirstate' from dropping
940 # a large enough 'delaywrite' prevents 'pack_dirstate' from dropping
941 # the timestamp of each entry in the dirstate, because of 'now > mtime'
941 # the timestamp of each entry in the dirstate, because of 'now > mtime'
942 delaywrite = self._ui.configint(b'debug', b'dirstate.delaywrite')
942 delaywrite = self._ui.configint(b'debug', b'dirstate.delaywrite')
943 if delaywrite > 0:
943 if delaywrite > 0:
944 # do we have any files to delay for?
944 # do we have any files to delay for?
945 for f, e in pycompat.iteritems(self._map):
945 for f, e in pycompat.iteritems(self._map):
946 if e.need_delay(now):
946 if e.need_delay(now):
947 import time # to avoid useless import
947 import time # to avoid useless import
948
948
949 # rather than sleep n seconds, sleep until the next
949 # rather than sleep n seconds, sleep until the next
950 # multiple of n seconds
950 # multiple of n seconds
951 clock = time.time()
951 clock = time.time()
952 start = int(clock) - (int(clock) % delaywrite)
952 start = int(clock) - (int(clock) % delaywrite)
953 end = start + delaywrite
953 end = start + delaywrite
954 time.sleep(end - clock)
954 time.sleep(end - clock)
955 now = end # trust our estimate that the end is near now
955 now = end # trust our estimate that the end is near now
956 break
956 break
957
957
958 self._map.write(st, now)
958 self._map.write(tr, st, now)
959 self._lastnormaltime = 0
959 self._lastnormaltime = 0
960 self._dirty = False
960 self._dirty = False
961
961
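
The delaywrite arithmetic above rounds up to a time boundary instead of sleeping a fixed duration; a runnable sketch:

import time

def sleep_to_boundary(delaywrite):
    # sleep until the next multiple of 'delaywrite' seconds, then
    # treat that boundary as the new notion of "now"
    clock = time.time()
    start = int(clock) - (int(clock) % delaywrite)
    end = start + delaywrite
    time.sleep(end - clock)
    return end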
962 def _dirignore(self, f):
962 def _dirignore(self, f):
963 if self._ignore(f):
963 if self._ignore(f):
964 return True
964 return True
965 for p in pathutil.finddirs(f):
965 for p in pathutil.finddirs(f):
966 if self._ignore(p):
966 if self._ignore(p):
967 return True
967 return True
968 return False
968 return False
969
969
970 def _ignorefiles(self):
970 def _ignorefiles(self):
971 files = []
971 files = []
972 if os.path.exists(self._join(b'.hgignore')):
972 if os.path.exists(self._join(b'.hgignore')):
973 files.append(self._join(b'.hgignore'))
973 files.append(self._join(b'.hgignore'))
974 for name, path in self._ui.configitems(b"ui"):
974 for name, path in self._ui.configitems(b"ui"):
975 if name == b'ignore' or name.startswith(b'ignore.'):
975 if name == b'ignore' or name.startswith(b'ignore.'):
976 # we need to use os.path.join here rather than self._join
976 # we need to use os.path.join here rather than self._join
977 # because path is arbitrary and user-specified
977 # because path is arbitrary and user-specified
978 files.append(os.path.join(self._rootdir, util.expandpath(path)))
978 files.append(os.path.join(self._rootdir, util.expandpath(path)))
979 return files
979 return files
980
980
981 def _ignorefileandline(self, f):
981 def _ignorefileandline(self, f):
982 files = collections.deque(self._ignorefiles())
982 files = collections.deque(self._ignorefiles())
983 visited = set()
983 visited = set()
984 while files:
984 while files:
985 i = files.popleft()
985 i = files.popleft()
986 patterns = matchmod.readpatternfile(
986 patterns = matchmod.readpatternfile(
987 i, self._ui.warn, sourceinfo=True
987 i, self._ui.warn, sourceinfo=True
988 )
988 )
989 for pattern, lineno, line in patterns:
989 for pattern, lineno, line in patterns:
990 kind, p = matchmod._patsplit(pattern, b'glob')
990 kind, p = matchmod._patsplit(pattern, b'glob')
991 if kind == b"subinclude":
991 if kind == b"subinclude":
992 if p not in visited:
992 if p not in visited:
993 files.append(p)
993 files.append(p)
994 continue
994 continue
995 m = matchmod.match(
995 m = matchmod.match(
996 self._root, b'', [], [pattern], warn=self._ui.warn
996 self._root, b'', [], [pattern], warn=self._ui.warn
997 )
997 )
998 if m(f):
998 if m(f):
999 return (i, lineno, line)
999 return (i, lineno, line)
1000 visited.add(i)
1000 visited.add(i)
1001 return (None, -1, b"")
1001 return (None, -1, b"")
1002
1002
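
A hedged sketch of _ignorefileandline()'s breadth-first search, with `read_patterns` and `matches` as hypothetical stand-ins for matchmod.readpatternfile() and a per-pattern matcher:

from collections import deque

def find_ignore_source(ignore_files, read_patterns, matches):
    queue = deque(ignore_files)
    visited = set()
    while queue:
        source = queue.popleft()
        for kind, pattern, lineno, line in read_patterns(source):
            if kind == b"subinclude":
                if pattern not in visited:
                    queue.append(pattern)  # follow nested ignore files
                continue
            if matches(pattern):
                return source, lineno, line
        visited.add(source)
    return None, -1, b""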
1003 def _walkexplicit(self, match, subrepos):
1003 def _walkexplicit(self, match, subrepos):
1004 """Get stat data about the files explicitly specified by match.
1004 """Get stat data about the files explicitly specified by match.
1005
1005
1006 Return a triple (results, dirsfound, dirsnotfound).
1006 Return a triple (results, dirsfound, dirsnotfound).
1007 - results is a mapping from filename to stat result. It also contains
1007 - results is a mapping from filename to stat result. It also contains
1008 listings mapping subrepos and .hg to None.
1008 listings mapping subrepos and .hg to None.
1009 - dirsfound is a list of files found to be directories.
1009 - dirsfound is a list of files found to be directories.
1010 - dirsnotfound is a list of files that the dirstate thinks are
1010 - dirsnotfound is a list of files that the dirstate thinks are
1011 directories and that were not found."""
1011 directories and that were not found."""
1012
1012
1013 def badtype(mode):
1013 def badtype(mode):
1014 kind = _(b'unknown')
1014 kind = _(b'unknown')
1015 if stat.S_ISCHR(mode):
1015 if stat.S_ISCHR(mode):
1016 kind = _(b'character device')
1016 kind = _(b'character device')
1017 elif stat.S_ISBLK(mode):
1017 elif stat.S_ISBLK(mode):
1018 kind = _(b'block device')
1018 kind = _(b'block device')
1019 elif stat.S_ISFIFO(mode):
1019 elif stat.S_ISFIFO(mode):
1020 kind = _(b'fifo')
1020 kind = _(b'fifo')
1021 elif stat.S_ISSOCK(mode):
1021 elif stat.S_ISSOCK(mode):
1022 kind = _(b'socket')
1022 kind = _(b'socket')
1023 elif stat.S_ISDIR(mode):
1023 elif stat.S_ISDIR(mode):
1024 kind = _(b'directory')
1024 kind = _(b'directory')
1025 return _(b'unsupported file type (type is %s)') % kind
1025 return _(b'unsupported file type (type is %s)') % kind
1026
1026
1027 badfn = match.bad
1027 badfn = match.bad
1028 dmap = self._map
1028 dmap = self._map
1029 lstat = os.lstat
1029 lstat = os.lstat
1030 getkind = stat.S_IFMT
1030 getkind = stat.S_IFMT
1031 dirkind = stat.S_IFDIR
1031 dirkind = stat.S_IFDIR
1032 regkind = stat.S_IFREG
1032 regkind = stat.S_IFREG
1033 lnkkind = stat.S_IFLNK
1033 lnkkind = stat.S_IFLNK
1034 join = self._join
1034 join = self._join
1035 dirsfound = []
1035 dirsfound = []
1036 foundadd = dirsfound.append
1036 foundadd = dirsfound.append
1037 dirsnotfound = []
1037 dirsnotfound = []
1038 notfoundadd = dirsnotfound.append
1038 notfoundadd = dirsnotfound.append
1039
1039
1040 if not match.isexact() and self._checkcase:
1040 if not match.isexact() and self._checkcase:
1041 normalize = self._normalize
1041 normalize = self._normalize
1042 else:
1042 else:
1043 normalize = None
1043 normalize = None
1044
1044
1045 files = sorted(match.files())
1045 files = sorted(match.files())
1046 subrepos.sort()
1046 subrepos.sort()
1047 i, j = 0, 0
1047 i, j = 0, 0
1048 while i < len(files) and j < len(subrepos):
1048 while i < len(files) and j < len(subrepos):
1049 subpath = subrepos[j] + b"/"
1049 subpath = subrepos[j] + b"/"
1050 if files[i] < subpath:
1050 if files[i] < subpath:
1051 i += 1
1051 i += 1
1052 continue
1052 continue
1053 while i < len(files) and files[i].startswith(subpath):
1053 while i < len(files) and files[i].startswith(subpath):
1054 del files[i]
1054 del files[i]
1055 j += 1
1055 j += 1
1056
1056
1057 if not files or b'' in files:
1057 if not files or b'' in files:
1058 files = [b'']
1058 files = [b'']
1059 # constructing the foldmap is expensive, so don't do it for the
1059 # constructing the foldmap is expensive, so don't do it for the
1060 # common case where files is ['']
1060 # common case where files is ['']
1061 normalize = None
1061 normalize = None
1062 results = dict.fromkeys(subrepos)
1062 results = dict.fromkeys(subrepos)
1063 results[b'.hg'] = None
1063 results[b'.hg'] = None
1064
1064
1065 for ff in files:
1065 for ff in files:
1066 if normalize:
1066 if normalize:
1067 nf = normalize(ff, False, True)
1067 nf = normalize(ff, False, True)
1068 else:
1068 else:
1069 nf = ff
1069 nf = ff
1070 if nf in results:
1070 if nf in results:
1071 continue
1071 continue
1072
1072
1073 try:
1073 try:
1074 st = lstat(join(nf))
1074 st = lstat(join(nf))
1075 kind = getkind(st.st_mode)
1075 kind = getkind(st.st_mode)
1076 if kind == dirkind:
1076 if kind == dirkind:
1077 if nf in dmap:
1077 if nf in dmap:
1078 # file replaced by dir on disk but still in dirstate
1078 # file replaced by dir on disk but still in dirstate
1079 results[nf] = None
1079 results[nf] = None
1080 foundadd((nf, ff))
1080 foundadd((nf, ff))
1081 elif kind == regkind or kind == lnkkind:
1081 elif kind == regkind or kind == lnkkind:
1082 results[nf] = st
1082 results[nf] = st
1083 else:
1083 else:
1084 badfn(ff, badtype(kind))
1084 badfn(ff, badtype(kind))
1085 if nf in dmap:
1085 if nf in dmap:
1086 results[nf] = None
1086 results[nf] = None
1087 except OSError as inst: # nf not found on disk - it is dirstate only
1087 except OSError as inst: # nf not found on disk - it is dirstate only
1088 if nf in dmap: # does it exactly match a missing file?
1088 if nf in dmap: # does it exactly match a missing file?
1089 results[nf] = None
1089 results[nf] = None
1090 else: # does it match a missing directory?
1090 else: # does it match a missing directory?
1091 if self._map.hasdir(nf):
1091 if self._map.hasdir(nf):
1092 notfoundadd(nf)
1092 notfoundadd(nf)
1093 else:
1093 else:
1094 badfn(ff, encoding.strtolocal(inst.strerror))
1094 badfn(ff, encoding.strtolocal(inst.strerror))
1095
1095
1096 # match.files() may contain explicitly-specified paths that shouldn't
1096 # match.files() may contain explicitly-specified paths that shouldn't
1097 # be taken; drop them from the list of files found. dirsfound/notfound
1097 # be taken; drop them from the list of files found. dirsfound/notfound
1098 # aren't filtered here because they will be tested later.
1098 # aren't filtered here because they will be tested later.
1099 if match.anypats():
1099 if match.anypats():
1100 for f in list(results):
1100 for f in list(results):
1101 if f == b'.hg' or f in subrepos:
1101 if f == b'.hg' or f in subrepos:
1102 # keep sentinel to disable further out-of-repo walks
1102 # keep sentinel to disable further out-of-repo walks
1103 continue
1103 continue
1104 if not match(f):
1104 if not match(f):
1105 del results[f]
1105 del results[f]
1106
1106
1107 # Case-insensitive filesystems cannot rely on lstat() failing to detect
1107 # Case-insensitive filesystems cannot rely on lstat() failing to detect
1108 # a case-only rename. Prune the stat object for any file that does not
1108 # a case-only rename. Prune the stat object for any file that does not
1109 # match the case in the filesystem, if there are multiple files that
1109 # match the case in the filesystem, if there are multiple files that
1110 # normalize to the same path.
1110 # normalize to the same path.
1111 if match.isexact() and self._checkcase:
1111 if match.isexact() and self._checkcase:
1112 normed = {}
1112 normed = {}
1113
1113
1114 for f, st in pycompat.iteritems(results):
1114 for f, st in pycompat.iteritems(results):
1115 if st is None:
1115 if st is None:
1116 continue
1116 continue
1117
1117
1118 nc = util.normcase(f)
1118 nc = util.normcase(f)
1119 paths = normed.get(nc)
1119 paths = normed.get(nc)
1120
1120
1121 if paths is None:
1121 if paths is None:
1122 paths = set()
1122 paths = set()
1123 normed[nc] = paths
1123 normed[nc] = paths
1124
1124
1125 paths.add(f)
1125 paths.add(f)
1126
1126
1127 for norm, paths in pycompat.iteritems(normed):
1127 for norm, paths in pycompat.iteritems(normed):
1128 if len(paths) > 1:
1128 if len(paths) > 1:
1129 for path in paths:
1129 for path in paths:
1130 folded = self._discoverpath(
1130 folded = self._discoverpath(
1131 path, norm, True, None, self._map.dirfoldmap
1131 path, norm, True, None, self._map.dirfoldmap
1132 )
1132 )
1133 if path != folded:
1133 if path != folded:
1134 results[path] = None
1134 results[path] = None
1135
1135
1136 return results, dirsfound, dirsnotfound
1136 return results, dirsfound, dirsnotfound
1137
1137
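
A functionally equivalent sketch of the sorted two-pointer scan near the top of _walkexplicit(), which drops requested files that live inside a subrepo (the original deletes from `files` in place):

def prune_subrepo_paths(files, subrepos):
    files, subrepos = sorted(files), sorted(subrepos)
    kept, i, j = [], 0, 0
    while i < len(files) and j < len(subrepos):
        subpath = subrepos[j] + "/"
        if files[i] < subpath:
            kept.append(files[i])  # sorts before this subrepo: keep
            i += 1
        elif files[i].startswith(subpath):
            i += 1                 # inside the subrepo: drop
        else:
            j += 1                 # past this subrepo: try the next
    kept.extend(files[i:])         # nothing left to prune against
    return kept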
1138 def walk(self, match, subrepos, unknown, ignored, full=True):
1138 def walk(self, match, subrepos, unknown, ignored, full=True):
1139 """
1139 """
1140 Walk recursively through the directory tree, finding all files
1140 Walk recursively through the directory tree, finding all files
1141 matched by match.
1141 matched by match.
1142
1142
1143 If full is False, maybe skip some known-clean files.
1143 If full is False, maybe skip some known-clean files.
1144
1144
1145 Return a dict mapping filename to stat-like object (either
1145 Return a dict mapping filename to stat-like object (either
1146 mercurial.osutil.stat instance or return value of os.stat()).
1146 mercurial.osutil.stat instance or return value of os.stat()).
1147
1147
1148 """
1148 """
1149 # full is a flag that extensions that hook into walk can use -- this
1149 # full is a flag that extensions that hook into walk can use -- this
1150 # implementation doesn't use it at all. This satisfies the contract
1150 # implementation doesn't use it at all. This satisfies the contract
1151 # because we only guarantee a "maybe".
1151 # because we only guarantee a "maybe".
1152
1152
1153 if ignored:
1153 if ignored:
1154 ignore = util.never
1154 ignore = util.never
1155 dirignore = util.never
1155 dirignore = util.never
1156 elif unknown:
1156 elif unknown:
1157 ignore = self._ignore
1157 ignore = self._ignore
1158 dirignore = self._dirignore
1158 dirignore = self._dirignore
1159 else:
1159 else:
1160 # if not unknown and not ignored, drop dir recursion and step 2
1160 # if not unknown and not ignored, drop dir recursion and step 2
1161 ignore = util.always
1161 ignore = util.always
1162 dirignore = util.always
1162 dirignore = util.always
1163
1163
1164 matchfn = match.matchfn
1164 matchfn = match.matchfn
1165 matchalways = match.always()
1165 matchalways = match.always()
1166 matchtdir = match.traversedir
1166 matchtdir = match.traversedir
1167 dmap = self._map
1167 dmap = self._map
1168 listdir = util.listdir
1168 listdir = util.listdir
1169 lstat = os.lstat
1169 lstat = os.lstat
1170 dirkind = stat.S_IFDIR
1170 dirkind = stat.S_IFDIR
1171 regkind = stat.S_IFREG
1171 regkind = stat.S_IFREG
1172 lnkkind = stat.S_IFLNK
1172 lnkkind = stat.S_IFLNK
1173 join = self._join
1173 join = self._join
1174
1174
1175 exact = skipstep3 = False
1175 exact = skipstep3 = False
1176 if match.isexact(): # match.exact
1176 if match.isexact(): # match.exact
1177 exact = True
1177 exact = True
1178 dirignore = util.always # skip step 2
1178 dirignore = util.always # skip step 2
1179 elif match.prefix(): # match.match, no patterns
1179 elif match.prefix(): # match.match, no patterns
1180 skipstep3 = True
1180 skipstep3 = True
1181
1181
1182 if not exact and self._checkcase:
1182 if not exact and self._checkcase:
1183 normalize = self._normalize
1183 normalize = self._normalize
1184 normalizefile = self._normalizefile
1184 normalizefile = self._normalizefile
1185 skipstep3 = False
1185 skipstep3 = False
1186 else:
1186 else:
1187 normalize = self._normalize
1187 normalize = self._normalize
1188 normalizefile = None
1188 normalizefile = None
1189
1189
1190 # step 1: find all explicit files
1190 # step 1: find all explicit files
1191 results, work, dirsnotfound = self._walkexplicit(match, subrepos)
1191 results, work, dirsnotfound = self._walkexplicit(match, subrepos)
1192 if matchtdir:
1192 if matchtdir:
1193 for d in work:
1193 for d in work:
1194 matchtdir(d[0])
1194 matchtdir(d[0])
1195 for d in dirsnotfound:
1195 for d in dirsnotfound:
1196 matchtdir(d)
1196 matchtdir(d)
1197
1197
1198 skipstep3 = skipstep3 and not (work or dirsnotfound)
1198 skipstep3 = skipstep3 and not (work or dirsnotfound)
1199 work = [d for d in work if not dirignore(d[0])]
1199 work = [d for d in work if not dirignore(d[0])]
1200
1200
1201 # step 2: visit subdirectories
1201 # step 2: visit subdirectories
1202 def traverse(work, alreadynormed):
1202 def traverse(work, alreadynormed):
1203 wadd = work.append
1203 wadd = work.append
1204 while work:
1204 while work:
1205 tracing.counter('dirstate.walk work', len(work))
1205 tracing.counter('dirstate.walk work', len(work))
1206 nd = work.pop()
1206 nd = work.pop()
1207 visitentries = match.visitchildrenset(nd)
1207 visitentries = match.visitchildrenset(nd)
1208 if not visitentries:
1208 if not visitentries:
1209 continue
1209 continue
1210 if visitentries == b'this' or visitentries == b'all':
1210 if visitentries == b'this' or visitentries == b'all':
1211 visitentries = None
1211 visitentries = None
1212 skip = None
1212 skip = None
1213 if nd != b'':
1213 if nd != b'':
1214 skip = b'.hg'
1214 skip = b'.hg'
1215 try:
1215 try:
1216 with tracing.log('dirstate.walk.traverse listdir %s', nd):
1216 with tracing.log('dirstate.walk.traverse listdir %s', nd):
1217 entries = listdir(join(nd), stat=True, skip=skip)
1217 entries = listdir(join(nd), stat=True, skip=skip)
1218 except OSError as inst:
1218 except OSError as inst:
1219 if inst.errno in (errno.EACCES, errno.ENOENT):
1219 if inst.errno in (errno.EACCES, errno.ENOENT):
1220 match.bad(
1220 match.bad(
1221 self.pathto(nd), encoding.strtolocal(inst.strerror)
1221 self.pathto(nd), encoding.strtolocal(inst.strerror)
1222 )
1222 )
1223 continue
1223 continue
1224 raise
1224 raise
1225 for f, kind, st in entries:
1225 for f, kind, st in entries:
1226 # Some matchers may return files in the visitentries set,
1226 # Some matchers may return files in the visitentries set,
1227 # instead of 'this', if the matcher explicitly mentions them
1227 # instead of 'this', if the matcher explicitly mentions them
1228 # and is not an exactmatcher. This is acceptable; we do not
1228 # and is not an exactmatcher. This is acceptable; we do not
1229 # make any hard assumptions about file-or-directory below
1229 # make any hard assumptions about file-or-directory below
1230 # based on the presence of `f` in visitentries. If
1230 # based on the presence of `f` in visitentries. If
1231 # visitchildrenset returned a set, we can always skip the
1231 # visitchildrenset returned a set, we can always skip the
1232 # entries *not* in the set it provided regardless of whether
1232 # entries *not* in the set it provided regardless of whether
1233 # they're actually a file or a directory.
1233 # they're actually a file or a directory.
1234 if visitentries and f not in visitentries:
1234 if visitentries and f not in visitentries:
1235 continue
1235 continue
1236 if normalizefile:
1236 if normalizefile:
1237 # even though f might be a directory, we're only
1237 # even though f might be a directory, we're only
1238 # interested in comparing it to files currently in the
1238 # interested in comparing it to files currently in the
1239 # dmap -- therefore normalizefile is enough
1239 # dmap -- therefore normalizefile is enough
1240 nf = normalizefile(
1240 nf = normalizefile(
1241 nd and (nd + b"/" + f) or f, True, True
1241 nd and (nd + b"/" + f) or f, True, True
1242 )
1242 )
1243 else:
1243 else:
1244 nf = nd and (nd + b"/" + f) or f
1244 nf = nd and (nd + b"/" + f) or f
1245 if nf not in results:
1245 if nf not in results:
1246 if kind == dirkind:
1246 if kind == dirkind:
1247 if not ignore(nf):
1247 if not ignore(nf):
1248 if matchtdir:
1248 if matchtdir:
1249 matchtdir(nf)
1249 matchtdir(nf)
1250 wadd(nf)
1250 wadd(nf)
1251 if nf in dmap and (matchalways or matchfn(nf)):
1251 if nf in dmap and (matchalways or matchfn(nf)):
1252 results[nf] = None
1252 results[nf] = None
1253 elif kind == regkind or kind == lnkkind:
1253 elif kind == regkind or kind == lnkkind:
1254 if nf in dmap:
1254 if nf in dmap:
1255 if matchalways or matchfn(nf):
1255 if matchalways or matchfn(nf):
1256 results[nf] = st
1256 results[nf] = st
1257 elif (matchalways or matchfn(nf)) and not ignore(
1257 elif (matchalways or matchfn(nf)) and not ignore(
1258 nf
1258 nf
1259 ):
1259 ):
1260 # unknown file -- normalize if necessary
1260 # unknown file -- normalize if necessary
1261 if not alreadynormed:
1261 if not alreadynormed:
1262 nf = normalize(nf, False, True)
1262 nf = normalize(nf, False, True)
1263 results[nf] = st
1263 results[nf] = st
1264 elif nf in dmap and (matchalways or matchfn(nf)):
1264 elif nf in dmap and (matchalways or matchfn(nf)):
1265 results[nf] = None
1265 results[nf] = None
1266
1266
1267 for nd, d in work:
1267 for nd, d in work:
1268 # alreadynormed means that traverse() doesn't have to do any
1268 # alreadynormed means that traverse() doesn't have to do any
1269 # expensive directory normalization
1269 # expensive directory normalization
1270 alreadynormed = not normalize or nd == d
1270 alreadynormed = not normalize or nd == d
1271 traverse([d], alreadynormed)
1271 traverse([d], alreadynormed)
1272
1272
1273 for s in subrepos:
1273 for s in subrepos:
1274 del results[s]
1274 del results[s]
1275 del results[b'.hg']
1275 del results[b'.hg']
1276
1276
1277 # step 3: visit remaining files from dmap
1277 # step 3: visit remaining files from dmap
1278 if not skipstep3 and not exact:
1278 if not skipstep3 and not exact:
1279 # If a dmap file is not in results yet, it was either
1279 # If a dmap file is not in results yet, it was either
1280 # a) not matching matchfn, b) ignored, c) missing, or d) under a
1280 # a) not matching matchfn, b) ignored, c) missing, or d) under a
1281 # symlink directory.
1281 # symlink directory.
1282 if not results and matchalways:
1282 if not results and matchalways:
1283 visit = [f for f in dmap]
1283 visit = [f for f in dmap]
1284 else:
1284 else:
1285 visit = [f for f in dmap if f not in results and matchfn(f)]
1285 visit = [f for f in dmap if f not in results and matchfn(f)]
1286 visit.sort()
1286 visit.sort()
1287
1287
1288 if unknown:
1288 if unknown:
1289 # unknown == True means we walked all dirs under the roots
1289 # unknown == True means we walked all dirs under the roots
1290 # that weren't ignored, and everything that matched was stat'ed
1290 # that weren't ignored, and everything that matched was stat'ed
1291 # and is already in results.
1291 # and is already in results.
1292 # The rest must thus be ignored or under a symlink.
1292 # The rest must thus be ignored or under a symlink.
1293 audit_path = pathutil.pathauditor(self._root, cached=True)
1293 audit_path = pathutil.pathauditor(self._root, cached=True)
1294
1294
1295 for nf in iter(visit):
1295 for nf in iter(visit):
1296 # If a stat for the same file was already added with a
1296 # If a stat for the same file was already added with a
1297 # different case, don't add one for this, since that would
1297 # different case, don't add one for this, since that would
1298 # make it appear as if the file exists under both names
1298 # make it appear as if the file exists under both names
1299 # on disk.
1299 # on disk.
1300 if (
1300 if (
1301 normalizefile
1301 normalizefile
1302 and normalizefile(nf, True, True) in results
1302 and normalizefile(nf, True, True) in results
1303 ):
1303 ):
1304 results[nf] = None
1304 results[nf] = None
1305 # Report ignored items in the dmap as long as they are not
1305 # Report ignored items in the dmap as long as they are not
1306 # under a symlink directory.
1306 # under a symlink directory.
1307 elif audit_path.check(nf):
1307 elif audit_path.check(nf):
1308 try:
1308 try:
1309 results[nf] = lstat(join(nf))
1309 results[nf] = lstat(join(nf))
1310 # file was just ignored, no links, and exists
1310 # file was just ignored, no links, and exists
1311 except OSError:
1311 except OSError:
1312 # file doesn't exist
1312 # file doesn't exist
1313 results[nf] = None
1313 results[nf] = None
1314 else:
1314 else:
1315 # It's either missing or under a symlink directory
1315 # It's either missing or under a symlink directory
1316 # which in this case we report as missing
1316 # which in this case we report as missing
1317 results[nf] = None
1317 results[nf] = None
1318 else:
1318 else:
1319 # We may not have walked the full directory tree above,
1319 # We may not have walked the full directory tree above,
1320 # so stat and check everything we missed.
1320 # so stat and check everything we missed.
1321 iv = iter(visit)
1321 iv = iter(visit)
1322 for st in util.statfiles([join(i) for i in visit]):
1322 for st in util.statfiles([join(i) for i in visit]):
1323 results[next(iv)] = st
1323 results[next(iv)] = st
1324 return results
1324 return results
1325
1325
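
The iter()/next() pairing in step 3 relies on util.statfiles() returning one result per input path, in order; it is equivalent to a zip, as this sketch (with `statfiles` and `join` as stand-ins) shows:

def pair_stats(visit, statfiles, join):
    # one stat result per path, in order, None for missing files
    return dict(zip(visit, statfiles([join(f) for f in visit])))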
1326 def _rust_status(self, matcher, list_clean, list_ignored, list_unknown):
1326 def _rust_status(self, matcher, list_clean, list_ignored, list_unknown):
1327 # Force Rayon (Rust parallelism library) to respect the number of
1327 # Force Rayon (Rust parallelism library) to respect the number of
1328 # workers. This is a temporary workaround until Rust code knows
1328 # workers. This is a temporary workaround until Rust code knows
1329 # how to read the config file.
1329 # how to read the config file.
1330 numcpus = self._ui.configint(b"worker", b"numcpus")
1330 numcpus = self._ui.configint(b"worker", b"numcpus")
1331 if numcpus is not None:
1331 if numcpus is not None:
1332 encoding.environ.setdefault(b'RAYON_NUM_THREADS', b'%d' % numcpus)
1332 encoding.environ.setdefault(b'RAYON_NUM_THREADS', b'%d' % numcpus)
1333
1333
1334 workers_enabled = self._ui.configbool(b"worker", b"enabled", True)
1334 workers_enabled = self._ui.configbool(b"worker", b"enabled", True)
1335 if not workers_enabled:
1335 if not workers_enabled:
1336 encoding.environ[b"RAYON_NUM_THREADS"] = b"1"
1336 encoding.environ[b"RAYON_NUM_THREADS"] = b"1"
1337
1337
1338 (
1338 (
1339 lookup,
1339 lookup,
1340 modified,
1340 modified,
1341 added,
1341 added,
1342 removed,
1342 removed,
1343 deleted,
1343 deleted,
1344 clean,
1344 clean,
1345 ignored,
1345 ignored,
1346 unknown,
1346 unknown,
1347 warnings,
1347 warnings,
1348 bad,
1348 bad,
1349 traversed,
1349 traversed,
1350 dirty,
1350 dirty,
1351 ) = rustmod.status(
1351 ) = rustmod.status(
1352 self._map._rustmap,
1352 self._map._rustmap,
1353 matcher,
1353 matcher,
1354 self._rootdir,
1354 self._rootdir,
1355 self._ignorefiles(),
1355 self._ignorefiles(),
1356 self._checkexec,
1356 self._checkexec,
1357 self._lastnormaltime,
1357 self._lastnormaltime,
1358 bool(list_clean),
1358 bool(list_clean),
1359 bool(list_ignored),
1359 bool(list_ignored),
1360 bool(list_unknown),
1360 bool(list_unknown),
1361 bool(matcher.traversedir),
1361 bool(matcher.traversedir),
1362 )
1362 )
1363
1363
1364 self._dirty |= dirty
1364 self._dirty |= dirty
1365
1365
1366 if matcher.traversedir:
1366 if matcher.traversedir:
1367 for dir in traversed:
1367 for dir in traversed:
1368 matcher.traversedir(dir)
1368 matcher.traversedir(dir)
1369
1369
1370 if self._ui.warn:
1370 if self._ui.warn:
1371 for item in warnings:
1371 for item in warnings:
1372 if isinstance(item, tuple):
1372 if isinstance(item, tuple):
1373 file_path, syntax = item
1373 file_path, syntax = item
1374 msg = _(b"%s: ignoring invalid syntax '%s'\n") % (
1374 msg = _(b"%s: ignoring invalid syntax '%s'\n") % (
1375 file_path,
1375 file_path,
1376 syntax,
1376 syntax,
1377 )
1377 )
1378 self._ui.warn(msg)
1378 self._ui.warn(msg)
1379 else:
1379 else:
1380 msg = _(b"skipping unreadable pattern file '%s': %s\n")
1380 msg = _(b"skipping unreadable pattern file '%s': %s\n")
1381 self._ui.warn(
1381 self._ui.warn(
1382 msg
1382 msg
1383 % (
1383 % (
1384 pathutil.canonpath(
1384 pathutil.canonpath(
1385 self._rootdir, self._rootdir, item
1385 self._rootdir, self._rootdir, item
1386 ),
1386 ),
1387 b"No such file or directory",
1387 b"No such file or directory",
1388 )
1388 )
1389 )
1389 )
1390
1390
1391 for (fn, message) in bad:
1391 for (fn, message) in bad:
1392 matcher.bad(fn, encoding.strtolocal(message))
1392 matcher.bad(fn, encoding.strtolocal(message))
1393
1393
1394 status = scmutil.status(
1394 status = scmutil.status(
1395 modified=modified,
1395 modified=modified,
1396 added=added,
1396 added=added,
1397 removed=removed,
1397 removed=removed,
1398 deleted=deleted,
1398 deleted=deleted,
1399 unknown=unknown,
1399 unknown=unknown,
1400 ignored=ignored,
1400 ignored=ignored,
1401 clean=clean,
1401 clean=clean,
1402 )
1402 )
1403 return (lookup, status)
1403 return (lookup, status)
1404
1404
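
A hedged sketch of the worker handoff above: Rust's Rayon thread pool sizes itself from the RAYON_NUM_THREADS environment variable, so it must be set before crossing into rustmod.status() (the real code uses encoding.environ with bytes keys):

import os

def configure_rayon(numcpus, workers_enabled):
    if numcpus is not None:
        os.environ.setdefault("RAYON_NUM_THREADS", "%d" % numcpus)
    if not workers_enabled:
        os.environ["RAYON_NUM_THREADS"] = "1"  # force serial execution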
1405 def status(self, match, subrepos, ignored, clean, unknown):
1405 def status(self, match, subrepos, ignored, clean, unknown):
1406 """Determine the status of the working copy relative to the
1406 """Determine the status of the working copy relative to the
1407 dirstate and return a pair of (unsure, status), where status is of type
1407 dirstate and return a pair of (unsure, status), where status is of type
1408 scmutil.status and:
1408 scmutil.status and:
1409
1409
1410 unsure:
1410 unsure:
1411 files that might have been modified since the dirstate was
1411 files that might have been modified since the dirstate was
1412 written, but need to be read to be sure (size is the same
1412 written, but need to be read to be sure (size is the same
1413 but mtime differs)
1413 but mtime differs)
1414 status.modified:
1414 status.modified:
1415 files that have definitely been modified since the dirstate
1415 files that have definitely been modified since the dirstate
1416 was written (different size or mode)
1416 was written (different size or mode)
1417 status.clean:
1417 status.clean:
1418 files that have definitely not been modified since the
1418 files that have definitely not been modified since the
1419 dirstate was written
1419 dirstate was written
1420 """
1420 """
1421 listignored, listclean, listunknown = ignored, clean, unknown
1421 listignored, listclean, listunknown = ignored, clean, unknown
1422 lookup, modified, added, unknown, ignored = [], [], [], [], []
1422 lookup, modified, added, unknown, ignored = [], [], [], [], []
1423 removed, deleted, clean = [], [], []
1423 removed, deleted, clean = [], [], []
1424
1424
1425 dmap = self._map
1425 dmap = self._map
1426 dmap.preload()
1426 dmap.preload()
1427
1427
1428 use_rust = True
1428 use_rust = True
1429
1429
1430 allowed_matchers = (
1430 allowed_matchers = (
1431 matchmod.alwaysmatcher,
1431 matchmod.alwaysmatcher,
1432 matchmod.exactmatcher,
1432 matchmod.exactmatcher,
1433 matchmod.includematcher,
1433 matchmod.includematcher,
1434 )
1434 )
1435
1435
1436 if rustmod is None:
1436 if rustmod is None:
1437 use_rust = False
1437 use_rust = False
1438 elif self._checkcase:
1438 elif self._checkcase:
1439 # Case-insensitive filesystems are not handled yet
1439 # Case-insensitive filesystems are not handled yet
1440 use_rust = False
1440 use_rust = False
1441 elif subrepos:
1441 elif subrepos:
1442 use_rust = False
1442 use_rust = False
1443 elif sparse.enabled:
1443 elif sparse.enabled:
1444 use_rust = False
1444 use_rust = False
1445 elif not isinstance(match, allowed_matchers):
1445 elif not isinstance(match, allowed_matchers):
1446 # Some matchers have yet to be implemented
1446 # Some matchers have yet to be implemented
1447 use_rust = False
1447 use_rust = False
1448
1448
1449 if use_rust:
1449 if use_rust:
1450 try:
1450 try:
1451 return self._rust_status(
1451 return self._rust_status(
1452 match, listclean, listignored, listunknown
1452 match, listclean, listignored, listunknown
1453 )
1453 )
1454 except rustmod.FallbackError:
1454 except rustmod.FallbackError:
1455 pass
1455 pass
1456
1456
1457 def noop(f):
1457 def noop(f):
1458 pass
1458 pass
1459
1459
1460 dcontains = dmap.__contains__
1460 dcontains = dmap.__contains__
1461 dget = dmap.__getitem__
1461 dget = dmap.__getitem__
1462 ladd = lookup.append # aka "unsure"
1462 ladd = lookup.append # aka "unsure"
1463 madd = modified.append
1463 madd = modified.append
1464 aadd = added.append
1464 aadd = added.append
1465 uadd = unknown.append if listunknown else noop
1465 uadd = unknown.append if listunknown else noop
1466 iadd = ignored.append if listignored else noop
1466 iadd = ignored.append if listignored else noop
1467 radd = removed.append
1467 radd = removed.append
1468 dadd = deleted.append
1468 dadd = deleted.append
1469 cadd = clean.append if listclean else noop
1469 cadd = clean.append if listclean else noop
1470 mexact = match.exact
1470 mexact = match.exact
1471 dirignore = self._dirignore
1471 dirignore = self._dirignore
1472 checkexec = self._checkexec
1472 checkexec = self._checkexec
1473 copymap = self._map.copymap
1473 copymap = self._map.copymap
1474 lastnormaltime = self._lastnormaltime
1474 lastnormaltime = self._lastnormaltime
1475
1475
1476 # We need to do full walks when either
1476 # We need to do full walks when either
1477 # - we're listing all clean files, or
1477 # - we're listing all clean files, or
1478 # - match.traversedir does something, because match.traversedir should
1478 # - match.traversedir does something, because match.traversedir should
1479 # be called for every dir in the working dir
1479 # be called for every dir in the working dir
1480 full = listclean or match.traversedir is not None
1480 full = listclean or match.traversedir is not None
1481 for fn, st in pycompat.iteritems(
1481 for fn, st in pycompat.iteritems(
1482 self.walk(match, subrepos, listunknown, listignored, full=full)
1482 self.walk(match, subrepos, listunknown, listignored, full=full)
1483 ):
1483 ):
1484 if not dcontains(fn):
1484 if not dcontains(fn):
1485 if (listignored or mexact(fn)) and dirignore(fn):
1485 if (listignored or mexact(fn)) and dirignore(fn):
1486 if listignored:
1486 if listignored:
1487 iadd(fn)
1487 iadd(fn)
1488 else:
1488 else:
1489 uadd(fn)
1489 uadd(fn)
1490 continue
1490 continue
1491
1491
1492 # This is equivalent to 'state, mode, size, time = dmap[fn]' but not
1492 # This is equivalent to 'state, mode, size, time = dmap[fn]' but not
1493 # written like that for performance reasons. dmap[fn] is not a
1493 # written like that for performance reasons. dmap[fn] is not a
1494 # Python tuple in compiled builds. The CPython UNPACK_SEQUENCE
1494 # Python tuple in compiled builds. The CPython UNPACK_SEQUENCE
1495 # opcode has fast paths when the value to be unpacked is a tuple or
1495 # opcode has fast paths when the value to be unpacked is a tuple or
1496 # a list, but falls back to creating a full-fledged iterator in
1496 # a list, but falls back to creating a full-fledged iterator in
1497 # general. That is much slower than simply accessing and storing the
1497 # general. That is much slower than simply accessing and storing the
1498 # tuple members one by one.
1498 # tuple members one by one.
1499 t = dget(fn)
1499 t = dget(fn)
1500 mode = t.mode
1500 mode = t.mode
1501 size = t.size
1501 size = t.size
1502 time = t.mtime
1502 time = t.mtime
1503
1503
1504 if not st and t.tracked:
1504 if not st and t.tracked:
1505 dadd(fn)
1505 dadd(fn)
1506 elif t.merged:
1506 elif t.merged:
1507 madd(fn)
1507 madd(fn)
1508 elif t.added:
1508 elif t.added:
1509 aadd(fn)
1509 aadd(fn)
1510 elif t.removed:
1510 elif t.removed:
1511 radd(fn)
1511 radd(fn)
1512 elif t.tracked:
1512 elif t.tracked:
1513 if (
1513 if (
1514 size >= 0
1514 size >= 0
1515 and (
1515 and (
1516 (size != st.st_size and size != st.st_size & _rangemask)
1516 (size != st.st_size and size != st.st_size & _rangemask)
1517 or ((mode ^ st.st_mode) & 0o100 and checkexec)
1517 or ((mode ^ st.st_mode) & 0o100 and checkexec)
1518 )
1518 )
1519 or t.from_p2
1519 or t.from_p2
1520 or fn in copymap
1520 or fn in copymap
1521 ):
1521 ):
1522 if stat.S_ISLNK(st.st_mode) and size != st.st_size:
1522 if stat.S_ISLNK(st.st_mode) and size != st.st_size:
1523 # issue6456: Size returned may be longer due to
1523 # issue6456: Size returned may be longer due to
1524 # encryption on EXT-4 fscrypt, undecided.
1524 # encryption on EXT-4 fscrypt, undecided.
1525 ladd(fn)
1525 ladd(fn)
1526 else:
1526 else:
1527 madd(fn)
1527 madd(fn)
1528 elif (
1528 elif (
1529 time != st[stat.ST_MTIME]
1529 time != st[stat.ST_MTIME]
1530 and time != st[stat.ST_MTIME] & _rangemask
1530 and time != st[stat.ST_MTIME] & _rangemask
1531 ):
1531 ):
1532 ladd(fn)
1532 ladd(fn)
1533 elif st[stat.ST_MTIME] == lastnormaltime:
1533 elif st[stat.ST_MTIME] == lastnormaltime:
1534 # fn may have just been marked as normal and it may have
1534 # fn may have just been marked as normal and it may have
1535 # changed in the same second without changing its size.
1535 # changed in the same second without changing its size.
1536 # This can happen if we quickly do multiple commits.
1536 # This can happen if we quickly do multiple commits.
1537 # Force lookup, so we don't miss such a racy file change.
1537 # Force lookup, so we don't miss such a racy file change.
1538 ladd(fn)
1538 ladd(fn)
1539 elif listclean:
1539 elif listclean:
1540 cadd(fn)
1540 cadd(fn)
1541 status = scmutil.status(
1541 status = scmutil.status(
1542 modified, added, removed, deleted, unknown, ignored, clean
1542 modified, added, removed, deleted, unknown, ignored, clean
1543 )
1543 )
1544 return (lookup, status)
1544 return (lookup, status)
1545
1545
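
The size comparison in status() accounts for the dirstate storing sizes truncated to 31 bits; a sketch of that check, with RANGEMASK mirroring the module's _rangemask:

RANGEMASK = 0x7FFFFFFF

def size_differs(recorded_size, stat_size):
    # only a recorded size that matches neither the raw stat size nor
    # its 31-bit truncation counts as definitely modified
    return (
        recorded_size >= 0
        and recorded_size != stat_size
        and recorded_size != stat_size & RANGEMASK
    )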
1546 def matches(self, match):
1546 def matches(self, match):
1547 """
1547 """
1548 return files in the dirstate (in whatever state) filtered by match
1548 return files in the dirstate (in whatever state) filtered by match
1549 """
1549 """
1550 dmap = self._map
1550 dmap = self._map
1551 if rustmod is not None:
1551 if rustmod is not None:
1552 dmap = self._map._rustmap
1552 dmap = self._map._rustmap
1553
1553
1554 if match.always():
1554 if match.always():
1555 return dmap.keys()
1555 return dmap.keys()
1556 files = match.files()
1556 files = match.files()
1557 if match.isexact():
1557 if match.isexact():
1558 # fast path -- filter the other way around, since typically files is
1558 # fast path -- filter the other way around, since typically files is
1559 # much smaller than dmap
1559 # much smaller than dmap
1560 return [f for f in files if f in dmap]
1560 return [f for f in files if f in dmap]
1561 if match.prefix() and all(fn in dmap for fn in files):
1561 if match.prefix() and all(fn in dmap for fn in files):
1562 # fast path -- all the values are known to be files, so just return
1562 # fast path -- all the values are known to be files, so just return
1563 # that
1563 # that
1564 return list(files)
1564 return list(files)
1565 return [f for f in dmap if match(f)]
1565 return [f for f in dmap if match(f)]
1566
1566
1567 def _actualfilename(self, tr):
1567 def _actualfilename(self, tr):
1568 if tr:
1568 if tr:
1569 return self._pendingfilename
1569 return self._pendingfilename
1570 else:
1570 else:
1571 return self._filename
1571 return self._filename
1572
1572
1573 def savebackup(self, tr, backupname):
1573 def savebackup(self, tr, backupname):
1574 '''Save current dirstate into backup file'''
1574 '''Save current dirstate into backup file'''
1575 filename = self._actualfilename(tr)
1575 filename = self._actualfilename(tr)
1576 assert backupname != filename
1576 assert backupname != filename
1577
1577
1578 # use '_writedirstate' instead of 'write' to make sure changes are
1578 # use '_writedirstate' instead of 'write' to make sure changes are
1579 # written out, because the latter skips writing while a transaction
1579 # written out, because the latter skips writing while a transaction
1580 # is running. The output file is then used to create the dirstate backup.
1580 # is running. The output file is then used to create the dirstate backup.
1581 if self._dirty or not self._opener.exists(filename):
1581 if self._dirty or not self._opener.exists(filename):
1582 self._writedirstate(
1582 self._writedirstate(
1583 self._opener(filename, b"w", atomictemp=True, checkambig=True)
1583 tr,
1584 self._opener(filename, b"w", atomictemp=True, checkambig=True),
1584 )
1585 )
1585
1586
1586 if tr:
1587 if tr:
1587 # ensure that subsequent tr.writepending returns True for
1588 # ensure that subsequent tr.writepending returns True for
1588 # changes written out above, even if dirstate is never
1589 # changes written out above, even if dirstate is never
1589 # changed after this
1590 # changed after this
1590 tr.addfilegenerator(
1591 tr.addfilegenerator(
1591 b'dirstate',
1592 b'dirstate',
1592 (self._filename,),
1593 (self._filename,),
1593 self._writedirstate,
1594 lambda f: self._writedirstate(tr, f),
1594 location=b'plain',
1595 location=b'plain',
1595 )
1596 )
1596
1597
1597 # ensure that pending file written above is unlinked at
1598 # ensure that pending file written above is unlinked at
1598 # failure, even if tr.writepending isn't invoked until the
1599 # failure, even if tr.writepending isn't invoked until the
1599 # end of this transaction
1600 # end of this transaction
1600 tr.registertmp(filename, location=b'plain')
1601 tr.registertmp(filename, location=b'plain')
1601
1602
1602 self._opener.tryunlink(backupname)
1603 self._opener.tryunlink(backupname)
1603 # hardlink backup is okay because _writedirstate is always called
1604 # hardlink backup is okay because _writedirstate is always called
1604 # with an "atomictemp=True" file.
1605 # with an "atomictemp=True" file.
1605 util.copyfile(
1606 util.copyfile(
1606 self._opener.join(filename),
1607 self._opener.join(filename),
1607 self._opener.join(backupname),
1608 self._opener.join(backupname),
1608 hardlink=True,
1609 hardlink=True,
1609 )
1610 )
1610
1611
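
A hedged sketch of why a hardlink suffices as a backup here: every dirstate write goes through an atomictemp rename, so the linked inode is never mutated in place (util.copyfile may still fall back to a real copy where links are unsupported):

import os

def hardlink_backup(src, dst):
    try:
        os.unlink(dst)   # mirrors opener.tryunlink()
    except OSError:
        pass
    os.link(src, dst)    # cheap, safe under rename-into-place writes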
1611 def restorebackup(self, tr, backupname):
1612 def restorebackup(self, tr, backupname):
1612 '''Restore dirstate by backup file'''
1613 '''Restore dirstate by backup file'''
1613 # this "invalidate()" prevents "wlock.release()" from writing
1614 # this "invalidate()" prevents "wlock.release()" from writing
1614 # changes of dirstate out after restoring from backup file
1615 # changes of dirstate out after restoring from backup file
1615 self.invalidate()
1616 self.invalidate()
1616 filename = self._actualfilename(tr)
1617 filename = self._actualfilename(tr)
1617 o = self._opener
1618 o = self._opener
1618 if util.samefile(o.join(backupname), o.join(filename)):
1619 if util.samefile(o.join(backupname), o.join(filename)):
1619 o.unlink(backupname)
1620 o.unlink(backupname)
1620 else:
1621 else:
1621 o.rename(backupname, filename, checkambig=True)
1622 o.rename(backupname, filename, checkambig=True)
1622
1623
1623 def clearbackup(self, tr, backupname):
1624 def clearbackup(self, tr, backupname):
1624 '''Clear backup file'''
1625 '''Clear backup file'''
1625 self._opener.unlink(backupname)
1626 self._opener.unlink(backupname)
@@ -1,687 +1,717 b''
1 # dirstatemap.py
1 # dirstatemap.py
2 #
2 #
3 # This software may be used and distributed according to the terms of the
3 # This software may be used and distributed according to the terms of the
4 # GNU General Public License version 2 or any later version.
4 # GNU General Public License version 2 or any later version.
5
5
6 from __future__ import absolute_import
6 from __future__ import absolute_import
7
7
8 import errno
8 import errno
9
9
10 from .i18n import _
10 from .i18n import _
11
11
12 from . import (
12 from . import (
13 error,
13 error,
14 pathutil,
14 pathutil,
15 policy,
15 policy,
16 pycompat,
16 pycompat,
17 txnutil,
17 txnutil,
18 util,
18 util,
19 )
19 )
20
20
21 from .dirstateutils import (
22 docket as docketmod,
23 )
24
21 parsers = policy.importmod('parsers')
25 parsers = policy.importmod('parsers')
22 rustmod = policy.importrust('dirstate')
26 rustmod = policy.importrust('dirstate')
23
27
24 propertycache = util.propertycache
28 propertycache = util.propertycache
25
29
26 DirstateItem = parsers.DirstateItem
30 DirstateItem = parsers.DirstateItem
27
31
28
32
29 # a special value used internally for `size` if the file comes from the other parent
33 # a special value used internally for `size` if the file comes from the other parent
30 FROM_P2 = -2
34 FROM_P2 = -2
31
35
32 # a special value used internally for `size` if the file is modified/merged/added
36 # a special value used internally for `size` if the file is modified/merged/added
33 NONNORMAL = -1
37 NONNORMAL = -1
34
38
35 # a special value used internally for `time` if the time is ambiguous
39 # a special value used internally for `time` if the time is ambiguous
36 AMBIGUOUS_TIME = -1
40 AMBIGUOUS_TIME = -1
37
41
38 rangemask = 0x7FFFFFFF
42 rangemask = 0x7FFFFFFF
39
43
40
44
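
An illustrative decoding of the sentinel constants above (not an API of the module):

FROM_P2, NONNORMAL, AMBIGUOUS_TIME = -2, -1, -1

def describe(size, mtime):
    if size == FROM_P2:
        return "from the second parent"
    if size == NONNORMAL:
        return "modified/merged/added"
    if mtime == AMBIGUOUS_TIME:
        return "needs lookup (ambiguous mtime)"
    return "normal"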
41 class dirstatemap(object):
45 class dirstatemap(object):
42 """Map encapsulating the dirstate's contents.
46 """Map encapsulating the dirstate's contents.
43
47
44 The dirstate contains the following state:
48 The dirstate contains the following state:
45
49
46 - `identity` is the identity of the dirstate file, which can be used to
50 - `identity` is the identity of the dirstate file, which can be used to
47 detect when changes have occurred to the dirstate file.
51 detect when changes have occurred to the dirstate file.
48
52
49 - `parents` is a pair containing the parents of the working copy. The
53 - `parents` is a pair containing the parents of the working copy. The
50 parents are updated by calling `setparents`.
54 parents are updated by calling `setparents`.
51
55
52 - the state map maps filenames to tuples of (state, mode, size, mtime),
56 - the state map maps filenames to tuples of (state, mode, size, mtime),
53 where state is a single character representing 'normal', 'added',
57 where state is a single character representing 'normal', 'added',
54 'removed', or 'merged'. It is read by treating the dirstate as a
58 'removed', or 'merged'. It is read by treating the dirstate as a
55 dict. File state is updated by calling the `addfile`, `removefile` and
59 dict. File state is updated by calling the `addfile`, `removefile` and
56 `dropfile` methods.
60 `dropfile` methods.
57
61
58 - `copymap` maps destination filenames to their source filename.
62 - `copymap` maps destination filenames to their source filename.
59
63
60 The dirstate also provides the following views onto the state:
64 The dirstate also provides the following views onto the state:
61
65
62 - `nonnormalset` is a set of the filenames that have state other
66 - `nonnormalset` is a set of the filenames that have state other
63 than 'normal', or are normal but have an mtime of -1 ('normallookup').
67 than 'normal', or are normal but have an mtime of -1 ('normallookup').
64
68
65 - `otherparentset` is a set of the filenames that are marked as coming
69 - `otherparentset` is a set of the filenames that are marked as coming
66 from the second parent when the dirstate is currently being merged.
70 from the second parent when the dirstate is currently being merged.
67
71
68 - `filefoldmap` is a dict mapping normalized filenames to the denormalized
72 - `filefoldmap` is a dict mapping normalized filenames to the denormalized
69 form that they appear as in the dirstate.
73 form that they appear as in the dirstate.
70
74
71 - `dirfoldmap` is a dict mapping normalized directory names to the
75 - `dirfoldmap` is a dict mapping normalized directory names to the
72 denormalized form that they appear as in the dirstate.
76 denormalized form that they appear as in the dirstate.
73 """
77 """
74
78
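
The `nonnormalset` view described in the docstring can be derived directly from the state map; an illustrative sketch over entries with .state/.mtime attributes:

def compute_nonnormal(state_map):
    # anything not plain 'normal', or normal with an ambiguous mtime
    return {
        f for f, e in state_map.items()
        if e.state != b'n' or e.mtime == -1
    }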
75 def __init__(self, ui, opener, root, nodeconstants, use_dirstate_v2):
79 def __init__(self, ui, opener, root, nodeconstants, use_dirstate_v2):
76 self._ui = ui
80 self._ui = ui
77 self._opener = opener
81 self._opener = opener
78 self._root = root
82 self._root = root
79 self._filename = b'dirstate'
83 self._filename = b'dirstate'
80 self._nodelen = 20
84 self._nodelen = 20
81 self._nodeconstants = nodeconstants
85 self._nodeconstants = nodeconstants
82 assert (
86 assert (
83 not use_dirstate_v2
87 not use_dirstate_v2
84 ), "should have detected unsupported requirement"
88 ), "should have detected unsupported requirement"
85
89
86 self._parents = None
90 self._parents = None
87 self._dirtyparents = False
91 self._dirtyparents = False
88
92
89 # for consistent view between _pl() and _read() invocations
93 # for consistent view between _pl() and _read() invocations
90 self._pendingmode = None
94 self._pendingmode = None
91
95
92 @propertycache
96 @propertycache
93 def _map(self):
97 def _map(self):
94 self._map = {}
98 self._map = {}
95 self.read()
99 self.read()
96 return self._map
100 return self._map
97
101
98 @propertycache
102 @propertycache
99 def copymap(self):
103 def copymap(self):
100 self.copymap = {}
104 self.copymap = {}
101 self._map
105 self._map
102 return self.copymap
106 return self.copymap
103
107
104 def directories(self):
108 def directories(self):
105 # Rust / dirstate-v2 only
109 # Rust / dirstate-v2 only
106 return []
110 return []
107
111
108 def clear(self):
112 def clear(self):
109 self._map.clear()
113 self._map.clear()
110 self.copymap.clear()
114 self.copymap.clear()
111 self.setparents(self._nodeconstants.nullid, self._nodeconstants.nullid)
115 self.setparents(self._nodeconstants.nullid, self._nodeconstants.nullid)
112 util.clearcachedproperty(self, b"_dirs")
116 util.clearcachedproperty(self, b"_dirs")
113 util.clearcachedproperty(self, b"_alldirs")
117 util.clearcachedproperty(self, b"_alldirs")
114 util.clearcachedproperty(self, b"filefoldmap")
118 util.clearcachedproperty(self, b"filefoldmap")
115 util.clearcachedproperty(self, b"dirfoldmap")
119 util.clearcachedproperty(self, b"dirfoldmap")
116 util.clearcachedproperty(self, b"nonnormalset")
120 util.clearcachedproperty(self, b"nonnormalset")
117 util.clearcachedproperty(self, b"otherparentset")
121 util.clearcachedproperty(self, b"otherparentset")
118
122
119 def items(self):
123 def items(self):
120 return pycompat.iteritems(self._map)
124 return pycompat.iteritems(self._map)
121
125
122 # forward for python2,3 compat
126 # forward for python2,3 compat
123 iteritems = items
127 iteritems = items
124
128
125 def __len__(self):
129 def __len__(self):
126 return len(self._map)
130 return len(self._map)
127
131
128 def __iter__(self):
132 def __iter__(self):
129 return iter(self._map)
133 return iter(self._map)
130
134
131 def get(self, key, default=None):
135 def get(self, key, default=None):
132 return self._map.get(key, default)
136 return self._map.get(key, default)
133
137
134 def __contains__(self, key):
138 def __contains__(self, key):
135 return key in self._map
139 return key in self._map
136
140
137 def __getitem__(self, key):
141 def __getitem__(self, key):
138 return self._map[key]
142 return self._map[key]
139
143
140 def keys(self):
144 def keys(self):
141 return self._map.keys()
145 return self._map.keys()
142
146
143 def preload(self):
147 def preload(self):
144 """Loads the underlying data, if it's not already loaded"""
148 """Loads the underlying data, if it's not already loaded"""
145 self._map
149 self._map
146
150
147 def addfile(
151 def addfile(
148 self,
152 self,
149 f,
153 f,
150 mode=0,
154 mode=0,
151 size=None,
155 size=None,
152 mtime=None,
156 mtime=None,
153 added=False,
157 added=False,
154 merged=False,
158 merged=False,
155 from_p2=False,
159 from_p2=False,
156 possibly_dirty=False,
160 possibly_dirty=False,
157 ):
161 ):
158 """Add a tracked file to the dirstate."""
162 """Add a tracked file to the dirstate."""
159 if added:
163 if added:
160 assert not merged
164 assert not merged
161 assert not possibly_dirty
165 assert not possibly_dirty
162 assert not from_p2
166 assert not from_p2
163 state = b'a'
167 state = b'a'
164 size = NONNORMAL
168 size = NONNORMAL
165 mtime = AMBIGUOUS_TIME
169 mtime = AMBIGUOUS_TIME
166 elif merged:
170 elif merged:
167 assert not possibly_dirty
171 assert not possibly_dirty
168 assert not from_p2
172 assert not from_p2
169 state = b'm'
173 state = b'm'
170 size = FROM_P2
174 size = FROM_P2
171 mtime = AMBIGUOUS_TIME
175 mtime = AMBIGUOUS_TIME
172 elif from_p2:
176 elif from_p2:
173 assert not possibly_dirty
177 assert not possibly_dirty
174 state = b'n'
178 state = b'n'
175 size = FROM_P2
179 size = FROM_P2
176 mtime = AMBIGUOUS_TIME
180 mtime = AMBIGUOUS_TIME
177 elif possibly_dirty:
181 elif possibly_dirty:
178 state = b'n'
182 state = b'n'
179 size = NONNORMAL
183 size = NONNORMAL
180 mtime = AMBIGUOUS_TIME
184 mtime = AMBIGUOUS_TIME
181 else:
185 else:
182 assert size != FROM_P2
186 assert size != FROM_P2
183 assert size != NONNORMAL
187 assert size != NONNORMAL
184 state = b'n'
188 state = b'n'
185 size = size & rangemask
189 size = size & rangemask
186 mtime = mtime & rangemask
190 mtime = mtime & rangemask
187 assert state is not None
191 assert state is not None
188 assert size is not None
192 assert size is not None
189 assert mtime is not None
193 assert mtime is not None
190 old_entry = self.get(f)
194 old_entry = self.get(f)
191 if (
195 if (
192 old_entry is None or old_entry.removed
196 old_entry is None or old_entry.removed
193 ) and "_dirs" in self.__dict__:
197 ) and "_dirs" in self.__dict__:
194 self._dirs.addpath(f)
198 self._dirs.addpath(f)
195 if old_entry is None and "_alldirs" in self.__dict__:
199 if old_entry is None and "_alldirs" in self.__dict__:
196 self._alldirs.addpath(f)
200 self._alldirs.addpath(f)
197 self._map[f] = DirstateItem(state, mode, size, mtime)
201 self._map[f] = DirstateItem(state, mode, size, mtime)
198 if state != b'n' or mtime == AMBIGUOUS_TIME:
202 if state != b'n' or mtime == AMBIGUOUS_TIME:
199 self.nonnormalset.add(f)
203 self.nonnormalset.add(f)
200 if size == FROM_P2:
204 if size == FROM_P2:
201 self.otherparentset.add(f)
205 self.otherparentset.add(f)
202
206
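For orientation, a minimal sketch of the dispatch the addfile() branches above implement; the dict is illustrative only and reuses the NONNORMAL, FROM_P2 and AMBIGUOUS_TIME sentinels from this module:

    # (state, size, mtime) stored for each mutually-exclusive flag
    ADDFILE_SENTINELS = {
        'added':          (b'a', NONNORMAL, AMBIGUOUS_TIME),
        'merged':         (b'm', FROM_P2, AMBIGUOUS_TIME),
        'from_p2':        (b'n', FROM_P2, AMBIGUOUS_TIME),
        'possibly_dirty': (b'n', NONNORMAL, AMBIGUOUS_TIME),
    }
    # the default branch keeps the caller's size and mtime (masked with
    # rangemask) under the normal state b'n'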
203 def removefile(self, f, in_merge=False):
207 def removefile(self, f, in_merge=False):
204 """
208 """
205 Mark a file as removed in the dirstate.
209 Mark a file as removed in the dirstate.
206
210
207 The internal `size` field is used to store sentinel values that indicate
211 The internal `size` field is used to store sentinel values that indicate
208 the file's previous state. In the future, we should refactor this
212 the file's previous state. In the future, we should refactor this
209 to be more explicit about what that state is.
213 to be more explicit about what that state is.
210 """
214 """
211 entry = self.get(f)
215 entry = self.get(f)
212 size = 0
216 size = 0
213 if in_merge:
217 if in_merge:
214 # XXX we should not be able to have 'm' state and 'FROM_P2' if not
218 # XXX we should not be able to have 'm' state and 'FROM_P2' if not
215 # during a merge. So I (marmoute) am not sure we need the
219 # during a merge. So I (marmoute) am not sure we need the
216 # conditional at all. Double-checking this with an assert
220 # conditional at all. Double-checking this with an assert
217 # would be nice.
221 # would be nice.
218 if entry is not None:
222 if entry is not None:
219 # backup the previous state
223 # backup the previous state
220 if entry.merged: # merge
224 if entry.merged: # merge
221 size = NONNORMAL
225 size = NONNORMAL
222 elif entry.from_p2:
226 elif entry.from_p2:
223 size = FROM_P2
227 size = FROM_P2
224 self.otherparentset.add(f)
228 self.otherparentset.add(f)
225 if entry is not None and not (entry.merged or entry.from_p2):
229 if entry is not None and not (entry.merged or entry.from_p2):
226 self.copymap.pop(f, None)
230 self.copymap.pop(f, None)
227
231
228 if entry is not None and not entry.removed and "_dirs" in self.__dict__:
232 if entry is not None and not entry.removed and "_dirs" in self.__dict__:
229 self._dirs.delpath(f)
233 self._dirs.delpath(f)
230 if entry is None and "_alldirs" in self.__dict__:
234 if entry is None and "_alldirs" in self.__dict__:
231 self._alldirs.addpath(f)
235 self._alldirs.addpath(f)
232 if "filefoldmap" in self.__dict__:
236 if "filefoldmap" in self.__dict__:
233 normed = util.normcase(f)
237 normed = util.normcase(f)
234 self.filefoldmap.pop(normed, None)
238 self.filefoldmap.pop(normed, None)
235 self._map[f] = DirstateItem(b'r', 0, size, 0)
239 self._map[f] = DirstateItem(b'r', 0, size, 0)
236 self.nonnormalset.add(f)
240 self.nonnormalset.add(f)
237
241
238 def dropfile(self, f):
242 def dropfile(self, f):
239 """
243 """
240 Remove a file from the dirstate. Returns True if the file was
244 Remove a file from the dirstate. Returns True if the file was
241 previously recorded.
245 previously recorded.
242 """
246 """
243 old_entry = self._map.pop(f, None)
247 old_entry = self._map.pop(f, None)
244 exists = False
248 exists = False
245 oldstate = b'?'
249 oldstate = b'?'
246 if old_entry is not None:
250 if old_entry is not None:
247 exists = True
251 exists = True
248 oldstate = old_entry.state
252 oldstate = old_entry.state
249 if exists:
253 if exists:
250 if oldstate != b"r" and "_dirs" in self.__dict__:
254 if oldstate != b"r" and "_dirs" in self.__dict__:
251 self._dirs.delpath(f)
255 self._dirs.delpath(f)
252 if "_alldirs" in self.__dict__:
256 if "_alldirs" in self.__dict__:
253 self._alldirs.delpath(f)
257 self._alldirs.delpath(f)
254 if "filefoldmap" in self.__dict__:
258 if "filefoldmap" in self.__dict__:
255 normed = util.normcase(f)
259 normed = util.normcase(f)
256 self.filefoldmap.pop(normed, None)
260 self.filefoldmap.pop(normed, None)
257 self.nonnormalset.discard(f)
261 self.nonnormalset.discard(f)
258 return exists
262 return exists
259
263
260 def clearambiguoustimes(self, files, now):
264 def clearambiguoustimes(self, files, now):
261 for f in files:
265 for f in files:
262 e = self.get(f)
266 e = self.get(f)
263 if e is not None and e.need_delay(now):
267 if e is not None and e.need_delay(now):
264 e.set_possibly_dirty()
268 e.set_possibly_dirty()
265 self.nonnormalset.add(f)
269 self.nonnormalset.add(f)
266
270
267 def nonnormalentries(self):
271 def nonnormalentries(self):
268 '''Compute the nonnormal dirstate entries from the dmap'''
272 '''Compute the nonnormal dirstate entries from the dmap'''
269 try:
273 try:
270 return parsers.nonnormalotherparententries(self._map)
274 return parsers.nonnormalotherparententries(self._map)
271 except AttributeError:
275 except AttributeError:
272 nonnorm = set()
276 nonnorm = set()
273 otherparent = set()
277 otherparent = set()
274 for fname, e in pycompat.iteritems(self._map):
278 for fname, e in pycompat.iteritems(self._map):
275 if e.state != b'n' or e.mtime == AMBIGUOUS_TIME:
279 if e.state != b'n' or e.mtime == AMBIGUOUS_TIME:
276 nonnorm.add(fname)
280 nonnorm.add(fname)
277 if e.from_p2:
281 if e.from_p2:
278 otherparent.add(fname)
282 otherparent.add(fname)
279 return nonnorm, otherparent
283 return nonnorm, otherparent
280
284
281 @propertycache
285 @propertycache
282 def filefoldmap(self):
286 def filefoldmap(self):
283 """Returns a dictionary mapping normalized case paths to their
287 """Returns a dictionary mapping normalized case paths to their
284 non-normalized versions.
288 non-normalized versions.
285 """
289 """
286 try:
290 try:
287 makefilefoldmap = parsers.make_file_foldmap
291 makefilefoldmap = parsers.make_file_foldmap
288 except AttributeError:
292 except AttributeError:
289 pass
293 pass
290 else:
294 else:
291 return makefilefoldmap(
295 return makefilefoldmap(
292 self._map, util.normcasespec, util.normcasefallback
296 self._map, util.normcasespec, util.normcasefallback
293 )
297 )
294
298
295 f = {}
299 f = {}
296 normcase = util.normcase
300 normcase = util.normcase
297 for name, s in pycompat.iteritems(self._map):
301 for name, s in pycompat.iteritems(self._map):
298 if not s.removed:
302 if not s.removed:
299 f[normcase(name)] = name
303 f[normcase(name)] = name
300 f[b'.'] = b'.' # prevents useless util.fspath() invocation
304 f[b'.'] = b'.' # prevents useless util.fspath() invocation
301 return f
305 return f
302
306
303 def hastrackeddir(self, d):
307 def hastrackeddir(self, d):
304 """
308 """
305 Returns True if the dirstate contains a tracked (not removed) file
309 Returns True if the dirstate contains a tracked (not removed) file
306 in this directory.
310 in this directory.
307 """
311 """
308 return d in self._dirs
312 return d in self._dirs
309
313
310 def hasdir(self, d):
314 def hasdir(self, d):
311 """
315 """
312 Returns True if the dirstate contains a file (tracked or removed)
316 Returns True if the dirstate contains a file (tracked or removed)
313 in this directory.
317 in this directory.
314 """
318 """
315 return d in self._alldirs
319 return d in self._alldirs
316
320
317 @propertycache
321 @propertycache
318 def _dirs(self):
322 def _dirs(self):
319 return pathutil.dirs(self._map, b'r')
323 return pathutil.dirs(self._map, b'r')
320
324
321 @propertycache
325 @propertycache
322 def _alldirs(self):
326 def _alldirs(self):
323 return pathutil.dirs(self._map)
327 return pathutil.dirs(self._map)
324
328
325 def _opendirstatefile(self):
329 def _opendirstatefile(self):
326 fp, mode = txnutil.trypending(self._root, self._opener, self._filename)
330 fp, mode = txnutil.trypending(self._root, self._opener, self._filename)
327 if self._pendingmode is not None and self._pendingmode != mode:
331 if self._pendingmode is not None and self._pendingmode != mode:
328 fp.close()
332 fp.close()
329 raise error.Abort(
333 raise error.Abort(
330 _(b'working directory state may be changed in parallel')
334 _(b'working directory state may be changed in parallel')
331 )
335 )
332 self._pendingmode = mode
336 self._pendingmode = mode
333 return fp
337 return fp
334
338
335 def parents(self):
339 def parents(self):
336 if not self._parents:
340 if not self._parents:
337 try:
341 try:
338 fp = self._opendirstatefile()
342 fp = self._opendirstatefile()
339 st = fp.read(2 * self._nodelen)
343 st = fp.read(2 * self._nodelen)
340 fp.close()
344 fp.close()
341 except IOError as err:
345 except IOError as err:
342 if err.errno != errno.ENOENT:
346 if err.errno != errno.ENOENT:
343 raise
347 raise
344 # File doesn't exist, so the current state is empty
348 # File doesn't exist, so the current state is empty
345 st = b''
349 st = b''
346
350
347 l = len(st)
351 l = len(st)
348 if l == self._nodelen * 2:
352 if l == self._nodelen * 2:
349 self._parents = (
353 self._parents = (
350 st[: self._nodelen],
354 st[: self._nodelen],
351 st[self._nodelen : 2 * self._nodelen],
355 st[self._nodelen : 2 * self._nodelen],
352 )
356 )
353 elif l == 0:
357 elif l == 0:
354 self._parents = (
358 self._parents = (
355 self._nodeconstants.nullid,
359 self._nodeconstants.nullid,
356 self._nodeconstants.nullid,
360 self._nodeconstants.nullid,
357 )
361 )
358 else:
362 else:
359 raise error.Abort(
363 raise error.Abort(
360 _(b'working directory state appears damaged!')
364 _(b'working directory state appears damaged!')
361 )
365 )
362
366
363 return self._parents
367 return self._parents
364
368
365 def setparents(self, p1, p2):
369 def setparents(self, p1, p2):
366 self._parents = (p1, p2)
370 self._parents = (p1, p2)
367 self._dirtyparents = True
371 self._dirtyparents = True
368
372
369 def read(self):
373 def read(self):
370 # ignore HG_PENDING because identity is used only for writing
374 # ignore HG_PENDING because identity is used only for writing
371 self.identity = util.filestat.frompath(
375 self.identity = util.filestat.frompath(
372 self._opener.join(self._filename)
376 self._opener.join(self._filename)
373 )
377 )
374
378
375 try:
379 try:
376 fp = self._opendirstatefile()
380 fp = self._opendirstatefile()
377 try:
381 try:
378 st = fp.read()
382 st = fp.read()
379 finally:
383 finally:
380 fp.close()
384 fp.close()
381 except IOError as err:
385 except IOError as err:
382 if err.errno != errno.ENOENT:
386 if err.errno != errno.ENOENT:
383 raise
387 raise
384 return
388 return
385 if not st:
389 if not st:
386 return
390 return
387
391
388 if util.safehasattr(parsers, b'dict_new_presized'):
392 if util.safehasattr(parsers, b'dict_new_presized'):
389 # Make an estimate of the number of files in the dirstate based on
393 # Make an estimate of the number of files in the dirstate based on
390 # its size. This trades wasting some memory for avoiding costly
394 # its size. This trades wasting some memory for avoiding costly
391 # resizes. Each entry has a prefix of 17 bytes followed by one or
395 # resizes. Each entry has a prefix of 17 bytes followed by one or
392 # two path names. Studies on various large-scale real-world repositories
396 # two path names. Studies on various large-scale real-world repositories
393 # found 54 bytes to be a reasonable upper limit for the average path name.
397 # found 54 bytes to be a reasonable upper limit for the average path name.
394 # Copy entries are ignored for the sake of this estimate.
398 # Copy entries are ignored for the sake of this estimate.
395 self._map = parsers.dict_new_presized(len(st) // 71)
399 self._map = parsers.dict_new_presized(len(st) // 71)
396
400
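As a worked instance of the 17 + 54 = 71 bytes-per-entry estimate used above (the file size is purely illustrative):

    # a dirstate file of ~710,000 bytes presizes the dict for ~10,000 entries
    estimated_entries = 710000 // 71  # == 10000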
397 # Python's garbage collector triggers a GC each time a certain number
401 # Python's garbage collector triggers a GC each time a certain number
398 # of container objects (the number being defined by
402 # of container objects (the number being defined by
399 # gc.get_threshold()) are allocated. parse_dirstate creates a tuple
403 # gc.get_threshold()) are allocated. parse_dirstate creates a tuple
400 # for each file in the dirstate. The C version then immediately marks
404 # for each file in the dirstate. The C version then immediately marks
401 # them as not to be tracked by the collector. However, this has no
405 # them as not to be tracked by the collector. However, this has no
402 # effect on when GCs are triggered, only on what objects the GC looks
406 # effect on when GCs are triggered, only on what objects the GC looks
403 # into. This means that O(number of files) GCs are unavoidable.
407 # into. This means that O(number of files) GCs are unavoidable.
404 # Depending on when in the process's lifetime the dirstate is parsed,
408 # Depending on when in the process's lifetime the dirstate is parsed,
405 # this can get very expensive. As a workaround, disable GC while
409 # this can get very expensive. As a workaround, disable GC while
406 # parsing the dirstate.
410 # parsing the dirstate.
407 #
411 #
408 # (we cannot decorate the function directly since it is in a C module)
412 # (we cannot decorate the function directly since it is in a C module)
409 parse_dirstate = util.nogc(parsers.parse_dirstate)
413 parse_dirstate = util.nogc(parsers.parse_dirstate)
410 p = parse_dirstate(self._map, self.copymap, st)
414 p = parse_dirstate(self._map, self.copymap, st)
411 if not self._dirtyparents:
415 if not self._dirtyparents:
412 self.setparents(*p)
416 self.setparents(*p)
413
417
414 # Avoid excess attribute lookups by fast pathing certain checks
418 # Avoid excess attribute lookups by fast pathing certain checks
415 self.__contains__ = self._map.__contains__
419 self.__contains__ = self._map.__contains__
416 self.__getitem__ = self._map.__getitem__
420 self.__getitem__ = self._map.__getitem__
417 self.get = self._map.get
421 self.get = self._map.get
418
422
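A minimal sketch of what a nogc-style wrapper like the one used above does, assuming only the standard gc and functools modules (Mercurial's util.nogc is the real implementation and may differ in detail):

    import functools
    import gc

    def nogc(func):
        """Run func with the cyclic garbage collector disabled."""
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            was_enabled = gc.isenabled()
            gc.disable()
            try:
                return func(*args, **kwargs)
            finally:
                if was_enabled:
                    gc.enable()
        return wrapper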
419 def write(self, st, now):
423 def write(self, _tr, st, now):
420 st.write(
424 st.write(
421 parsers.pack_dirstate(self._map, self.copymap, self.parents(), now)
425 parsers.pack_dirstate(self._map, self.copymap, self.parents(), now)
422 )
426 )
423 st.close()
427 st.close()
424 self._dirtyparents = False
428 self._dirtyparents = False
425 self.nonnormalset, self.otherparentset = self.nonnormalentries()
429 self.nonnormalset, self.otherparentset = self.nonnormalentries()
426
430
427 @propertycache
431 @propertycache
428 def nonnormalset(self):
432 def nonnormalset(self):
429 nonnorm, otherparents = self.nonnormalentries()
433 nonnorm, otherparents = self.nonnormalentries()
430 self.otherparentset = otherparents
434 self.otherparentset = otherparents
431 return nonnorm
435 return nonnorm
432
436
433 @propertycache
437 @propertycache
434 def otherparentset(self):
438 def otherparentset(self):
435 nonnorm, otherparents = self.nonnormalentries()
439 nonnorm, otherparents = self.nonnormalentries()
436 self.nonnormalset = nonnorm
440 self.nonnormalset = nonnorm
437 return otherparents
441 return otherparents
438
442
439 def non_normal_or_other_parent_paths(self):
443 def non_normal_or_other_parent_paths(self):
440 return self.nonnormalset.union(self.otherparentset)
444 return self.nonnormalset.union(self.otherparentset)
441
445
442 @propertycache
446 @propertycache
443 def identity(self):
447 def identity(self):
444 self._map
448 self._map
445 return self.identity
449 return self.identity
446
450
447 @propertycache
451 @propertycache
448 def dirfoldmap(self):
452 def dirfoldmap(self):
449 f = {}
453 f = {}
450 normcase = util.normcase
454 normcase = util.normcase
451 for name in self._dirs:
455 for name in self._dirs:
452 f[normcase(name)] = name
456 f[normcase(name)] = name
453 return f
457 return f
454
458
455
459
456 if rustmod is not None:
460 if rustmod is not None:
457
461
458 class dirstatemap(object):
462 class dirstatemap(object):
459 def __init__(self, ui, opener, root, nodeconstants, use_dirstate_v2):
463 def __init__(self, ui, opener, root, nodeconstants, use_dirstate_v2):
460 self._use_dirstate_v2 = use_dirstate_v2
464 self._use_dirstate_v2 = use_dirstate_v2
461 self._nodeconstants = nodeconstants
465 self._nodeconstants = nodeconstants
462 self._ui = ui
466 self._ui = ui
463 self._opener = opener
467 self._opener = opener
464 self._root = root
468 self._root = root
465 self._filename = b'dirstate'
469 self._filename = b'dirstate'
466 self._nodelen = 20 # Also update Rust code when changing this!
470 self._nodelen = 20 # Also update Rust code when changing this!
467 self._parents = None
471 self._parents = None
468 self._dirtyparents = False
472 self._dirtyparents = False
473 self._docket = None
469
474
470 # for consistent view between _pl() and _read() invocations
475 # for consistent view between _pl() and _read() invocations
471 self._pendingmode = None
476 self._pendingmode = None
472
477
473 self._use_dirstate_tree = self._ui.configbool(
478 self._use_dirstate_tree = self._ui.configbool(
474 b"experimental",
479 b"experimental",
475 b"dirstate-tree.in-memory",
480 b"dirstate-tree.in-memory",
476 False,
481 False,
477 )
482 )
478
483
479 def addfile(
484 def addfile(
480 self,
485 self,
481 f,
486 f,
482 mode=0,
487 mode=0,
483 size=None,
488 size=None,
484 mtime=None,
489 mtime=None,
485 added=False,
490 added=False,
486 merged=False,
491 merged=False,
487 from_p2=False,
492 from_p2=False,
488 possibly_dirty=False,
493 possibly_dirty=False,
489 ):
494 ):
490 return self._rustmap.addfile(
495 return self._rustmap.addfile(
491 f,
496 f,
492 mode,
497 mode,
493 size,
498 size,
494 mtime,
499 mtime,
495 added,
500 added,
496 merged,
501 merged,
497 from_p2,
502 from_p2,
498 possibly_dirty,
503 possibly_dirty,
499 )
504 )
500
505
501 def removefile(self, *args, **kwargs):
506 def removefile(self, *args, **kwargs):
502 return self._rustmap.removefile(*args, **kwargs)
507 return self._rustmap.removefile(*args, **kwargs)
503
508
504 def dropfile(self, *args, **kwargs):
509 def dropfile(self, *args, **kwargs):
505 return self._rustmap.dropfile(*args, **kwargs)
510 return self._rustmap.dropfile(*args, **kwargs)
506
511
507 def clearambiguoustimes(self, *args, **kwargs):
512 def clearambiguoustimes(self, *args, **kwargs):
508 return self._rustmap.clearambiguoustimes(*args, **kwargs)
513 return self._rustmap.clearambiguoustimes(*args, **kwargs)
509
514
510 def nonnormalentries(self):
515 def nonnormalentries(self):
511 return self._rustmap.nonnormalentries()
516 return self._rustmap.nonnormalentries()
512
517
513 def get(self, *args, **kwargs):
518 def get(self, *args, **kwargs):
514 return self._rustmap.get(*args, **kwargs)
519 return self._rustmap.get(*args, **kwargs)
515
520
516 @property
521 @property
517 def copymap(self):
522 def copymap(self):
518 return self._rustmap.copymap()
523 return self._rustmap.copymap()
519
524
520 def directories(self):
525 def directories(self):
521 return self._rustmap.directories()
526 return self._rustmap.directories()
522
527
523 def preload(self):
528 def preload(self):
524 self._rustmap
529 self._rustmap
525
530
526 def clear(self):
531 def clear(self):
527 self._rustmap.clear()
532 self._rustmap.clear()
528 self.setparents(
533 self.setparents(
529 self._nodeconstants.nullid, self._nodeconstants.nullid
534 self._nodeconstants.nullid, self._nodeconstants.nullid
530 )
535 )
531 util.clearcachedproperty(self, b"_dirs")
536 util.clearcachedproperty(self, b"_dirs")
532 util.clearcachedproperty(self, b"_alldirs")
537 util.clearcachedproperty(self, b"_alldirs")
533 util.clearcachedproperty(self, b"dirfoldmap")
538 util.clearcachedproperty(self, b"dirfoldmap")
534
539
535 def items(self):
540 def items(self):
536 return self._rustmap.items()
541 return self._rustmap.items()
537
542
538 def keys(self):
543 def keys(self):
539 return iter(self._rustmap)
544 return iter(self._rustmap)
540
545
541 def __contains__(self, key):
546 def __contains__(self, key):
542 return key in self._rustmap
547 return key in self._rustmap
543
548
544 def __getitem__(self, item):
549 def __getitem__(self, item):
545 return self._rustmap[item]
550 return self._rustmap[item]
546
551
547 def __len__(self):
552 def __len__(self):
548 return len(self._rustmap)
553 return len(self._rustmap)
549
554
550 def __iter__(self):
555 def __iter__(self):
551 return iter(self._rustmap)
556 return iter(self._rustmap)
552
557
553 # forward for python2,3 compat
558 # forward for python2,3 compat
554 iteritems = items
559 iteritems = items
555
560
556 def _opendirstatefile(self):
561 def _opendirstatefile(self):
557 fp, mode = txnutil.trypending(
562 fp, mode = txnutil.trypending(
558 self._root, self._opener, self._filename
563 self._root, self._opener, self._filename
559 )
564 )
560 if self._pendingmode is not None and self._pendingmode != mode:
565 if self._pendingmode is not None and self._pendingmode != mode:
561 fp.close()
566 fp.close()
562 raise error.Abort(
567 raise error.Abort(
563 _(b'working directory state may be changed in parallel')
568 _(b'working directory state may be changed in parallel')
564 )
569 )
565 self._pendingmode = mode
570 self._pendingmode = mode
566 return fp
571 return fp
567
572
573 def _readdirstatefile(self, size=-1):
574 try:
575 with self._opendirstatefile() as fp:
576 return fp.read(size)
577 except IOError as err:
578 if err.errno != errno.ENOENT:
579 raise
580 # File doesn't exist, so the current state is empty
581 return b''
582
568 def setparents(self, p1, p2):
583 def setparents(self, p1, p2):
569 self._parents = (p1, p2)
584 self._parents = (p1, p2)
570 self._dirtyparents = True
585 self._dirtyparents = True
571
586
572 def parents(self):
587 def parents(self):
573 if not self._parents:
588 if not self._parents:
574 if self._use_dirstate_v2:
589 if self._use_dirstate_v2:
575 offset = len(rustmod.V2_FORMAT_MARKER)
590 self._parents = self.docket.parents
576 else:
591 else:
577 offset = 0
592 read_len = self._nodelen * 2
578 read_len = offset + self._nodelen * 2
593 st = self._readdirstatefile(read_len)
579 try:
580 fp = self._opendirstatefile()
581 st = fp.read(read_len)
582 fp.close()
583 except IOError as err:
584 if err.errno != errno.ENOENT:
585 raise
586 # File doesn't exist, so the current state is empty
587 st = b''
588
589 l = len(st)
594 l = len(st)
590 if l == read_len:
595 if l == read_len:
591 st = st[offset:]
592 self._parents = (
596 self._parents = (
593 st[: self._nodelen],
597 st[: self._nodelen],
594 st[self._nodelen : 2 * self._nodelen],
598 st[self._nodelen : 2 * self._nodelen],
595 )
599 )
596 elif l == 0:
600 elif l == 0:
597 self._parents = (
601 self._parents = (
598 self._nodeconstants.nullid,
602 self._nodeconstants.nullid,
599 self._nodeconstants.nullid,
603 self._nodeconstants.nullid,
600 )
604 )
601 else:
605 else:
602 raise error.Abort(
606 raise error.Abort(
603 _(b'working directory state appears damaged!')
607 _(b'working directory state appears damaged!')
604 )
608 )
605
609
606 return self._parents
610 return self._parents
607
611
612 @property
613 def docket(self):
614 if not self._docket:
615 if not self._use_dirstate_v2:
616 raise error.ProgrammingError(
617 b'dirstate only has a docket in v2 format'
618 )
619 self._docket = docketmod.DirstateDocket.parse(
620 self._readdirstatefile(), self._nodeconstants
621 )
622 return self._docket
623
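A minimal usage sketch of the docket indirection, using only the DirstateDocket API exercised in this file (variable names are hypothetical):

    # in v2, .hg/dirstate is a small docket naming the real data file
    docket = docketmod.DirstateDocket.parse(dirstate_bytes, nodeconstants)
    p1, p2 = docket.parents
    if docket.uuid:
        data = opener.read(docket.data_filename())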
608 @propertycache
624 @propertycache
609 def _rustmap(self):
625 def _rustmap(self):
610 """
626 """
611 Fills the Dirstatemap when called.
627 Fills the Dirstatemap when called.
612 """
628 """
613 # ignore HG_PENDING because identity is used only for writing
629 # ignore HG_PENDING because identity is used only for writing
614 self.identity = util.filestat.frompath(
630 self.identity = util.filestat.frompath(
615 self._opener.join(self._filename)
631 self._opener.join(self._filename)
616 )
632 )
617
633
618 try:
634 if self._use_dirstate_v2:
619 fp = self._opendirstatefile()
635 if self.docket.uuid:
620 try:
636 # TODO: use mmap when possible
621 st = fp.read()
637 data = self._opener.read(self.docket.data_filename())
622 finally:
638 else:
623 fp.close()
639 data = b''
624 except IOError as err:
640 self._rustmap = rustmod.DirstateMap.new_v2(data)
625 if err.errno != errno.ENOENT:
641 parents = self.docket.parents
626 raise
642 else:
627 st = b''
643 self._rustmap, parents = rustmod.DirstateMap.new_v1(
628
644 self._use_dirstate_tree, self._readdirstatefile()
629 self._rustmap, parents = rustmod.DirstateMap.new(
630 self._use_dirstate_tree, self._use_dirstate_v2, st
631 )
645 )
632
646
633 if parents and not self._dirtyparents:
647 if parents and not self._dirtyparents:
634 self.setparents(*parents)
648 self.setparents(*parents)
635
649
636 self.__contains__ = self._rustmap.__contains__
650 self.__contains__ = self._rustmap.__contains__
637 self.__getitem__ = self._rustmap.__getitem__
651 self.__getitem__ = self._rustmap.__getitem__
638 self.get = self._rustmap.get
652 self.get = self._rustmap.get
639 return self._rustmap
653 return self._rustmap
640
654
641 def write(self, st, now):
655 def write(self, tr, st, now):
642 parents = self.parents()
656 if self._use_dirstate_v2:
643 packed = self._rustmap.write(
657 packed = self._rustmap.write_v2(now)
644 self._use_dirstate_v2, parents[0], parents[1], now
658 old_docket = self.docket
659 new_docket = docketmod.DirstateDocket.with_new_uuid(
660 self.parents(), len(packed)
645 )
661 )
662 self._opener.write(new_docket.data_filename(), packed)
663 # Write the new docket after the new data file has been
664 # written. Because `st` was opened with `atomictemp=True`,
665 # the actual `.hg/dirstate` file is only affected on close.
666 st.write(new_docket.serialize())
667 st.close()
668 # Remove the old data file after the new docket pointing to
669 # the new data file was written.
670 if old_docket.uuid:
671 self._opener.unlink(old_docket.data_filename())
672 self._docket = new_docket
673 else:
674 p1, p2 = self.parents()
675 packed = self._rustmap.write_v1(p1, p2, now)
646 st.write(packed)
676 st.write(packed)
647 st.close()
677 st.close()
648 self._dirtyparents = False
678 self._dirtyparents = False
649
679
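The v2 branch above relies on a specific ordering for crash safety; restated as a condensed sketch (names as in the method above):

    # 1) write the complete new data file under a fresh uuid
    packed = self._rustmap.write_v2(now)
    new_docket = docketmod.DirstateDocket.with_new_uuid(
        self.parents(), len(packed)
    )
    self._opener.write(new_docket.data_filename(), packed)
    # 2) atomically swap in the new docket; a crash before this point
    #    leaves the old docket and its data file untouched
    st.write(new_docket.serialize())
    st.close()
    # 3) only then drop the data file named by the old docket
    if old_docket.uuid:
        self._opener.unlink(old_docket.data_filename())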
650 @propertycache
680 @propertycache
651 def filefoldmap(self):
681 def filefoldmap(self):
652 """Returns a dictionary mapping normalized case paths to their
682 """Returns a dictionary mapping normalized case paths to their
653 non-normalized versions.
683 non-normalized versions.
654 """
684 """
655 return self._rustmap.filefoldmapasdict()
685 return self._rustmap.filefoldmapasdict()
656
686
657 def hastrackeddir(self, d):
687 def hastrackeddir(self, d):
658 return self._rustmap.hastrackeddir(d)
688 return self._rustmap.hastrackeddir(d)
659
689
660 def hasdir(self, d):
690 def hasdir(self, d):
661 return self._rustmap.hasdir(d)
691 return self._rustmap.hasdir(d)
662
692
663 @propertycache
693 @propertycache
664 def identity(self):
694 def identity(self):
665 self._rustmap
695 self._rustmap
666 return self.identity
696 return self.identity
667
697
668 @property
698 @property
669 def nonnormalset(self):
699 def nonnormalset(self):
670 nonnorm = self._rustmap.non_normal_entries()
700 nonnorm = self._rustmap.non_normal_entries()
671 return nonnorm
701 return nonnorm
672
702
673 @propertycache
703 @propertycache
674 def otherparentset(self):
704 def otherparentset(self):
675 otherparents = self._rustmap.other_parent_entries()
705 otherparents = self._rustmap.other_parent_entries()
676 return otherparents
706 return otherparents
677
707
678 def non_normal_or_other_parent_paths(self):
708 def non_normal_or_other_parent_paths(self):
679 return self._rustmap.non_normal_or_other_parent_paths()
709 return self._rustmap.non_normal_or_other_parent_paths()
680
710
681 @propertycache
711 @propertycache
682 def dirfoldmap(self):
712 def dirfoldmap(self):
683 f = {}
713 f = {}
684 normcase = util.normcase
714 normcase = util.normcase
685 for name, _pseudo_entry in self.directories():
715 for name, _pseudo_entry in self.directories():
686 f[normcase(name)] = name
716 f[normcase(name)] = name
687 return f
717 return f
@@ -1,648 +1,649 b''
1 # upgrade.py - functions for in place upgrade of Mercurial repository
1 # upgrade.py - functions for in place upgrade of Mercurial repository
2 #
2 #
3 # Copyright (c) 2016-present, Gregory Szorc
3 # Copyright (c) 2016-present, Gregory Szorc
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import stat
10 import stat
11
11
12 from ..i18n import _
12 from ..i18n import _
13 from ..pycompat import getattr
13 from ..pycompat import getattr
14 from .. import (
14 from .. import (
15 changelog,
15 changelog,
16 error,
16 error,
17 filelog,
17 filelog,
18 manifest,
18 manifest,
19 metadata,
19 metadata,
20 pycompat,
20 pycompat,
21 requirements,
21 requirements,
22 scmutil,
22 scmutil,
23 store,
23 store,
24 util,
24 util,
25 vfs as vfsmod,
25 vfs as vfsmod,
26 )
26 )
27 from ..revlogutils import (
27 from ..revlogutils import (
28 constants as revlogconst,
28 constants as revlogconst,
29 flagutil,
29 flagutil,
30 nodemap,
30 nodemap,
31 sidedata as sidedatamod,
31 sidedata as sidedatamod,
32 )
32 )
33 from . import actions as upgrade_actions
33 from . import actions as upgrade_actions
34
34
35
35
36 def get_sidedata_helpers(srcrepo, dstrepo):
36 def get_sidedata_helpers(srcrepo, dstrepo):
37 use_w = srcrepo.ui.configbool(b'experimental', b'worker.repository-upgrade')
37 use_w = srcrepo.ui.configbool(b'experimental', b'worker.repository-upgrade')
38 sequential = pycompat.iswindows or not use_w
38 sequential = pycompat.iswindows or not use_w
39 if not sequential:
39 if not sequential:
40 srcrepo.register_sidedata_computer(
40 srcrepo.register_sidedata_computer(
41 revlogconst.KIND_CHANGELOG,
41 revlogconst.KIND_CHANGELOG,
42 sidedatamod.SD_FILES,
42 sidedatamod.SD_FILES,
43 (sidedatamod.SD_FILES,),
43 (sidedatamod.SD_FILES,),
44 metadata._get_worker_sidedata_adder(srcrepo, dstrepo),
44 metadata._get_worker_sidedata_adder(srcrepo, dstrepo),
45 flagutil.REVIDX_HASCOPIESINFO,
45 flagutil.REVIDX_HASCOPIESINFO,
46 replace=True,
46 replace=True,
47 )
47 )
48 return sidedatamod.get_sidedata_helpers(srcrepo, dstrepo._wanted_sidedata)
48 return sidedatamod.get_sidedata_helpers(srcrepo, dstrepo._wanted_sidedata)
49
49
50
50
51 def _revlogfrompath(repo, rl_type, path):
51 def _revlogfrompath(repo, rl_type, path):
52 """Obtain a revlog from a repo path.
52 """Obtain a revlog from a repo path.
53
53
54 An instance of the appropriate class is returned.
54 An instance of the appropriate class is returned.
55 """
55 """
56 if rl_type & store.FILEFLAGS_CHANGELOG:
56 if rl_type & store.FILEFLAGS_CHANGELOG:
57 return changelog.changelog(repo.svfs)
57 return changelog.changelog(repo.svfs)
58 elif rl_type & store.FILEFLAGS_MANIFESTLOG:
58 elif rl_type & store.FILEFLAGS_MANIFESTLOG:
59 mandir = b''
59 mandir = b''
60 if b'/' in path:
60 if b'/' in path:
61 mandir = path.rsplit(b'/', 1)[0]
61 mandir = path.rsplit(b'/', 1)[0]
62 return manifest.manifestrevlog(
62 return manifest.manifestrevlog(
63 repo.nodeconstants, repo.svfs, tree=mandir
63 repo.nodeconstants, repo.svfs, tree=mandir
64 )
64 )
65 else:
65 else:
66 # drop the extension and the `data/` prefix
66 # drop the extension and the `data/` prefix
67 path_part = path.rsplit(b'.', 1)[0].split(b'/', 1)
67 path_part = path.rsplit(b'.', 1)[0].split(b'/', 1)
68 if len(path_part) < 2:
68 if len(path_part) < 2:
69 msg = _(b'cannot recognize revlog from filename: %s')
69 msg = _(b'cannot recognize revlog from filename: %s')
70 msg %= path
70 msg %= path
71 raise error.Abort(msg)
71 raise error.Abort(msg)
72 path = path_part[1]
72 path = path_part[1]
73 return filelog.filelog(repo.svfs, path)
73 return filelog.filelog(repo.svfs, path)
74
74
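A hedged usage sketch of the helper above (the path is illustrative; the FILEFLAGS_* constants come from the store module):

    # a filelog path is reduced to its store-relative name:
    # b'data/foo.txt.i' -> filelog for b'foo.txt'
    rl = _revlogfrompath(repo, store.FILEFLAGS_FILELOG, b'data/foo.txt.i')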
75
75
76 def _copyrevlog(tr, destrepo, oldrl, rl_type, unencodedname):
76 def _copyrevlog(tr, destrepo, oldrl, rl_type, unencodedname):
77 """copy all relevant files for `oldrl` into `destrepo` store
77 """copy all relevant files for `oldrl` into `destrepo` store
78
78
79 Files are copied "as is" without any transformation. The copy is performed
79 Files are copied "as is" without any transformation. The copy is performed
80 without extra checks. Callers are responsible for making sure the copied
80 without extra checks. Callers are responsible for making sure the copied
81 content is compatible with format of the destination repository.
81 content is compatible with format of the destination repository.
82 """
82 """
83 oldrl = getattr(oldrl, '_revlog', oldrl)
83 oldrl = getattr(oldrl, '_revlog', oldrl)
84 newrl = _revlogfrompath(destrepo, rl_type, unencodedname)
84 newrl = _revlogfrompath(destrepo, rl_type, unencodedname)
85 newrl = getattr(newrl, '_revlog', newrl)
85 newrl = getattr(newrl, '_revlog', newrl)
86
86
87 oldvfs = oldrl.opener
87 oldvfs = oldrl.opener
88 newvfs = newrl.opener
88 newvfs = newrl.opener
89 oldindex = oldvfs.join(oldrl._indexfile)
89 oldindex = oldvfs.join(oldrl._indexfile)
90 newindex = newvfs.join(newrl._indexfile)
90 newindex = newvfs.join(newrl._indexfile)
91 olddata = oldvfs.join(oldrl._datafile)
91 olddata = oldvfs.join(oldrl._datafile)
92 newdata = newvfs.join(newrl._datafile)
92 newdata = newvfs.join(newrl._datafile)
93
93
94 with newvfs(newrl._indexfile, b'w'):
94 with newvfs(newrl._indexfile, b'w'):
95 pass # create all the directories
95 pass # create all the directories
96
96
97 util.copyfile(oldindex, newindex)
97 util.copyfile(oldindex, newindex)
98 copydata = oldrl.opener.exists(oldrl._datafile)
98 copydata = oldrl.opener.exists(oldrl._datafile)
99 if copydata:
99 if copydata:
100 util.copyfile(olddata, newdata)
100 util.copyfile(olddata, newdata)
101
101
102 if rl_type & store.FILEFLAGS_FILELOG:
102 if rl_type & store.FILEFLAGS_FILELOG:
103 destrepo.svfs.fncache.add(unencodedname)
103 destrepo.svfs.fncache.add(unencodedname)
104 if copydata:
104 if copydata:
105 destrepo.svfs.fncache.add(unencodedname[:-2] + b'.d')
105 destrepo.svfs.fncache.add(unencodedname[:-2] + b'.d')
106
106
107
107
108 UPGRADE_CHANGELOG = b"changelog"
108 UPGRADE_CHANGELOG = b"changelog"
109 UPGRADE_MANIFEST = b"manifest"
109 UPGRADE_MANIFEST = b"manifest"
110 UPGRADE_FILELOGS = b"all-filelogs"
110 UPGRADE_FILELOGS = b"all-filelogs"
111
111
112 UPGRADE_ALL_REVLOGS = frozenset(
112 UPGRADE_ALL_REVLOGS = frozenset(
113 [UPGRADE_CHANGELOG, UPGRADE_MANIFEST, UPGRADE_FILELOGS]
113 [UPGRADE_CHANGELOG, UPGRADE_MANIFEST, UPGRADE_FILELOGS]
114 )
114 )
115
115
116
116
117 def matchrevlog(revlogfilter, rl_type):
117 def matchrevlog(revlogfilter, rl_type):
118 """check if a revlog is selected for cloning.
118 """check if a revlog is selected for cloning.
119
119
120 In other words, does the revlog need any updates, or can it be
120 In other words, does the revlog need any updates, or can it be
121 blindly copied?
121 blindly copied?
122
122
123 The store entry is checked against the passed filter"""
123 The store entry is checked against the passed filter"""
124 if rl_type & store.FILEFLAGS_CHANGELOG:
124 if rl_type & store.FILEFLAGS_CHANGELOG:
125 return UPGRADE_CHANGELOG in revlogfilter
125 return UPGRADE_CHANGELOG in revlogfilter
126 elif rl_type & store.FILEFLAGS_MANIFESTLOG:
126 elif rl_type & store.FILEFLAGS_MANIFESTLOG:
127 return UPGRADE_MANIFEST in revlogfilter
127 return UPGRADE_MANIFEST in revlogfilter
128 assert rl_type & store.FILEFLAGS_FILELOG
128 assert rl_type & store.FILEFLAGS_FILELOG
129 return UPGRADE_FILELOGS in revlogfilter
129 return UPGRADE_FILELOGS in revlogfilter
130
130
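A small illustrative call, assuming the UPGRADE_* constants defined above:

    # re-process only the changelog; manifests and filelogs are copied blindly
    revlogfilter = {UPGRADE_CHANGELOG}
    matchrevlog(revlogfilter, store.FILEFLAGS_CHANGELOG)  # True
    matchrevlog(revlogfilter, store.FILEFLAGS_FILELOG)    # False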
131
131
132 def _perform_clone(
132 def _perform_clone(
133 ui,
133 ui,
134 dstrepo,
134 dstrepo,
135 tr,
135 tr,
136 old_revlog,
136 old_revlog,
137 rl_type,
137 rl_type,
138 unencoded,
138 unencoded,
139 upgrade_op,
139 upgrade_op,
140 sidedata_helpers,
140 sidedata_helpers,
141 oncopiedrevision,
141 oncopiedrevision,
142 ):
142 ):
143 """returns the new revlog object created"""
143 """returns the new revlog object created"""
144 newrl = None
144 newrl = None
145 if matchrevlog(upgrade_op.revlogs_to_process, rl_type):
145 if matchrevlog(upgrade_op.revlogs_to_process, rl_type):
146 ui.note(
146 ui.note(
147 _(b'cloning %d revisions from %s\n') % (len(old_revlog), unencoded)
147 _(b'cloning %d revisions from %s\n') % (len(old_revlog), unencoded)
148 )
148 )
149 newrl = _revlogfrompath(dstrepo, rl_type, unencoded)
149 newrl = _revlogfrompath(dstrepo, rl_type, unencoded)
150 old_revlog.clone(
150 old_revlog.clone(
151 tr,
151 tr,
152 newrl,
152 newrl,
153 addrevisioncb=oncopiedrevision,
153 addrevisioncb=oncopiedrevision,
154 deltareuse=upgrade_op.delta_reuse_mode,
154 deltareuse=upgrade_op.delta_reuse_mode,
155 forcedeltabothparents=upgrade_op.force_re_delta_both_parents,
155 forcedeltabothparents=upgrade_op.force_re_delta_both_parents,
156 sidedata_helpers=sidedata_helpers,
156 sidedata_helpers=sidedata_helpers,
157 )
157 )
158 else:
158 else:
159 msg = _(b'blindly copying %s containing %i revisions\n')
159 msg = _(b'blindly copying %s containing %i revisions\n')
160 ui.note(msg % (unencoded, len(old_revlog)))
160 ui.note(msg % (unencoded, len(old_revlog)))
161 _copyrevlog(tr, dstrepo, old_revlog, rl_type, unencoded)
161 _copyrevlog(tr, dstrepo, old_revlog, rl_type, unencoded)
162
162
163 newrl = _revlogfrompath(dstrepo, rl_type, unencoded)
163 newrl = _revlogfrompath(dstrepo, rl_type, unencoded)
164 return newrl
164 return newrl
165
165
166
166
167 def _clonerevlogs(
167 def _clonerevlogs(
168 ui,
168 ui,
169 srcrepo,
169 srcrepo,
170 dstrepo,
170 dstrepo,
171 tr,
171 tr,
172 upgrade_op,
172 upgrade_op,
173 ):
173 ):
174 """Copy revlogs between 2 repos."""
174 """Copy revlogs between 2 repos."""
175 revcount = 0
175 revcount = 0
176 srcsize = 0
176 srcsize = 0
177 srcrawsize = 0
177 srcrawsize = 0
178 dstsize = 0
178 dstsize = 0
179 fcount = 0
179 fcount = 0
180 frevcount = 0
180 frevcount = 0
181 fsrcsize = 0
181 fsrcsize = 0
182 frawsize = 0
182 frawsize = 0
183 fdstsize = 0
183 fdstsize = 0
184 mcount = 0
184 mcount = 0
185 mrevcount = 0
185 mrevcount = 0
186 msrcsize = 0
186 msrcsize = 0
187 mrawsize = 0
187 mrawsize = 0
188 mdstsize = 0
188 mdstsize = 0
189 crevcount = 0
189 crevcount = 0
190 csrcsize = 0
190 csrcsize = 0
191 crawsize = 0
191 crawsize = 0
192 cdstsize = 0
192 cdstsize = 0
193
193
194 alldatafiles = list(srcrepo.store.walk())
194 alldatafiles = list(srcrepo.store.walk())
195 # mapping of data files which need to be cloned
195 # mapping of data files which need to be cloned
196 # key is unencoded filename
196 # key is unencoded filename
197 # value is revlog_object_from_srcrepo
197 # value is revlog_object_from_srcrepo
198 manifests = {}
198 manifests = {}
199 changelogs = {}
199 changelogs = {}
200 filelogs = {}
200 filelogs = {}
201
201
202 # Perform a pass to collect metadata. This validates we can open all
202 # Perform a pass to collect metadata. This validates we can open all
203 # source files and allows a unified progress bar to be displayed.
203 # source files and allows a unified progress bar to be displayed.
204 for rl_type, unencoded, encoded, size in alldatafiles:
204 for rl_type, unencoded, encoded, size in alldatafiles:
205 if not rl_type & store.FILEFLAGS_REVLOG_MAIN:
205 if not rl_type & store.FILEFLAGS_REVLOG_MAIN:
206 continue
206 continue
207
207
208 # the store.walk function will wrongly pick up transaction backups and
208 # the store.walk function will wrongly pick up transaction backups and
209 # get confused. As a quick fix for the 5.9 release, we ignore those.
209 # get confused. As a quick fix for the 5.9 release, we ignore those.
210 # (this is not a module constant because it seems better to keep the
210 # (this is not a module constant because it seems better to keep the
211 # hack together)
211 # hack together)
212 skip_undo = (
212 skip_undo = (
213 b'undo.backup.00changelog.i',
213 b'undo.backup.00changelog.i',
214 b'undo.backup.00manifest.i',
214 b'undo.backup.00manifest.i',
215 )
215 )
216 if unencoded in skip_undo:
216 if unencoded in skip_undo:
217 continue
217 continue
218
218
219 rl = _revlogfrompath(srcrepo, rl_type, unencoded)
219 rl = _revlogfrompath(srcrepo, rl_type, unencoded)
220
220
221 info = rl.storageinfo(
221 info = rl.storageinfo(
222 exclusivefiles=True,
222 exclusivefiles=True,
223 revisionscount=True,
223 revisionscount=True,
224 trackedsize=True,
224 trackedsize=True,
225 storedsize=True,
225 storedsize=True,
226 )
226 )
227
227
228 revcount += info[b'revisionscount'] or 0
228 revcount += info[b'revisionscount'] or 0
229 datasize = info[b'storedsize'] or 0
229 datasize = info[b'storedsize'] or 0
230 rawsize = info[b'trackedsize'] or 0
230 rawsize = info[b'trackedsize'] or 0
231
231
232 srcsize += datasize
232 srcsize += datasize
233 srcrawsize += rawsize
233 srcrawsize += rawsize
234
234
235 # This is for the separate progress bars.
235 # This is for the separate progress bars.
236 if rl_type & store.FILEFLAGS_CHANGELOG:
236 if rl_type & store.FILEFLAGS_CHANGELOG:
237 changelogs[unencoded] = (rl_type, rl)
237 changelogs[unencoded] = (rl_type, rl)
238 crevcount += len(rl)
238 crevcount += len(rl)
239 csrcsize += datasize
239 csrcsize += datasize
240 crawsize += rawsize
240 crawsize += rawsize
241 elif rl_type & store.FILEFLAGS_MANIFESTLOG:
241 elif rl_type & store.FILEFLAGS_MANIFESTLOG:
242 manifests[unencoded] = (rl_type, rl)
242 manifests[unencoded] = (rl_type, rl)
243 mcount += 1
243 mcount += 1
244 mrevcount += len(rl)
244 mrevcount += len(rl)
245 msrcsize += datasize
245 msrcsize += datasize
246 mrawsize += rawsize
246 mrawsize += rawsize
247 elif rl_type & store.FILEFLAGS_FILELOG:
247 elif rl_type & store.FILEFLAGS_FILELOG:
248 filelogs[unencoded] = (rl_type, rl)
248 filelogs[unencoded] = (rl_type, rl)
249 fcount += 1
249 fcount += 1
250 frevcount += len(rl)
250 frevcount += len(rl)
251 fsrcsize += datasize
251 fsrcsize += datasize
252 frawsize += rawsize
252 frawsize += rawsize
253 else:
253 else:
254 raise error.ProgrammingError(b'unknown revlog type')
254 raise error.ProgrammingError(b'unknown revlog type')
255
255
256 if not revcount:
256 if not revcount:
257 return
257 return
258
258
259 ui.status(
259 ui.status(
260 _(
260 _(
261 b'migrating %d total revisions (%d in filelogs, %d in manifests, '
261 b'migrating %d total revisions (%d in filelogs, %d in manifests, '
262 b'%d in changelog)\n'
262 b'%d in changelog)\n'
263 )
263 )
264 % (revcount, frevcount, mrevcount, crevcount)
264 % (revcount, frevcount, mrevcount, crevcount)
265 )
265 )
266 ui.status(
266 ui.status(
267 _(b'migrating %s in store; %s tracked data\n')
267 _(b'migrating %s in store; %s tracked data\n')
268 % ((util.bytecount(srcsize), util.bytecount(srcrawsize)))
268 % ((util.bytecount(srcsize), util.bytecount(srcrawsize)))
269 )
269 )
270
270
271 # Used to keep track of progress.
271 # Used to keep track of progress.
272 progress = None
272 progress = None
273
273
274 def oncopiedrevision(rl, rev, node):
274 def oncopiedrevision(rl, rev, node):
275 progress.increment()
275 progress.increment()
276
276
277 sidedata_helpers = get_sidedata_helpers(srcrepo, dstrepo)
277 sidedata_helpers = get_sidedata_helpers(srcrepo, dstrepo)
278
278
279 # Migrating filelogs
279 # Migrating filelogs
280 ui.status(
280 ui.status(
281 _(
281 _(
282 b'migrating %d filelogs containing %d revisions '
282 b'migrating %d filelogs containing %d revisions '
283 b'(%s in store; %s tracked data)\n'
283 b'(%s in store; %s tracked data)\n'
284 )
284 )
285 % (
285 % (
286 fcount,
286 fcount,
287 frevcount,
287 frevcount,
288 util.bytecount(fsrcsize),
288 util.bytecount(fsrcsize),
289 util.bytecount(frawsize),
289 util.bytecount(frawsize),
290 )
290 )
291 )
291 )
292 progress = srcrepo.ui.makeprogress(_(b'file revisions'), total=frevcount)
292 progress = srcrepo.ui.makeprogress(_(b'file revisions'), total=frevcount)
293 for unencoded, (rl_type, oldrl) in sorted(filelogs.items()):
293 for unencoded, (rl_type, oldrl) in sorted(filelogs.items()):
294 newrl = _perform_clone(
294 newrl = _perform_clone(
295 ui,
295 ui,
296 dstrepo,
296 dstrepo,
297 tr,
297 tr,
298 oldrl,
298 oldrl,
299 rl_type,
299 rl_type,
300 unencoded,
300 unencoded,
301 upgrade_op,
301 upgrade_op,
302 sidedata_helpers,
302 sidedata_helpers,
303 oncopiedrevision,
303 oncopiedrevision,
304 )
304 )
305 info = newrl.storageinfo(storedsize=True)
305 info = newrl.storageinfo(storedsize=True)
306 fdstsize += info[b'storedsize'] or 0
306 fdstsize += info[b'storedsize'] or 0
307 ui.status(
307 ui.status(
308 _(
308 _(
309 b'finished migrating %d filelog revisions across %d '
309 b'finished migrating %d filelog revisions across %d '
310 b'filelogs; change in size: %s\n'
310 b'filelogs; change in size: %s\n'
311 )
311 )
312 % (frevcount, fcount, util.bytecount(fdstsize - fsrcsize))
312 % (frevcount, fcount, util.bytecount(fdstsize - fsrcsize))
313 )
313 )
314
314
315 # Migrating manifests
315 # Migrating manifests
316 ui.status(
316 ui.status(
317 _(
317 _(
318 b'migrating %d manifests containing %d revisions '
318 b'migrating %d manifests containing %d revisions '
319 b'(%s in store; %s tracked data)\n'
319 b'(%s in store; %s tracked data)\n'
320 )
320 )
321 % (
321 % (
322 mcount,
322 mcount,
323 mrevcount,
323 mrevcount,
324 util.bytecount(msrcsize),
324 util.bytecount(msrcsize),
325 util.bytecount(mrawsize),
325 util.bytecount(mrawsize),
326 )
326 )
327 )
327 )
328 if progress:
328 if progress:
329 progress.complete()
329 progress.complete()
330 progress = srcrepo.ui.makeprogress(
330 progress = srcrepo.ui.makeprogress(
331 _(b'manifest revisions'), total=mrevcount
331 _(b'manifest revisions'), total=mrevcount
332 )
332 )
333 for unencoded, (rl_type, oldrl) in sorted(manifests.items()):
333 for unencoded, (rl_type, oldrl) in sorted(manifests.items()):
334 newrl = _perform_clone(
334 newrl = _perform_clone(
335 ui,
335 ui,
336 dstrepo,
336 dstrepo,
337 tr,
337 tr,
338 oldrl,
338 oldrl,
339 rl_type,
339 rl_type,
340 unencoded,
340 unencoded,
341 upgrade_op,
341 upgrade_op,
342 sidedata_helpers,
342 sidedata_helpers,
343 oncopiedrevision,
343 oncopiedrevision,
344 )
344 )
345 info = newrl.storageinfo(storedsize=True)
345 info = newrl.storageinfo(storedsize=True)
346 mdstsize += info[b'storedsize'] or 0
346 mdstsize += info[b'storedsize'] or 0
347 ui.status(
347 ui.status(
348 _(
348 _(
349 b'finished migrating %d manifest revisions across %d '
349 b'finished migrating %d manifest revisions across %d '
350 b'manifests; change in size: %s\n'
350 b'manifests; change in size: %s\n'
351 )
351 )
352 % (mrevcount, mcount, util.bytecount(mdstsize - msrcsize))
352 % (mrevcount, mcount, util.bytecount(mdstsize - msrcsize))
353 )
353 )
354
354
355 # Migrating changelog
355 # Migrating changelog
356 ui.status(
356 ui.status(
357 _(
357 _(
358 b'migrating changelog containing %d revisions '
358 b'migrating changelog containing %d revisions '
359 b'(%s in store; %s tracked data)\n'
359 b'(%s in store; %s tracked data)\n'
360 )
360 )
361 % (
361 % (
362 crevcount,
362 crevcount,
363 util.bytecount(csrcsize),
363 util.bytecount(csrcsize),
364 util.bytecount(crawsize),
364 util.bytecount(crawsize),
365 )
365 )
366 )
366 )
367 if progress:
367 if progress:
368 progress.complete()
368 progress.complete()
369 progress = srcrepo.ui.makeprogress(
369 progress = srcrepo.ui.makeprogress(
370 _(b'changelog revisions'), total=crevcount
370 _(b'changelog revisions'), total=crevcount
371 )
371 )
372 for unencoded, (rl_type, oldrl) in sorted(changelogs.items()):
372 for unencoded, (rl_type, oldrl) in sorted(changelogs.items()):
373 newrl = _perform_clone(
373 newrl = _perform_clone(
374 ui,
374 ui,
375 dstrepo,
375 dstrepo,
376 tr,
376 tr,
377 oldrl,
377 oldrl,
378 rl_type,
378 rl_type,
379 unencoded,
379 unencoded,
380 upgrade_op,
380 upgrade_op,
381 sidedata_helpers,
381 sidedata_helpers,
382 oncopiedrevision,
382 oncopiedrevision,
383 )
383 )
384 info = newrl.storageinfo(storedsize=True)
384 info = newrl.storageinfo(storedsize=True)
385 cdstsize += info[b'storedsize'] or 0
385 cdstsize += info[b'storedsize'] or 0
386 progress.complete()
386 progress.complete()
387 ui.status(
387 ui.status(
388 _(
388 _(
389 b'finished migrating %d changelog revisions; change in size: '
389 b'finished migrating %d changelog revisions; change in size: '
390 b'%s\n'
390 b'%s\n'
391 )
391 )
392 % (crevcount, util.bytecount(cdstsize - csrcsize))
392 % (crevcount, util.bytecount(cdstsize - csrcsize))
393 )
393 )
394
394
395 dstsize = fdstsize + mdstsize + cdstsize
395 dstsize = fdstsize + mdstsize + cdstsize
396 ui.status(
396 ui.status(
397 _(
397 _(
398 b'finished migrating %d total revisions; total change in store '
398 b'finished migrating %d total revisions; total change in store '
399 b'size: %s\n'
399 b'size: %s\n'
400 )
400 )
401 % (revcount, util.bytecount(dstsize - srcsize))
401 % (revcount, util.bytecount(dstsize - srcsize))
402 )
402 )
403
403
404
404
405 def _files_to_copy_post_revlog_clone(srcrepo):
405 def _files_to_copy_post_revlog_clone(srcrepo):
406 """yields files which should be copied to destination after revlogs
406 """yields files which should be copied to destination after revlogs
407 are cloned"""
407 are cloned"""
408 for path, kind, st in sorted(srcrepo.store.vfs.readdir(b'', stat=True)):
408 for path, kind, st in sorted(srcrepo.store.vfs.readdir(b'', stat=True)):
409 # don't copy revlogs as they are already cloned
409 # don't copy revlogs as they are already cloned
410 if store.revlog_type(path) is not None:
410 if store.revlog_type(path) is not None:
411 continue
411 continue
412 # Skip transaction related files.
412 # Skip transaction related files.
413 if path.startswith(b'undo'):
413 if path.startswith(b'undo'):
414 continue
414 continue
415 # Only copy regular files.
415 # Only copy regular files.
416 if kind != stat.S_IFREG:
416 if kind != stat.S_IFREG:
417 continue
417 continue
418 # Skip other skipped files.
418 # Skip other skipped files.
419 if path in (b'lock', b'fncache'):
419 if path in (b'lock', b'fncache'):
420 continue
420 continue
421 # TODO: should we skip cache too?
421 # TODO: should we skip cache too?
422
422
423 yield path
423 yield path
424
424
425
425
426 def _replacestores(currentrepo, upgradedrepo, backupvfs, upgrade_op):
426 def _replacestores(currentrepo, upgradedrepo, backupvfs, upgrade_op):
427 """Replace the stores after current repository is upgraded
427 """Replace the stores after current repository is upgraded
428
428
429 Creates a backup of current repository store at backup path
429 Creates a backup of current repository store at backup path
430 Replaces upgraded store files in current repo from upgraded one
430 Replaces upgraded store files in current repo from upgraded one
431
431
432 Arguments:
432 Arguments:
433 currentrepo: repo object of current repository
433 currentrepo: repo object of current repository
434 upgradedrepo: repo object of the upgraded data
434 upgradedrepo: repo object of the upgraded data
435 backupvfs: vfs object for the backup path
435 backupvfs: vfs object for the backup path
436 upgrade_op: upgrade operation object
436 upgrade_op: upgrade operation object
437 used to decide what is upgraded
437 used to decide what is upgraded
438 """
438 """
439 # TODO: don't blindly rename everything in store
439 # TODO: don't blindly rename everything in store
440 # There can be upgrades where store is not touched at all
440 # There can be upgrades where store is not touched at all
441 if upgrade_op.backup_store:
441 if upgrade_op.backup_store:
442 util.rename(currentrepo.spath, backupvfs.join(b'store'))
442 util.rename(currentrepo.spath, backupvfs.join(b'store'))
443 else:
443 else:
444 currentrepo.vfs.rmtree(b'store', forcibly=True)
444 currentrepo.vfs.rmtree(b'store', forcibly=True)
445 util.rename(upgradedrepo.spath, currentrepo.spath)
445 util.rename(upgradedrepo.spath, currentrepo.spath)
446
446
447
447
448 def finishdatamigration(ui, srcrepo, dstrepo, requirements):
448 def finishdatamigration(ui, srcrepo, dstrepo, requirements):
449 """Hook point for extensions to perform additional actions during upgrade.
449 """Hook point for extensions to perform additional actions during upgrade.
450
450
451 This function is called after revlogs and store files have been copied but
451 This function is called after revlogs and store files have been copied but
452 before the new store is swapped into the original location.
452 before the new store is swapped into the original location.
453 """
453 """
454
454
455
455
456 def upgrade(ui, srcrepo, dstrepo, upgrade_op):
456 def upgrade(ui, srcrepo, dstrepo, upgrade_op):
457 """Do the low-level work of upgrading a repository.
457 """Do the low-level work of upgrading a repository.
458
458
459 The upgrade is effectively performed as a copy between a source
459 The upgrade is effectively performed as a copy between a source
460 repository and a temporary destination repository.
460 repository and a temporary destination repository.
461
461
462 The source repository is unmodified for as long as possible so the
462 The source repository is unmodified for as long as possible so the
463 upgrade can abort at any time without causing loss of service for
463 upgrade can abort at any time without causing loss of service for
464 readers and without corrupting the source repository.
464 readers and without corrupting the source repository.
465 """
465 """
466 assert srcrepo.currentwlock()
466 assert srcrepo.currentwlock()
467 assert dstrepo.currentwlock()
467 assert dstrepo.currentwlock()
468 backuppath = None
468 backuppath = None
469 backupvfs = None
469 backupvfs = None
470
470
471 ui.status(
471 ui.status(
472 _(
472 _(
473 b'(it is safe to interrupt this process any time before '
473 b'(it is safe to interrupt this process any time before '
474 b'data migration completes)\n'
474 b'data migration completes)\n'
475 )
475 )
476 )
476 )
477
477
478 if upgrade_actions.dirstatev2 in upgrade_op.upgrade_actions:
478 if upgrade_actions.dirstatev2 in upgrade_op.upgrade_actions:
479 ui.status(_(b'upgrading to dirstate-v2 from v1\n'))
479 ui.status(_(b'upgrading to dirstate-v2 from v1\n'))
480 upgrade_dirstate(ui, srcrepo, upgrade_op, b'v1', b'v2')
480 upgrade_dirstate(ui, srcrepo, upgrade_op, b'v1', b'v2')
481 upgrade_op.upgrade_actions.remove(upgrade_actions.dirstatev2)
481 upgrade_op.upgrade_actions.remove(upgrade_actions.dirstatev2)
482
482
483 if upgrade_actions.dirstatev2 in upgrade_op.removed_actions:
483 if upgrade_actions.dirstatev2 in upgrade_op.removed_actions:
484 ui.status(_(b'downgrading from dirstate-v2 to v1\n'))
484 ui.status(_(b'downgrading from dirstate-v2 to v1\n'))
485 upgrade_dirstate(ui, srcrepo, upgrade_op, b'v2', b'v1')
485 upgrade_dirstate(ui, srcrepo, upgrade_op, b'v2', b'v1')
486 upgrade_op.removed_actions.remove(upgrade_actions.dirstatev2)
486 upgrade_op.removed_actions.remove(upgrade_actions.dirstatev2)
487
487
488 if not (upgrade_op.upgrade_actions or upgrade_op.removed_actions):
488 if not (upgrade_op.upgrade_actions or upgrade_op.removed_actions):
489 return
489 return
490
490
491 if upgrade_op.requirements_only:
491 if upgrade_op.requirements_only:
492 ui.status(_(b'upgrading repository requirements\n'))
492 ui.status(_(b'upgrading repository requirements\n'))
493 scmutil.writereporequirements(srcrepo, upgrade_op.new_requirements)
493 scmutil.writereporequirements(srcrepo, upgrade_op.new_requirements)
494 # if the only action is a persistent-nodemap upgrade, directly write
494 # if the only action is a persistent-nodemap upgrade, directly write
495 # the nodemap file and update the requirements instead of going
495 # the nodemap file and update the requirements instead of going
496 # through the whole cloning process
496 # through the whole cloning process
497 elif (
497 elif (
498 len(upgrade_op.upgrade_actions) == 1
498 len(upgrade_op.upgrade_actions) == 1
499 and b'persistent-nodemap' in upgrade_op.upgrade_actions_names
499 and b'persistent-nodemap' in upgrade_op.upgrade_actions_names
500 and not upgrade_op.removed_actions
500 and not upgrade_op.removed_actions
501 ):
501 ):
502 ui.status(
502 ui.status(
503 _(b'upgrading repository to use persistent nodemap feature\n')
503 _(b'upgrading repository to use persistent nodemap feature\n')
504 )
504 )
505 with srcrepo.transaction(b'upgrade') as tr:
505 with srcrepo.transaction(b'upgrade') as tr:
506 unfi = srcrepo.unfiltered()
506 unfi = srcrepo.unfiltered()
507 cl = unfi.changelog
507 cl = unfi.changelog
508 nodemap.persist_nodemap(tr, cl, force=True)
508 nodemap.persist_nodemap(tr, cl, force=True)
509 # we want to operate directly on the underlying revlog to force the
509 # we want to operate directly on the underlying revlog to force the
510 # creation of a nodemap file. This is fine since this is upgrade code
510 # creation of a nodemap file. This is fine since this is upgrade code
511 # and it heavily relies on the repository being revlog-based, hence
511 # and it heavily relies on the repository being revlog-based, hence
512 # accessing private attributes can be justified
512 # accessing private attributes can be justified
513 nodemap.persist_nodemap(
513 nodemap.persist_nodemap(
514 tr, unfi.manifestlog._rootstore._revlog, force=True
514 tr, unfi.manifestlog._rootstore._revlog, force=True
515 )
515 )
516 scmutil.writereporequirements(srcrepo, upgrade_op.new_requirements)
516 scmutil.writereporequirements(srcrepo, upgrade_op.new_requirements)
517 elif (
517 elif (
518 len(upgrade_op.removed_actions) == 1
518 len(upgrade_op.removed_actions) == 1
519 and [
519 and [
520 x
520 x
521 for x in upgrade_op.removed_actions
521 for x in upgrade_op.removed_actions
522 if x.name == b'persistent-nodemap'
522 if x.name == b'persistent-nodemap'
523 ]
523 ]
524 and not upgrade_op.upgrade_actions
524 and not upgrade_op.upgrade_actions
525 ):
525 ):
526 ui.status(
526 ui.status(
527 _(b'downgrading repository to not use persistent nodemap feature\n')
527 _(b'downgrading repository to not use persistent nodemap feature\n')
528 )
528 )
529 with srcrepo.transaction(b'upgrade') as tr:
529 with srcrepo.transaction(b'upgrade') as tr:
530 unfi = srcrepo.unfiltered()
530 unfi = srcrepo.unfiltered()
531 cl = unfi.changelog
531 cl = unfi.changelog
532 nodemap.delete_nodemap(tr, srcrepo, cl)
532 nodemap.delete_nodemap(tr, srcrepo, cl)
533 # see the comment 20 lines above about accessing private attributes
533 # see the comment 20 lines above about accessing private attributes
534 nodemap.delete_nodemap(
534 nodemap.delete_nodemap(
535 tr, srcrepo, unfi.manifestlog._rootstore._revlog
535 tr, srcrepo, unfi.manifestlog._rootstore._revlog
536 )
536 )
537 scmutil.writereporequirements(srcrepo, upgrade_op.new_requirements)
537 scmutil.writereporequirements(srcrepo, upgrade_op.new_requirements)
538 else:
538 else:
539 with dstrepo.transaction(b'upgrade') as tr:
539 with dstrepo.transaction(b'upgrade') as tr:
540 _clonerevlogs(
540 _clonerevlogs(
541 ui,
541 ui,
542 srcrepo,
542 srcrepo,
543 dstrepo,
543 dstrepo,
544 tr,
544 tr,
545 upgrade_op,
545 upgrade_op,
546 )
546 )
547
547
548 # Now copy other files in the store directory.
548 # Now copy other files in the store directory.
549 for p in _files_to_copy_post_revlog_clone(srcrepo):
549 for p in _files_to_copy_post_revlog_clone(srcrepo):
550 srcrepo.ui.status(_(b'copying %s\n') % p)
550 srcrepo.ui.status(_(b'copying %s\n') % p)
551 src = srcrepo.store.rawvfs.join(p)
551 src = srcrepo.store.rawvfs.join(p)
552 dst = dstrepo.store.rawvfs.join(p)
552 dst = dstrepo.store.rawvfs.join(p)
553 util.copyfile(src, dst, copystat=True)
553 util.copyfile(src, dst, copystat=True)
554
554
555 finishdatamigration(ui, srcrepo, dstrepo, requirements)
555 finishdatamigration(ui, srcrepo, dstrepo, requirements)
556
556
557 ui.status(_(b'data fully upgraded in a temporary repository\n'))
557 ui.status(_(b'data fully upgraded in a temporary repository\n'))
558
558
559 if upgrade_op.backup_store:
559 if upgrade_op.backup_store:
560 backuppath = pycompat.mkdtemp(
560 backuppath = pycompat.mkdtemp(
561 prefix=b'upgradebackup.', dir=srcrepo.path
561 prefix=b'upgradebackup.', dir=srcrepo.path
562 )
562 )
563 backupvfs = vfsmod.vfs(backuppath)
563 backupvfs = vfsmod.vfs(backuppath)
564
564
565 # Make a backup of the requires file first, as it is the first to be modified.
565 # Make a backup of the requires file first, as it is the first to be modified.
566 util.copyfile(
566 util.copyfile(
567 srcrepo.vfs.join(b'requires'), backupvfs.join(b'requires')
567 srcrepo.vfs.join(b'requires'), backupvfs.join(b'requires')
568 )
568 )
569
569
570 # We install an arbitrary requirement that clients must not support
570 # We install an arbitrary requirement that clients must not support
571 # as a mechanism to lock out new clients during the data swap. This is
571 # as a mechanism to lock out new clients during the data swap. This is
572 # better than allowing a client to continue while the repository is in
572 # better than allowing a client to continue while the repository is in
573 # an inconsistent state.
573 # an inconsistent state.
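# For example, while 'upgradeinprogress' is listed in '.hg/requires',
# other clients abort with a message along these lines (wording is
# approximate and version-dependent):
#
#     abort: repository requires features unknown to this Mercurial: upgradeinprogress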
574 ui.status(
574 ui.status(
575 _(
575 _(
576 b'marking source repository as being upgraded; clients will be '
576 b'marking source repository as being upgraded; clients will be '
577 b'unable to read from repository\n'
577 b'unable to read from repository\n'
578 )
578 )
579 )
579 )
580 scmutil.writereporequirements(
580 scmutil.writereporequirements(
581 srcrepo, srcrepo.requirements | {b'upgradeinprogress'}
581 srcrepo, srcrepo.requirements | {b'upgradeinprogress'}
582 )
582 )
583
583
584 ui.status(_(b'starting in-place swap of repository data\n'))
584 ui.status(_(b'starting in-place swap of repository data\n'))
585 if upgrade_op.backup_store:
585 if upgrade_op.backup_store:
586 ui.status(
586 ui.status(
587 _(b'replaced files will be backed up at %s\n') % backuppath
587 _(b'replaced files will be backed up at %s\n') % backuppath
588 )
588 )
589
589
590 # Now swap in the new store directory. Doing it as a rename should make
590 # Now swap in the new store directory. Doing it as a rename should make
591 # the operation nearly instantaneous and atomic (at least in well-behaved
591 # the operation nearly instantaneous and atomic (at least in well-behaved
592 # environments).
592 # environments).
593 ui.status(_(b'replacing store...\n'))
593 ui.status(_(b'replacing store...\n'))
594 tstart = util.timer()
594 tstart = util.timer()
595 _replacestores(srcrepo, dstrepo, backupvfs, upgrade_op)
595 _replacestores(srcrepo, dstrepo, backupvfs, upgrade_op)
596 elapsed = util.timer() - tstart
596 elapsed = util.timer() - tstart
597 ui.status(
597 ui.status(
598 _(
598 _(
599 b'store replacement complete; repository was inconsistent for '
599 b'store replacement complete; repository was inconsistent for '
600 b'%0.1fs\n'
600 b'%0.1fs\n'
601 )
601 )
602 % elapsed
602 % elapsed
603 )
603 )
604
604
605 # We first write the requirements file. Any new requirements will lock
605 # We first write the requirements file. Any new requirements will lock
606 # out legacy clients.
606 # out legacy clients.
607 ui.status(
607 ui.status(
608 _(
608 _(
609 b'finalizing requirements file and making repository readable '
609 b'finalizing requirements file and making repository readable '
610 b'again\n'
610 b'again\n'
611 )
611 )
612 )
612 )
613 scmutil.writereporequirements(srcrepo, upgrade_op.new_requirements)
613 scmutil.writereporequirements(srcrepo, upgrade_op.new_requirements)
614
614
615 if upgrade_op.backup_store:
615 if upgrade_op.backup_store:
616 # The lock file from the old store won't be removed because nothing has a
616 # The lock file from the old store won't be removed because nothing has a
617 # reference to its new location. So clean it up manually. Alternatively, we
617 # reference to its new location. So clean it up manually. Alternatively, we
618 # could update srcrepo.svfs and other variables to point to the new
618 # could update srcrepo.svfs and other variables to point to the new
619 # location. This is simpler.
619 # location. This is simpler.
620 assert backupvfs is not None # help pytype
620 assert backupvfs is not None # help pytype
621 backupvfs.unlink(b'store/lock')
621 backupvfs.unlink(b'store/lock')
622
622
623 return backuppath
623 return backuppath
624
624
625
625
626 def upgrade_dirstate(ui, srcrepo, upgrade_op, old, new):
626 def upgrade_dirstate(ui, srcrepo, upgrade_op, old, new):
627 if upgrade_op.backup_store:
627 if upgrade_op.backup_store:
628 backuppath = pycompat.mkdtemp(
628 backuppath = pycompat.mkdtemp(
629 prefix=b'upgradebackup.', dir=srcrepo.path
629 prefix=b'upgradebackup.', dir=srcrepo.path
630 )
630 )
631 ui.status(_(b'replaced files will be backed up at %s\n') % backuppath)
631 ui.status(_(b'replaced files will be backed up at %s\n') % backuppath)
632 backupvfs = vfsmod.vfs(backuppath)
632 backupvfs = vfsmod.vfs(backuppath)
633 util.copyfile(
633 util.copyfile(
634 srcrepo.vfs.join(b'requires'), backupvfs.join(b'requires')
634 srcrepo.vfs.join(b'requires'), backupvfs.join(b'requires')
635 )
635 )
636 util.copyfile(
636 util.copyfile(
637 srcrepo.vfs.join(b'dirstate'), backupvfs.join(b'dirstate')
637 srcrepo.vfs.join(b'dirstate'), backupvfs.join(b'dirstate')
638 )
638 )
639
639
640 assert srcrepo.dirstate._use_dirstate_v2 == (old == b'v2')
640 assert srcrepo.dirstate._use_dirstate_v2 == (old == b'v2')
641 srcrepo.dirstate._map._use_dirstate_tree = True
641 srcrepo.dirstate._map._use_dirstate_tree = True
642 srcrepo.dirstate._map.preload()
642 srcrepo.dirstate._map.preload()
643 srcrepo.dirstate._use_dirstate_v2 = new == b'v2'
643 srcrepo.dirstate._use_dirstate_v2 = new == b'v2'
644 srcrepo.dirstate._map._use_dirstate_v2 = srcrepo.dirstate._use_dirstate_v2
644 srcrepo.dirstate._map._use_dirstate_v2 = srcrepo.dirstate._use_dirstate_v2
645 srcrepo.dirstate._dirty = True
645 srcrepo.dirstate._dirty = True
646 srcrepo.vfs.unlink(b'dirstate')
646 srcrepo.dirstate.write(None)
647 srcrepo.dirstate.write(None)
647
648
648 scmutil.writereporequirements(srcrepo, upgrade_op.new_requirements)
649 scmutil.writereporequirements(srcrepo, upgrade_op.new_requirements)
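# Illustrative invocation of this upgrade path; the flag and config names
# here are indicative and may vary across Mercurial versions:
#
#     $ hg debugupgraderepo --config format.exp-dirstate-v2=1 --run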
@@ -1,1206 +1,1200 b''
1 use bytes_cast::BytesCast;
1 use bytes_cast::BytesCast;
2 use micro_timer::timed;
2 use micro_timer::timed;
3 use std::borrow::Cow;
3 use std::borrow::Cow;
4 use std::convert::TryInto;
4 use std::convert::TryInto;
5 use std::path::PathBuf;
5 use std::path::PathBuf;
6
6
7 use super::on_disk;
7 use super::on_disk;
8 use super::on_disk::DirstateV2ParseError;
8 use super::on_disk::DirstateV2ParseError;
9 use super::path_with_basename::WithBasename;
9 use super::path_with_basename::WithBasename;
10 use crate::dirstate::parsers::pack_entry;
10 use crate::dirstate::parsers::pack_entry;
11 use crate::dirstate::parsers::packed_entry_size;
11 use crate::dirstate::parsers::packed_entry_size;
12 use crate::dirstate::parsers::parse_dirstate_entries;
12 use crate::dirstate::parsers::parse_dirstate_entries;
13 use crate::dirstate::parsers::Timestamp;
13 use crate::dirstate::parsers::Timestamp;
14 use crate::dirstate::MTIME_UNSET;
14 use crate::dirstate::MTIME_UNSET;
15 use crate::dirstate::SIZE_FROM_OTHER_PARENT;
15 use crate::dirstate::SIZE_FROM_OTHER_PARENT;
16 use crate::dirstate::SIZE_NON_NORMAL;
16 use crate::dirstate::SIZE_NON_NORMAL;
17 use crate::dirstate::V1_RANGEMASK;
17 use crate::dirstate::V1_RANGEMASK;
18 use crate::matchers::Matcher;
18 use crate::matchers::Matcher;
19 use crate::utils::hg_path::{HgPath, HgPathBuf};
19 use crate::utils::hg_path::{HgPath, HgPathBuf};
20 use crate::CopyMapIter;
20 use crate::CopyMapIter;
21 use crate::DirstateEntry;
21 use crate::DirstateEntry;
22 use crate::DirstateError;
22 use crate::DirstateError;
23 use crate::DirstateParents;
23 use crate::DirstateParents;
24 use crate::DirstateStatus;
24 use crate::DirstateStatus;
25 use crate::EntryState;
25 use crate::EntryState;
26 use crate::FastHashMap;
26 use crate::FastHashMap;
27 use crate::PatternFileWarning;
27 use crate::PatternFileWarning;
28 use crate::StateMapIter;
28 use crate::StateMapIter;
29 use crate::StatusError;
29 use crate::StatusError;
30 use crate::StatusOptions;
30 use crate::StatusOptions;
31
31
32 pub struct DirstateMap<'on_disk> {
32 pub struct DirstateMap<'on_disk> {
33 /// Contents of the `.hg/dirstate` file
33 /// Contents of the `.hg/dirstate` file
34 pub(super) on_disk: &'on_disk [u8],
34 pub(super) on_disk: &'on_disk [u8],
35
35
36 pub(super) root: ChildNodes<'on_disk>,
36 pub(super) root: ChildNodes<'on_disk>,
37
37
38 /// Number of nodes anywhere in the tree that have `.entry.is_some()`.
38 /// Number of nodes anywhere in the tree that have `.entry.is_some()`.
39 pub(super) nodes_with_entry_count: u32,
39 pub(super) nodes_with_entry_count: u32,
40
40
41 /// Number of nodes anywhere in the tree that have
41 /// Number of nodes anywhere in the tree that have
42 /// `.copy_source.is_some()`.
42 /// `.copy_source.is_some()`.
43 pub(super) nodes_with_copy_source_count: u32,
43 pub(super) nodes_with_copy_source_count: u32,
44
44
45 /// See on_disk::Header
45 /// See on_disk::Header
46 pub(super) ignore_patterns_hash: on_disk::IgnorePatternsHash,
46 pub(super) ignore_patterns_hash: on_disk::IgnorePatternsHash,
47 }
47 }
48
48
49 /// Using a plain `HgPathBuf` of the full path from the repository root as a
49 /// Using a plain `HgPathBuf` of the full path from the repository root as a
50 /// map key would also work: all paths in a given map have the same parent
50 /// map key would also work: all paths in a given map have the same parent
51 /// path, so comparing full paths gives the same result as comparing base
51 /// path, so comparing full paths gives the same result as comparing base
52 /// names. However `HashMap` would waste time always re-hashing the same
52 /// names. However `HashMap` would waste time always re-hashing the same
53 /// string prefix.
53 /// string prefix.
54 pub(super) type NodeKey<'on_disk> = WithBasename<Cow<'on_disk, HgPath>>;
54 pub(super) type NodeKey<'on_disk> = WithBasename<Cow<'on_disk, HgPath>>;
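// For example, the children of `dir/subdir/` are keyed so that lookups
// hash and compare only the base names (`a`, `b`, ...) instead of
// re-hashing the shared `dir/subdir/` prefix on every access.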
55
55
56 /// Similar to `&'tree Cow<'on_disk, HgPath>`, but can also be returned
56 /// Similar to `&'tree Cow<'on_disk, HgPath>`, but can also be returned
57 /// for on-disk nodes that don’t actually have a `Cow` to borrow.
57 /// for on-disk nodes that don’t actually have a `Cow` to borrow.
58 pub(super) enum BorrowedPath<'tree, 'on_disk> {
58 pub(super) enum BorrowedPath<'tree, 'on_disk> {
59 InMemory(&'tree HgPathBuf),
59 InMemory(&'tree HgPathBuf),
60 OnDisk(&'on_disk HgPath),
60 OnDisk(&'on_disk HgPath),
61 }
61 }
62
62
63 pub(super) enum ChildNodes<'on_disk> {
63 pub(super) enum ChildNodes<'on_disk> {
64 InMemory(FastHashMap<NodeKey<'on_disk>, Node<'on_disk>>),
64 InMemory(FastHashMap<NodeKey<'on_disk>, Node<'on_disk>>),
65 OnDisk(&'on_disk [on_disk::Node]),
65 OnDisk(&'on_disk [on_disk::Node]),
66 }
66 }
67
67
68 pub(super) enum ChildNodesRef<'tree, 'on_disk> {
68 pub(super) enum ChildNodesRef<'tree, 'on_disk> {
69 InMemory(&'tree FastHashMap<NodeKey<'on_disk>, Node<'on_disk>>),
69 InMemory(&'tree FastHashMap<NodeKey<'on_disk>, Node<'on_disk>>),
70 OnDisk(&'on_disk [on_disk::Node]),
70 OnDisk(&'on_disk [on_disk::Node]),
71 }
71 }
72
72
73 pub(super) enum NodeRef<'tree, 'on_disk> {
73 pub(super) enum NodeRef<'tree, 'on_disk> {
74 InMemory(&'tree NodeKey<'on_disk>, &'tree Node<'on_disk>),
74 InMemory(&'tree NodeKey<'on_disk>, &'tree Node<'on_disk>),
75 OnDisk(&'on_disk on_disk::Node),
75 OnDisk(&'on_disk on_disk::Node),
76 }
76 }
77
77
78 impl<'tree, 'on_disk> BorrowedPath<'tree, 'on_disk> {
78 impl<'tree, 'on_disk> BorrowedPath<'tree, 'on_disk> {
79 pub fn detach_from_tree(&self) -> Cow<'on_disk, HgPath> {
79 pub fn detach_from_tree(&self) -> Cow<'on_disk, HgPath> {
80 match *self {
80 match *self {
81 BorrowedPath::InMemory(in_memory) => Cow::Owned(in_memory.clone()),
81 BorrowedPath::InMemory(in_memory) => Cow::Owned(in_memory.clone()),
82 BorrowedPath::OnDisk(on_disk) => Cow::Borrowed(on_disk),
82 BorrowedPath::OnDisk(on_disk) => Cow::Borrowed(on_disk),
83 }
83 }
84 }
84 }
85 }
85 }
86
86
87 impl<'tree, 'on_disk> std::ops::Deref for BorrowedPath<'tree, 'on_disk> {
87 impl<'tree, 'on_disk> std::ops::Deref for BorrowedPath<'tree, 'on_disk> {
88 type Target = HgPath;
88 type Target = HgPath;
89
89
90 fn deref(&self) -> &HgPath {
90 fn deref(&self) -> &HgPath {
91 match *self {
91 match *self {
92 BorrowedPath::InMemory(in_memory) => in_memory,
92 BorrowedPath::InMemory(in_memory) => in_memory,
93 BorrowedPath::OnDisk(on_disk) => on_disk,
93 BorrowedPath::OnDisk(on_disk) => on_disk,
94 }
94 }
95 }
95 }
96 }
96 }
97
97
98 impl Default for ChildNodes<'_> {
98 impl Default for ChildNodes<'_> {
99 fn default() -> Self {
99 fn default() -> Self {
100 ChildNodes::InMemory(Default::default())
100 ChildNodes::InMemory(Default::default())
101 }
101 }
102 }
102 }
103
103
104 impl<'on_disk> ChildNodes<'on_disk> {
104 impl<'on_disk> ChildNodes<'on_disk> {
105 pub(super) fn as_ref<'tree>(
105 pub(super) fn as_ref<'tree>(
106 &'tree self,
106 &'tree self,
107 ) -> ChildNodesRef<'tree, 'on_disk> {
107 ) -> ChildNodesRef<'tree, 'on_disk> {
108 match self {
108 match self {
109 ChildNodes::InMemory(nodes) => ChildNodesRef::InMemory(nodes),
109 ChildNodes::InMemory(nodes) => ChildNodesRef::InMemory(nodes),
110 ChildNodes::OnDisk(nodes) => ChildNodesRef::OnDisk(nodes),
110 ChildNodes::OnDisk(nodes) => ChildNodesRef::OnDisk(nodes),
111 }
111 }
112 }
112 }
113
113
114 pub(super) fn is_empty(&self) -> bool {
114 pub(super) fn is_empty(&self) -> bool {
115 match self {
115 match self {
116 ChildNodes::InMemory(nodes) => nodes.is_empty(),
116 ChildNodes::InMemory(nodes) => nodes.is_empty(),
117 ChildNodes::OnDisk(nodes) => nodes.is_empty(),
117 ChildNodes::OnDisk(nodes) => nodes.is_empty(),
118 }
118 }
119 }
119 }
120
120
121 pub(super) fn make_mut(
121 pub(super) fn make_mut(
122 &mut self,
122 &mut self,
123 on_disk: &'on_disk [u8],
123 on_disk: &'on_disk [u8],
124 ) -> Result<
124 ) -> Result<
125 &mut FastHashMap<NodeKey<'on_disk>, Node<'on_disk>>,
125 &mut FastHashMap<NodeKey<'on_disk>, Node<'on_disk>>,
126 DirstateV2ParseError,
126 DirstateV2ParseError,
127 > {
127 > {
128 match self {
128 match self {
129 ChildNodes::InMemory(nodes) => Ok(nodes),
129 ChildNodes::InMemory(nodes) => Ok(nodes),
130 ChildNodes::OnDisk(nodes) => {
130 ChildNodes::OnDisk(nodes) => {
131 let nodes = nodes
131 let nodes = nodes
132 .iter()
132 .iter()
133 .map(|node| {
133 .map(|node| {
134 Ok((
134 Ok((
135 node.path(on_disk)?,
135 node.path(on_disk)?,
136 node.to_in_memory_node(on_disk)?,
136 node.to_in_memory_node(on_disk)?,
137 ))
137 ))
138 })
138 })
139 .collect::<Result<_, _>>()?;
139 .collect::<Result<_, _>>()?;
140 *self = ChildNodes::InMemory(nodes);
140 *self = ChildNodes::InMemory(nodes);
141 match self {
141 match self {
142 ChildNodes::InMemory(nodes) => Ok(nodes),
142 ChildNodes::InMemory(nodes) => Ok(nodes),
143 ChildNodes::OnDisk(_) => unreachable!(),
143 ChildNodes::OnDisk(_) => unreachable!(),
144 }
144 }
145 }
145 }
146 }
146 }
147 }
147 }
148 }
148 }
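// Note that `make_mut` acts as a lazy copy-on-write step: a level of
// on-disk nodes stays borrowed from the (typically memory-mapped)
// dirstate file until it is first mutated, at which point that whole
// level is converted into an in-memory `FastHashMap`.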
149
149
150 impl<'tree, 'on_disk> ChildNodesRef<'tree, 'on_disk> {
150 impl<'tree, 'on_disk> ChildNodesRef<'tree, 'on_disk> {
151 pub(super) fn get(
151 pub(super) fn get(
152 &self,
152 &self,
153 base_name: &HgPath,
153 base_name: &HgPath,
154 on_disk: &'on_disk [u8],
154 on_disk: &'on_disk [u8],
155 ) -> Result<Option<NodeRef<'tree, 'on_disk>>, DirstateV2ParseError> {
155 ) -> Result<Option<NodeRef<'tree, 'on_disk>>, DirstateV2ParseError> {
156 match self {
156 match self {
157 ChildNodesRef::InMemory(nodes) => Ok(nodes
157 ChildNodesRef::InMemory(nodes) => Ok(nodes
158 .get_key_value(base_name)
158 .get_key_value(base_name)
159 .map(|(k, v)| NodeRef::InMemory(k, v))),
159 .map(|(k, v)| NodeRef::InMemory(k, v))),
160 ChildNodesRef::OnDisk(nodes) => {
160 ChildNodesRef::OnDisk(nodes) => {
161 let mut parse_result = Ok(());
161 let mut parse_result = Ok(());
162 let search_result = nodes.binary_search_by(|node| {
162 let search_result = nodes.binary_search_by(|node| {
163 match node.base_name(on_disk) {
163 match node.base_name(on_disk) {
164 Ok(node_base_name) => node_base_name.cmp(base_name),
164 Ok(node_base_name) => node_base_name.cmp(base_name),
165 Err(e) => {
165 Err(e) => {
166 parse_result = Err(e);
166 parse_result = Err(e);
167 // Dummy comparison result, `search_result` won’t
167 // Dummy comparison result, `search_result` won’t
168 // be used since `parse_result` is an error
168 // be used since `parse_result` is an error
169 std::cmp::Ordering::Equal
169 std::cmp::Ordering::Equal
170 }
170 }
171 }
171 }
172 });
172 });
173 parse_result.map(|()| {
173 parse_result.map(|()| {
174 search_result.ok().map(|i| NodeRef::OnDisk(&nodes[i]))
174 search_result.ok().map(|i| NodeRef::OnDisk(&nodes[i]))
175 })
175 })
176 }
176 }
177 }
177 }
178 }
178 }
179
179
180 /// Iterate in undefined order
180 /// Iterate in undefined order
181 pub(super) fn iter(
181 pub(super) fn iter(
182 &self,
182 &self,
183 ) -> impl Iterator<Item = NodeRef<'tree, 'on_disk>> {
183 ) -> impl Iterator<Item = NodeRef<'tree, 'on_disk>> {
184 match self {
184 match self {
185 ChildNodesRef::InMemory(nodes) => itertools::Either::Left(
185 ChildNodesRef::InMemory(nodes) => itertools::Either::Left(
186 nodes.iter().map(|(k, v)| NodeRef::InMemory(k, v)),
186 nodes.iter().map(|(k, v)| NodeRef::InMemory(k, v)),
187 ),
187 ),
188 ChildNodesRef::OnDisk(nodes) => {
188 ChildNodesRef::OnDisk(nodes) => {
189 itertools::Either::Right(nodes.iter().map(NodeRef::OnDisk))
189 itertools::Either::Right(nodes.iter().map(NodeRef::OnDisk))
190 }
190 }
191 }
191 }
192 }
192 }
193
193
194 /// Iterate in parallel in undefined order
194 /// Iterate in parallel in undefined order
195 pub(super) fn par_iter(
195 pub(super) fn par_iter(
196 &self,
196 &self,
197 ) -> impl rayon::iter::ParallelIterator<Item = NodeRef<'tree, 'on_disk>>
197 ) -> impl rayon::iter::ParallelIterator<Item = NodeRef<'tree, 'on_disk>>
198 {
198 {
199 use rayon::prelude::*;
199 use rayon::prelude::*;
200 match self {
200 match self {
201 ChildNodesRef::InMemory(nodes) => rayon::iter::Either::Left(
201 ChildNodesRef::InMemory(nodes) => rayon::iter::Either::Left(
202 nodes.par_iter().map(|(k, v)| NodeRef::InMemory(k, v)),
202 nodes.par_iter().map(|(k, v)| NodeRef::InMemory(k, v)),
203 ),
203 ),
204 ChildNodesRef::OnDisk(nodes) => rayon::iter::Either::Right(
204 ChildNodesRef::OnDisk(nodes) => rayon::iter::Either::Right(
205 nodes.par_iter().map(NodeRef::OnDisk),
205 nodes.par_iter().map(NodeRef::OnDisk),
206 ),
206 ),
207 }
207 }
208 }
208 }
209
209
210 pub(super) fn sorted(&self) -> Vec<NodeRef<'tree, 'on_disk>> {
210 pub(super) fn sorted(&self) -> Vec<NodeRef<'tree, 'on_disk>> {
211 match self {
211 match self {
212 ChildNodesRef::InMemory(nodes) => {
212 ChildNodesRef::InMemory(nodes) => {
213 let mut vec: Vec<_> = nodes
213 let mut vec: Vec<_> = nodes
214 .iter()
214 .iter()
215 .map(|(k, v)| NodeRef::InMemory(k, v))
215 .map(|(k, v)| NodeRef::InMemory(k, v))
216 .collect();
216 .collect();
217 fn sort_key<'a>(node: &'a NodeRef) -> &'a HgPath {
217 fn sort_key<'a>(node: &'a NodeRef) -> &'a HgPath {
218 match node {
218 match node {
219 NodeRef::InMemory(path, _node) => path.base_name(),
219 NodeRef::InMemory(path, _node) => path.base_name(),
220 NodeRef::OnDisk(_) => unreachable!(),
220 NodeRef::OnDisk(_) => unreachable!(),
221 }
221 }
222 }
222 }
223 // `sort_unstable_by_key` doesn’t allow keys borrowing from the
223 // `sort_unstable_by_key` doesn’t allow keys borrowing from the
224 // value: https://github.com/rust-lang/rust/issues/34162
224 // value: https://github.com/rust-lang/rust/issues/34162
225 vec.sort_unstable_by(|a, b| sort_key(a).cmp(sort_key(b)));
225 vec.sort_unstable_by(|a, b| sort_key(a).cmp(sort_key(b)));
226 vec
226 vec
227 }
227 }
228 ChildNodesRef::OnDisk(nodes) => {
228 ChildNodesRef::OnDisk(nodes) => {
229 // Nodes on disk are already sorted
229 // Nodes on disk are already sorted
230 nodes.iter().map(NodeRef::OnDisk).collect()
230 nodes.iter().map(NodeRef::OnDisk).collect()
231 }
231 }
232 }
232 }
233 }
233 }
234 }
234 }
235
235
236 impl<'tree, 'on_disk> NodeRef<'tree, 'on_disk> {
236 impl<'tree, 'on_disk> NodeRef<'tree, 'on_disk> {
237 pub(super) fn full_path(
237 pub(super) fn full_path(
238 &self,
238 &self,
239 on_disk: &'on_disk [u8],
239 on_disk: &'on_disk [u8],
240 ) -> Result<&'tree HgPath, DirstateV2ParseError> {
240 ) -> Result<&'tree HgPath, DirstateV2ParseError> {
241 match self {
241 match self {
242 NodeRef::InMemory(path, _node) => Ok(path.full_path()),
242 NodeRef::InMemory(path, _node) => Ok(path.full_path()),
243 NodeRef::OnDisk(node) => node.full_path(on_disk),
243 NodeRef::OnDisk(node) => node.full_path(on_disk),
244 }
244 }
245 }
245 }
246
246
247 /// Returns a `BorrowedPath`, which can be turned into a `Cow<'on_disk,
247 /// Returns a `BorrowedPath`, which can be turned into a `Cow<'on_disk,
248 /// HgPath>` detached from `'tree`
248 /// HgPath>` detached from `'tree`
249 pub(super) fn full_path_borrowed(
249 pub(super) fn full_path_borrowed(
250 &self,
250 &self,
251 on_disk: &'on_disk [u8],
251 on_disk: &'on_disk [u8],
252 ) -> Result<BorrowedPath<'tree, 'on_disk>, DirstateV2ParseError> {
252 ) -> Result<BorrowedPath<'tree, 'on_disk>, DirstateV2ParseError> {
253 match self {
253 match self {
254 NodeRef::InMemory(path, _node) => match path.full_path() {
254 NodeRef::InMemory(path, _node) => match path.full_path() {
255 Cow::Borrowed(on_disk) => Ok(BorrowedPath::OnDisk(on_disk)),
255 Cow::Borrowed(on_disk) => Ok(BorrowedPath::OnDisk(on_disk)),
256 Cow::Owned(in_memory) => Ok(BorrowedPath::InMemory(in_memory)),
256 Cow::Owned(in_memory) => Ok(BorrowedPath::InMemory(in_memory)),
257 },
257 },
258 NodeRef::OnDisk(node) => {
258 NodeRef::OnDisk(node) => {
259 Ok(BorrowedPath::OnDisk(node.full_path(on_disk)?))
259 Ok(BorrowedPath::OnDisk(node.full_path(on_disk)?))
260 }
260 }
261 }
261 }
262 }
262 }
263
263
264 pub(super) fn base_name(
264 pub(super) fn base_name(
265 &self,
265 &self,
266 on_disk: &'on_disk [u8],
266 on_disk: &'on_disk [u8],
267 ) -> Result<&'tree HgPath, DirstateV2ParseError> {
267 ) -> Result<&'tree HgPath, DirstateV2ParseError> {
268 match self {
268 match self {
269 NodeRef::InMemory(path, _node) => Ok(path.base_name()),
269 NodeRef::InMemory(path, _node) => Ok(path.base_name()),
270 NodeRef::OnDisk(node) => node.base_name(on_disk),
270 NodeRef::OnDisk(node) => node.base_name(on_disk),
271 }
271 }
272 }
272 }
273
273
274 pub(super) fn children(
274 pub(super) fn children(
275 &self,
275 &self,
276 on_disk: &'on_disk [u8],
276 on_disk: &'on_disk [u8],
277 ) -> Result<ChildNodesRef<'tree, 'on_disk>, DirstateV2ParseError> {
277 ) -> Result<ChildNodesRef<'tree, 'on_disk>, DirstateV2ParseError> {
278 match self {
278 match self {
279 NodeRef::InMemory(_path, node) => Ok(node.children.as_ref()),
279 NodeRef::InMemory(_path, node) => Ok(node.children.as_ref()),
280 NodeRef::OnDisk(node) => {
280 NodeRef::OnDisk(node) => {
281 Ok(ChildNodesRef::OnDisk(node.children(on_disk)?))
281 Ok(ChildNodesRef::OnDisk(node.children(on_disk)?))
282 }
282 }
283 }
283 }
284 }
284 }
285
285
286 pub(super) fn has_copy_source(&self) -> bool {
286 pub(super) fn has_copy_source(&self) -> bool {
287 match self {
287 match self {
288 NodeRef::InMemory(_path, node) => node.copy_source.is_some(),
288 NodeRef::InMemory(_path, node) => node.copy_source.is_some(),
289 NodeRef::OnDisk(node) => node.has_copy_source(),
289 NodeRef::OnDisk(node) => node.has_copy_source(),
290 }
290 }
291 }
291 }
292
292
293 pub(super) fn copy_source(
293 pub(super) fn copy_source(
294 &self,
294 &self,
295 on_disk: &'on_disk [u8],
295 on_disk: &'on_disk [u8],
296 ) -> Result<Option<&'tree HgPath>, DirstateV2ParseError> {
296 ) -> Result<Option<&'tree HgPath>, DirstateV2ParseError> {
297 match self {
297 match self {
298 NodeRef::InMemory(_path, node) => {
298 NodeRef::InMemory(_path, node) => {
299 Ok(node.copy_source.as_ref().map(|s| &**s))
299 Ok(node.copy_source.as_ref().map(|s| &**s))
300 }
300 }
301 NodeRef::OnDisk(node) => node.copy_source(on_disk),
301 NodeRef::OnDisk(node) => node.copy_source(on_disk),
302 }
302 }
303 }
303 }
304
304
305 pub(super) fn entry(
305 pub(super) fn entry(
306 &self,
306 &self,
307 ) -> Result<Option<DirstateEntry>, DirstateV2ParseError> {
307 ) -> Result<Option<DirstateEntry>, DirstateV2ParseError> {
308 match self {
308 match self {
309 NodeRef::InMemory(_path, node) => {
309 NodeRef::InMemory(_path, node) => {
310 Ok(node.data.as_entry().copied())
310 Ok(node.data.as_entry().copied())
311 }
311 }
312 NodeRef::OnDisk(node) => node.entry(),
312 NodeRef::OnDisk(node) => node.entry(),
313 }
313 }
314 }
314 }
315
315
316 pub(super) fn state(
316 pub(super) fn state(
317 &self,
317 &self,
318 ) -> Result<Option<EntryState>, DirstateV2ParseError> {
318 ) -> Result<Option<EntryState>, DirstateV2ParseError> {
319 match self {
319 match self {
320 NodeRef::InMemory(_path, node) => {
320 NodeRef::InMemory(_path, node) => {
321 Ok(node.data.as_entry().map(|entry| entry.state))
321 Ok(node.data.as_entry().map(|entry| entry.state))
322 }
322 }
323 NodeRef::OnDisk(node) => node.state(),
323 NodeRef::OnDisk(node) => node.state(),
324 }
324 }
325 }
325 }
326
326
327 pub(super) fn cached_directory_mtime(
327 pub(super) fn cached_directory_mtime(
328 &self,
328 &self,
329 ) -> Option<&'tree on_disk::Timestamp> {
329 ) -> Option<&'tree on_disk::Timestamp> {
330 match self {
330 match self {
331 NodeRef::InMemory(_path, node) => match &node.data {
331 NodeRef::InMemory(_path, node) => match &node.data {
332 NodeData::CachedDirectory { mtime } => Some(mtime),
332 NodeData::CachedDirectory { mtime } => Some(mtime),
333 _ => None,
333 _ => None,
334 },
334 },
335 NodeRef::OnDisk(node) => node.cached_directory_mtime(),
335 NodeRef::OnDisk(node) => node.cached_directory_mtime(),
336 }
336 }
337 }
337 }
338
338
339 pub(super) fn descendants_with_entry_count(&self) -> u32 {
339 pub(super) fn descendants_with_entry_count(&self) -> u32 {
340 match self {
340 match self {
341 NodeRef::InMemory(_path, node) => {
341 NodeRef::InMemory(_path, node) => {
342 node.descendants_with_entry_count
342 node.descendants_with_entry_count
343 }
343 }
344 NodeRef::OnDisk(node) => node.descendants_with_entry_count.get(),
344 NodeRef::OnDisk(node) => node.descendants_with_entry_count.get(),
345 }
345 }
346 }
346 }
347
347
348 pub(super) fn tracked_descendants_count(&self) -> u32 {
348 pub(super) fn tracked_descendants_count(&self) -> u32 {
349 match self {
349 match self {
350 NodeRef::InMemory(_path, node) => node.tracked_descendants_count,
350 NodeRef::InMemory(_path, node) => node.tracked_descendants_count,
351 NodeRef::OnDisk(node) => node.tracked_descendants_count.get(),
351 NodeRef::OnDisk(node) => node.tracked_descendants_count.get(),
352 }
352 }
353 }
353 }
354 }
354 }
355
355
356 /// Represents a file or a directory
356 /// Represents a file or a directory
357 #[derive(Default)]
357 #[derive(Default)]
358 pub(super) struct Node<'on_disk> {
358 pub(super) struct Node<'on_disk> {
359 pub(super) data: NodeData,
359 pub(super) data: NodeData,
360
360
361 pub(super) copy_source: Option<Cow<'on_disk, HgPath>>,
361 pub(super) copy_source: Option<Cow<'on_disk, HgPath>>,
362
362
363 pub(super) children: ChildNodes<'on_disk>,
363 pub(super) children: ChildNodes<'on_disk>,
364
364
365 /// How many (non-inclusive) descendants of this node have an entry.
365 /// How many (non-inclusive) descendants of this node have an entry.
366 pub(super) descendants_with_entry_count: u32,
366 pub(super) descendants_with_entry_count: u32,
367
367
368 /// How many (non-inclusive) descendants of this node have an entry whose
368 /// How many (non-inclusive) descendants of this node have an entry whose
369 /// state is "tracked".
369 /// state is "tracked".
370 pub(super) tracked_descendants_count: u32,
370 pub(super) tracked_descendants_count: u32,
371 }
371 }
372
372
373 pub(super) enum NodeData {
373 pub(super) enum NodeData {
374 Entry(DirstateEntry),
374 Entry(DirstateEntry),
375 CachedDirectory { mtime: on_disk::Timestamp },
375 CachedDirectory { mtime: on_disk::Timestamp },
376 None,
376 None,
377 }
377 }
378
378
379 impl Default for NodeData {
379 impl Default for NodeData {
380 fn default() -> Self {
380 fn default() -> Self {
381 NodeData::None
381 NodeData::None
382 }
382 }
383 }
383 }
384
384
385 impl NodeData {
385 impl NodeData {
386 fn has_entry(&self) -> bool {
386 fn has_entry(&self) -> bool {
387 match self {
387 match self {
388 NodeData::Entry(_) => true,
388 NodeData::Entry(_) => true,
389 _ => false,
389 _ => false,
390 }
390 }
391 }
391 }
392
392
393 fn as_entry(&self) -> Option<&DirstateEntry> {
393 fn as_entry(&self) -> Option<&DirstateEntry> {
394 match self {
394 match self {
395 NodeData::Entry(entry) => Some(entry),
395 NodeData::Entry(entry) => Some(entry),
396 _ => None,
396 _ => None,
397 }
397 }
398 }
398 }
399 }
399 }
400
400
401 impl<'on_disk> DirstateMap<'on_disk> {
401 impl<'on_disk> DirstateMap<'on_disk> {
402 pub(super) fn empty(on_disk: &'on_disk [u8]) -> Self {
402 pub(super) fn empty(on_disk: &'on_disk [u8]) -> Self {
403 Self {
403 Self {
404 on_disk,
404 on_disk,
405 root: ChildNodes::default(),
405 root: ChildNodes::default(),
406 nodes_with_entry_count: 0,
406 nodes_with_entry_count: 0,
407 nodes_with_copy_source_count: 0,
407 nodes_with_copy_source_count: 0,
408 ignore_patterns_hash: [0; on_disk::IGNORE_PATTERNS_HASH_LEN],
408 ignore_patterns_hash: [0; on_disk::IGNORE_PATTERNS_HASH_LEN],
409 }
409 }
410 }
410 }
411
411
412 #[timed]
412 #[timed]
413 pub fn new_v2(
413 pub fn new_v2(on_disk: &'on_disk [u8]) -> Result<Self, DirstateError> {
414 on_disk: &'on_disk [u8],
415 ) -> Result<(Self, Option<DirstateParents>), DirstateError> {
416 Ok(on_disk::read(on_disk)?)
414 Ok(on_disk::read(on_disk)?)
417 }
415 }
418
416
419 #[timed]
417 #[timed]
420 pub fn new_v1(
418 pub fn new_v1(
421 on_disk: &'on_disk [u8],
419 on_disk: &'on_disk [u8],
422 ) -> Result<(Self, Option<DirstateParents>), DirstateError> {
420 ) -> Result<(Self, Option<DirstateParents>), DirstateError> {
423 let mut map = Self::empty(on_disk);
421 let mut map = Self::empty(on_disk);
424 if map.on_disk.is_empty() {
422 if map.on_disk.is_empty() {
425 return Ok((map, None));
423 return Ok((map, None));
426 }
424 }
427
425
428 let parents = parse_dirstate_entries(
426 let parents = parse_dirstate_entries(
429 map.on_disk,
427 map.on_disk,
430 |path, entry, copy_source| {
428 |path, entry, copy_source| {
431 let tracked = entry.state.is_tracked();
429 let tracked = entry.state.is_tracked();
432 let node = Self::get_or_insert_node(
430 let node = Self::get_or_insert_node(
433 map.on_disk,
431 map.on_disk,
434 &mut map.root,
432 &mut map.root,
435 path,
433 path,
436 WithBasename::to_cow_borrowed,
434 WithBasename::to_cow_borrowed,
437 |ancestor| {
435 |ancestor| {
438 if tracked {
436 if tracked {
439 ancestor.tracked_descendants_count += 1
437 ancestor.tracked_descendants_count += 1
440 }
438 }
441 ancestor.descendants_with_entry_count += 1
439 ancestor.descendants_with_entry_count += 1
442 },
440 },
443 )?;
441 )?;
444 assert!(
442 assert!(
445 !node.data.has_entry(),
443 !node.data.has_entry(),
446 "duplicate dirstate entry in read"
444 "duplicate dirstate entry in read"
447 );
445 );
448 assert!(
446 assert!(
449 node.copy_source.is_none(),
447 node.copy_source.is_none(),
450 "duplicate dirstate entry in read"
448 "duplicate dirstate entry in read"
451 );
449 );
452 node.data = NodeData::Entry(*entry);
450 node.data = NodeData::Entry(*entry);
453 node.copy_source = copy_source.map(Cow::Borrowed);
451 node.copy_source = copy_source.map(Cow::Borrowed);
454 map.nodes_with_entry_count += 1;
452 map.nodes_with_entry_count += 1;
455 if copy_source.is_some() {
453 if copy_source.is_some() {
456 map.nodes_with_copy_source_count += 1
454 map.nodes_with_copy_source_count += 1
457 }
455 }
458 Ok(())
456 Ok(())
459 },
457 },
460 )?;
458 )?;
461 let parents = Some(parents.clone());
459 let parents = Some(parents.clone());
462
460
463 Ok((map, parents))
461 Ok((map, parents))
464 }
462 }
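// Unlike `new_v1` above, which eagerly builds in-memory nodes from the
// flat dirstate-v1 format, `new_v2` delegates to `on_disk::read` and
// keeps nodes borrowed from the on-disk bytes until they are mutated.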
465
463
466 fn get_node<'tree>(
464 fn get_node<'tree>(
467 &'tree self,
465 &'tree self,
468 path: &HgPath,
466 path: &HgPath,
469 ) -> Result<Option<NodeRef<'tree, 'on_disk>>, DirstateV2ParseError> {
467 ) -> Result<Option<NodeRef<'tree, 'on_disk>>, DirstateV2ParseError> {
470 let mut children = self.root.as_ref();
468 let mut children = self.root.as_ref();
471 let mut components = path.components();
469 let mut components = path.components();
472 let mut component =
470 let mut component =
473 components.next().expect("expected at least one component");
471 components.next().expect("expected at least one component");
474 loop {
472 loop {
475 if let Some(child) = children.get(component, self.on_disk)? {
473 if let Some(child) = children.get(component, self.on_disk)? {
476 if let Some(next_component) = components.next() {
474 if let Some(next_component) = components.next() {
477 component = next_component;
475 component = next_component;
478 children = child.children(self.on_disk)?;
476 children = child.children(self.on_disk)?;
479 } else {
477 } else {
480 return Ok(Some(child));
478 return Ok(Some(child));
481 }
479 }
482 } else {
480 } else {
483 return Ok(None);
481 return Ok(None);
484 }
482 }
485 }
483 }
486 }
484 }
487
485
488 /// Returns a mutable reference to the node at `path` if it exists
486 /// Returns a mutable reference to the node at `path` if it exists
489 ///
487 ///
490 /// This takes `root` instead of `&mut self` so that callers can mutate
488 /// This takes `root` instead of `&mut self` so that callers can mutate
491 /// other fields while the returned borrow is still valid
489 /// other fields while the returned borrow is still valid
492 fn get_node_mut<'tree>(
490 fn get_node_mut<'tree>(
493 on_disk: &'on_disk [u8],
491 on_disk: &'on_disk [u8],
494 root: &'tree mut ChildNodes<'on_disk>,
492 root: &'tree mut ChildNodes<'on_disk>,
495 path: &HgPath,
493 path: &HgPath,
496 ) -> Result<Option<&'tree mut Node<'on_disk>>, DirstateV2ParseError> {
494 ) -> Result<Option<&'tree mut Node<'on_disk>>, DirstateV2ParseError> {
497 let mut children = root;
495 let mut children = root;
498 let mut components = path.components();
496 let mut components = path.components();
499 let mut component =
497 let mut component =
500 components.next().expect("expected at least one component");
498 components.next().expect("expected at least one component");
501 loop {
499 loop {
502 if let Some(child) = children.make_mut(on_disk)?.get_mut(component)
500 if let Some(child) = children.make_mut(on_disk)?.get_mut(component)
503 {
501 {
504 if let Some(next_component) = components.next() {
502 if let Some(next_component) = components.next() {
505 component = next_component;
503 component = next_component;
506 children = &mut child.children;
504 children = &mut child.children;
507 } else {
505 } else {
508 return Ok(Some(child));
506 return Ok(Some(child));
509 }
507 }
510 } else {
508 } else {
511 return Ok(None);
509 return Ok(None);
512 }
510 }
513 }
511 }
514 }
512 }
515
513
516 pub(super) fn get_or_insert<'tree, 'path>(
514 pub(super) fn get_or_insert<'tree, 'path>(
517 &'tree mut self,
515 &'tree mut self,
518 path: &HgPath,
516 path: &HgPath,
519 ) -> Result<&'tree mut Node<'on_disk>, DirstateV2ParseError> {
517 ) -> Result<&'tree mut Node<'on_disk>, DirstateV2ParseError> {
520 Self::get_or_insert_node(
518 Self::get_or_insert_node(
521 self.on_disk,
519 self.on_disk,
522 &mut self.root,
520 &mut self.root,
523 path,
521 path,
524 WithBasename::to_cow_owned,
522 WithBasename::to_cow_owned,
525 |_| {},
523 |_| {},
526 )
524 )
527 }
525 }
528
526
529 pub(super) fn get_or_insert_node<'tree, 'path>(
527 pub(super) fn get_or_insert_node<'tree, 'path>(
530 on_disk: &'on_disk [u8],
528 on_disk: &'on_disk [u8],
531 root: &'tree mut ChildNodes<'on_disk>,
529 root: &'tree mut ChildNodes<'on_disk>,
532 path: &'path HgPath,
530 path: &'path HgPath,
533 to_cow: impl Fn(
531 to_cow: impl Fn(
534 WithBasename<&'path HgPath>,
532 WithBasename<&'path HgPath>,
535 ) -> WithBasename<Cow<'on_disk, HgPath>>,
533 ) -> WithBasename<Cow<'on_disk, HgPath>>,
536 mut each_ancestor: impl FnMut(&mut Node),
534 mut each_ancestor: impl FnMut(&mut Node),
537 ) -> Result<&'tree mut Node<'on_disk>, DirstateV2ParseError> {
535 ) -> Result<&'tree mut Node<'on_disk>, DirstateV2ParseError> {
538 let mut child_nodes = root;
536 let mut child_nodes = root;
539 let mut inclusive_ancestor_paths =
537 let mut inclusive_ancestor_paths =
540 WithBasename::inclusive_ancestors_of(path);
538 WithBasename::inclusive_ancestors_of(path);
541 let mut ancestor_path = inclusive_ancestor_paths
539 let mut ancestor_path = inclusive_ancestor_paths
542 .next()
540 .next()
543 .expect("expected at least one inclusive ancestor");
541 .expect("expected at least one inclusive ancestor");
544 loop {
542 loop {
545 // TODO: can we avoid allocating an owned key in cases where the
543 // TODO: can we avoid allocating an owned key in cases where the
546 // map already contains that key, without introducing double
544 // map already contains that key, without introducing double
547 // lookup?
545 // lookup?
548 let child_node = child_nodes
546 let child_node = child_nodes
549 .make_mut(on_disk)?
547 .make_mut(on_disk)?
550 .entry(to_cow(ancestor_path))
548 .entry(to_cow(ancestor_path))
551 .or_default();
549 .or_default();
552 if let Some(next) = inclusive_ancestor_paths.next() {
550 if let Some(next) = inclusive_ancestor_paths.next() {
553 each_ancestor(child_node);
551 each_ancestor(child_node);
554 ancestor_path = next;
552 ancestor_path = next;
555 child_nodes = &mut child_node.children;
553 child_nodes = &mut child_node.children;
556 } else {
554 } else {
557 return Ok(child_node);
555 return Ok(child_node);
558 }
556 }
559 }
557 }
560 }
558 }
561
559
562 fn add_or_remove_file(
560 fn add_or_remove_file(
563 &mut self,
561 &mut self,
564 path: &HgPath,
562 path: &HgPath,
565 old_state: EntryState,
563 old_state: EntryState,
566 new_entry: DirstateEntry,
564 new_entry: DirstateEntry,
567 ) -> Result<(), DirstateV2ParseError> {
565 ) -> Result<(), DirstateV2ParseError> {
568 let had_entry = old_state != EntryState::Unknown;
566 let had_entry = old_state != EntryState::Unknown;
569 let tracked_count_increment =
567 let tracked_count_increment =
570 match (old_state.is_tracked(), new_entry.state.is_tracked()) {
568 match (old_state.is_tracked(), new_entry.state.is_tracked()) {
571 (false, true) => 1,
569 (false, true) => 1,
572 (true, false) => -1,
570 (true, false) => -1,
573 _ => 0,
571 _ => 0,
574 };
572 };
575
573
576 let node = Self::get_or_insert_node(
574 let node = Self::get_or_insert_node(
577 self.on_disk,
575 self.on_disk,
578 &mut self.root,
576 &mut self.root,
579 path,
577 path,
580 WithBasename::to_cow_owned,
578 WithBasename::to_cow_owned,
581 |ancestor| {
579 |ancestor| {
582 if !had_entry {
580 if !had_entry {
583 ancestor.descendants_with_entry_count += 1;
581 ancestor.descendants_with_entry_count += 1;
584 }
582 }
585
583
586 // We can’t use `+= increment` because the counter is unsigned,
584 // We can’t use `+= increment` because the counter is unsigned,
587 // and we want debug builds to detect accidental underflow
585 // and we want debug builds to detect accidental underflow
588 // through zero
586 // through zero
589 match tracked_count_increment {
587 match tracked_count_increment {
590 1 => ancestor.tracked_descendants_count += 1,
588 1 => ancestor.tracked_descendants_count += 1,
591 -1 => ancestor.tracked_descendants_count -= 1,
589 -1 => ancestor.tracked_descendants_count -= 1,
592 _ => {}
590 _ => {}
593 }
591 }
594 },
592 },
595 )?;
593 )?;
596 if !had_entry {
594 if !had_entry {
597 self.nodes_with_entry_count += 1
595 self.nodes_with_entry_count += 1
598 }
596 }
599 node.data = NodeData::Entry(new_entry);
597 node.data = NodeData::Entry(new_entry);
600 Ok(())
598 Ok(())
601 }
599 }
602
600
603 fn iter_nodes<'tree>(
601 fn iter_nodes<'tree>(
604 &'tree self,
602 &'tree self,
605 ) -> impl Iterator<
603 ) -> impl Iterator<
606 Item = Result<NodeRef<'tree, 'on_disk>, DirstateV2ParseError>,
604 Item = Result<NodeRef<'tree, 'on_disk>, DirstateV2ParseError>,
607 > + 'tree {
605 > + 'tree {
608 // Depth first tree traversal.
606 // Depth first tree traversal.
609 //
607 //
610 // If we could afford internal iteration and recursion,
608 // If we could afford internal iteration and recursion,
611 // this would look like:
609 // this would look like:
612 //
610 //
613 // ```
611 // ```
614 // fn traverse_children(
612 // fn traverse_children(
615 // children: &ChildNodes,
613 // children: &ChildNodes,
616 // each: &mut impl FnMut(&Node),
614 // each: &mut impl FnMut(&Node),
617 // ) {
615 // ) {
618 // for child in children.values() {
616 // for child in children.values() {
619 // traverse_children(&child.children, each);
617 // traverse_children(&child.children, each);
620 // each(child);
618 // each(child);
621 // }
619 // }
622 // }
620 // }
623 // ```
621 // ```
624 //
622 //
625 // However we want an external iterator and therefore can’t use the
623 // However we want an external iterator and therefore can’t use the
626 // call stack. Use an explicit stack instead:
624 // call stack. Use an explicit stack instead:
627 let mut stack = Vec::new();
625 let mut stack = Vec::new();
628 let mut iter = self.root.as_ref().iter();
626 let mut iter = self.root.as_ref().iter();
629 std::iter::from_fn(move || {
627 std::iter::from_fn(move || {
630 while let Some(child_node) = iter.next() {
628 while let Some(child_node) = iter.next() {
631 let children = match child_node.children(self.on_disk) {
629 let children = match child_node.children(self.on_disk) {
632 Ok(children) => children,
630 Ok(children) => children,
633 Err(error) => return Some(Err(error)),
631 Err(error) => return Some(Err(error)),
634 };
632 };
635 // Pseudo-recursion
633 // Pseudo-recursion
636 let new_iter = children.iter();
634 let new_iter = children.iter();
637 let old_iter = std::mem::replace(&mut iter, new_iter);
635 let old_iter = std::mem::replace(&mut iter, new_iter);
638 stack.push((child_node, old_iter));
636 stack.push((child_node, old_iter));
639 }
637 }
640 // Found the end of a `children.iter()` iterator.
638 // Found the end of a `children.iter()` iterator.
641 if let Some((child_node, next_iter)) = stack.pop() {
639 if let Some((child_node, next_iter)) = stack.pop() {
642 // "Return" from pseudo-recursion by restoring state from the
640 // "Return" from pseudo-recursion by restoring state from the
643 // explicit stack
641 // explicit stack
644 iter = next_iter;
642 iter = next_iter;
645
643
646 Some(Ok(child_node))
644 Some(Ok(child_node))
647 } else {
645 } else {
648 // Reached the bottom of the stack, we’re done
646 // Reached the bottom of the stack, we’re done
649 None
647 None
650 }
648 }
651 })
649 })
652 }
650 }
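// Illustrative traversal order: for a tree containing `a/b` and `a/c`,
// this iterator yields `a/b`, then `a/c`, then `a` (children before
// their parent, since a node is only yielded when popped off the
// explicit stack).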
653
651
654 fn clear_known_ambiguous_mtimes(
652 fn clear_known_ambiguous_mtimes(
655 &mut self,
653 &mut self,
656 paths: &[impl AsRef<HgPath>],
654 paths: &[impl AsRef<HgPath>],
657 ) -> Result<(), DirstateV2ParseError> {
655 ) -> Result<(), DirstateV2ParseError> {
658 for path in paths {
656 for path in paths {
659 if let Some(node) = Self::get_node_mut(
657 if let Some(node) = Self::get_node_mut(
660 self.on_disk,
658 self.on_disk,
661 &mut self.root,
659 &mut self.root,
662 path.as_ref(),
660 path.as_ref(),
663 )? {
661 )? {
664 if let NodeData::Entry(entry) = &mut node.data {
662 if let NodeData::Entry(entry) = &mut node.data {
665 entry.clear_mtime();
663 entry.clear_mtime();
666 }
664 }
667 }
665 }
668 }
666 }
669 Ok(())
667 Ok(())
670 }
668 }
671
669
672 /// Return a fallible iterator of full paths of nodes that have an
670 /// Return a fallible iterator of full paths of nodes that have an
673 /// `entry` for which the given `predicate` returns true.
671 /// `entry` for which the given `predicate` returns true.
674 ///
672 ///
675 /// Fallibility means that each iterator item is a `Result`, which may
673 /// Fallibility means that each iterator item is a `Result`, which may
676 /// indicate a parse error of the on-disk dirstate-v2 format. Such errors
674 /// indicate a parse error of the on-disk dirstate-v2 format. Such errors
677 /// should only happen if Mercurial is buggy or a repository is corrupted.
675 /// should only happen if Mercurial is buggy or a repository is corrupted.
678 fn filter_full_paths<'tree>(
676 fn filter_full_paths<'tree>(
679 &'tree self,
677 &'tree self,
680 predicate: impl Fn(&DirstateEntry) -> bool + 'tree,
678 predicate: impl Fn(&DirstateEntry) -> bool + 'tree,
681 ) -> impl Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + 'tree
679 ) -> impl Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + 'tree
682 {
680 {
683 filter_map_results(self.iter_nodes(), move |node| {
681 filter_map_results(self.iter_nodes(), move |node| {
684 if let Some(entry) = node.entry()? {
682 if let Some(entry) = node.entry()? {
685 if predicate(&entry) {
683 if predicate(&entry) {
686 return Ok(Some(node.full_path(self.on_disk)?));
684 return Ok(Some(node.full_path(self.on_disk)?));
687 }
685 }
688 }
686 }
689 Ok(None)
687 Ok(None)
690 })
688 })
691 }
689 }
692 }
690 }
693
691
694 /// Like `Iterator::filter_map`, but over a fallible iterator of `Result`s.
692 /// Like `Iterator::filter_map`, but over a fallible iterator of `Result`s.
695 ///
693 ///
696 /// The callback is only called for incoming `Ok` values. Errors are passed
694 /// The callback is only called for incoming `Ok` values. Errors are passed
697 /// through as-is. In order to let it use the `?` operator the callback is
695 /// through as-is. In order to let it use the `?` operator the callback is
698 /// expected to return a `Result` of `Option`, instead of an `Option` of
696 /// expected to return a `Result` of `Option`, instead of an `Option` of
699 /// `Result`.
697 /// `Result`.
700 fn filter_map_results<'a, I, F, A, B, E>(
698 fn filter_map_results<'a, I, F, A, B, E>(
701 iter: I,
699 iter: I,
702 f: F,
700 f: F,
703 ) -> impl Iterator<Item = Result<B, E>> + 'a
701 ) -> impl Iterator<Item = Result<B, E>> + 'a
704 where
702 where
705 I: Iterator<Item = Result<A, E>> + 'a,
703 I: Iterator<Item = Result<A, E>> + 'a,
706 F: Fn(A) -> Result<Option<B>, E> + 'a,
704 F: Fn(A) -> Result<Option<B>, E> + 'a,
707 {
705 {
708 iter.filter_map(move |result| match result {
706 iter.filter_map(move |result| match result {
709 Ok(node) => f(node).transpose(),
707 Ok(node) => f(node).transpose(),
710 Err(e) => Some(Err(e)),
708 Err(e) => Some(Err(e)),
711 })
709 })
712 }
710 }
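// A minimal usage sketch with hypothetical values, not from this module:
//
//     let results = vec![Ok(1), Err("parse error"), Ok(2)].into_iter();
//     let odd: Vec<_> = filter_map_results(results, |n| {
//         Ok(if n % 2 == 1 { Some(n) } else { None })
//     })
//     .collect();
//     assert_eq!(odd, vec![Ok(1), Err("parse error")]);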
713
711
714 impl<'on_disk> super::dispatch::DirstateMapMethods for DirstateMap<'on_disk> {
712 impl<'on_disk> super::dispatch::DirstateMapMethods for DirstateMap<'on_disk> {
715 fn clear(&mut self) {
713 fn clear(&mut self) {
716 self.root = Default::default();
714 self.root = Default::default();
717 self.nodes_with_entry_count = 0;
715 self.nodes_with_entry_count = 0;
718 self.nodes_with_copy_source_count = 0;
716 self.nodes_with_copy_source_count = 0;
719 }
717 }
720
718
721 fn add_file(
719 fn add_file(
722 &mut self,
720 &mut self,
723 filename: &HgPath,
721 filename: &HgPath,
724 entry: DirstateEntry,
722 entry: DirstateEntry,
725 added: bool,
723 added: bool,
726 merged: bool,
724 merged: bool,
727 from_p2: bool,
725 from_p2: bool,
728 possibly_dirty: bool,
726 possibly_dirty: bool,
729 ) -> Result<(), DirstateError> {
727 ) -> Result<(), DirstateError> {
730 let mut entry = entry;
728 let mut entry = entry;
731 if added {
729 if added {
732 assert!(!possibly_dirty);
730 assert!(!possibly_dirty);
733 assert!(!from_p2);
731 assert!(!from_p2);
734 entry.state = EntryState::Added;
732 entry.state = EntryState::Added;
735 entry.size = SIZE_NON_NORMAL;
733 entry.size = SIZE_NON_NORMAL;
736 entry.mtime = MTIME_UNSET;
734 entry.mtime = MTIME_UNSET;
737 } else if merged {
735 } else if merged {
738 assert!(!possibly_dirty);
736 assert!(!possibly_dirty);
739 assert!(!from_p2);
737 assert!(!from_p2);
740 entry.state = EntryState::Merged;
738 entry.state = EntryState::Merged;
741 entry.size = SIZE_FROM_OTHER_PARENT;
739 entry.size = SIZE_FROM_OTHER_PARENT;
742 entry.mtime = MTIME_UNSET;
740 entry.mtime = MTIME_UNSET;
743 } else if from_p2 {
741 } else if from_p2 {
744 assert!(!possibly_dirty);
742 assert!(!possibly_dirty);
745 entry.state = EntryState::Normal;
743 entry.state = EntryState::Normal;
746 entry.size = SIZE_FROM_OTHER_PARENT;
744 entry.size = SIZE_FROM_OTHER_PARENT;
747 entry.mtime = MTIME_UNSET;
745 entry.mtime = MTIME_UNSET;
748 } else if possibly_dirty {
746 } else if possibly_dirty {
749 entry.state = EntryState::Normal;
747 entry.state = EntryState::Normal;
750 entry.size = SIZE_NON_NORMAL;
748 entry.size = SIZE_NON_NORMAL;
751 entry.mtime = MTIME_UNSET;
749 entry.mtime = MTIME_UNSET;
752 } else {
750 } else {
753 entry.state = EntryState::Normal;
751 entry.state = EntryState::Normal;
754 entry.size = entry.size & V1_RANGEMASK;
752 entry.size = entry.size & V1_RANGEMASK;
755 entry.mtime = entry.mtime & V1_RANGEMASK;
753 entry.mtime = entry.mtime & V1_RANGEMASK;
756 }
754 }
757
755
758 let old_state = match self.get(filename)? {
756 let old_state = match self.get(filename)? {
759 Some(e) => e.state,
757 Some(e) => e.state,
760 None => EntryState::Unknown,
758 None => EntryState::Unknown,
761 };
759 };
762
760
763 Ok(self.add_or_remove_file(filename, old_state, entry)?)
761 Ok(self.add_or_remove_file(filename, old_state, entry)?)
764 }
762 }
765
763
766 fn remove_file(
764 fn remove_file(
767 &mut self,
765 &mut self,
768 filename: &HgPath,
766 filename: &HgPath,
769 in_merge: bool,
767 in_merge: bool,
770 ) -> Result<(), DirstateError> {
768 ) -> Result<(), DirstateError> {
771 let old_entry_opt = self.get(filename)?;
769 let old_entry_opt = self.get(filename)?;
772 let old_state = match old_entry_opt {
770 let old_state = match old_entry_opt {
773 Some(e) => e.state,
771 Some(e) => e.state,
774 None => EntryState::Unknown,
772 None => EntryState::Unknown,
775 };
773 };
776 let mut size = 0;
774 let mut size = 0;
777 if in_merge {
775 if in_merge {
778 // XXX we should not be able to have an 'm' state or 'FROM_P2' if we
776 // XXX we should not be able to have an 'm' state or 'FROM_P2' if we
779 // are not in a merge. So I (marmoute) am not sure we need the
777 // are not in a merge. So I (marmoute) am not sure we need the
780 // conditional at all. Double-checking this with an assert
778 // conditional at all. Double-checking this with an assert
781 // would be nice.
779 // would be nice.
782 if let Some(old_entry) = old_entry_opt {
780 if let Some(old_entry) = old_entry_opt {
783 // backup the previous state
781 // backup the previous state
784 if old_entry.state == EntryState::Merged {
782 if old_entry.state == EntryState::Merged {
785 size = SIZE_NON_NORMAL;
783 size = SIZE_NON_NORMAL;
786 } else if old_entry.state == EntryState::Normal
784 } else if old_entry.state == EntryState::Normal
787 && old_entry.size == SIZE_FROM_OTHER_PARENT
785 && old_entry.size == SIZE_FROM_OTHER_PARENT
788 {
786 {
789 // other parent
787 // other parent
790 size = SIZE_FROM_OTHER_PARENT;
788 size = SIZE_FROM_OTHER_PARENT;
791 }
789 }
792 }
790 }
793 }
791 }
794 if size == 0 {
792 if size == 0 {
795 self.copy_map_remove(filename)?;
793 self.copy_map_remove(filename)?;
796 }
794 }
797 let entry = DirstateEntry {
795 let entry = DirstateEntry {
798 state: EntryState::Removed,
796 state: EntryState::Removed,
799 mode: 0,
797 mode: 0,
800 size,
798 size,
801 mtime: 0,
799 mtime: 0,
802 };
800 };
803 Ok(self.add_or_remove_file(filename, old_state, entry)?)
801 Ok(self.add_or_remove_file(filename, old_state, entry)?)
804 }
802 }
805
803
806 fn drop_file(&mut self, filename: &HgPath) -> Result<bool, DirstateError> {
804 fn drop_file(&mut self, filename: &HgPath) -> Result<bool, DirstateError> {
807 let old_state = match self.get(filename)? {
805 let old_state = match self.get(filename)? {
808 Some(e) => e.state,
806 Some(e) => e.state,
809 None => EntryState::Unknown,
807 None => EntryState::Unknown,
810 };
808 };
811 struct Dropped {
809 struct Dropped {
812 was_tracked: bool,
810 was_tracked: bool,
813 had_entry: bool,
811 had_entry: bool,
814 had_copy_source: bool,
812 had_copy_source: bool,
815 }
813 }
816
814
817 /// If this returns `Ok(Some((dropped, removed)))`, then
815 /// If this returns `Ok(Some((dropped, removed)))`, then
818 ///
816 ///
819 /// * `dropped` is about the leaf node that was at `filename`
817 /// * `dropped` is about the leaf node that was at `filename`
820 /// * `removed` is whether this particular level of recursion just
818 /// * `removed` is whether this particular level of recursion just
821 /// removed a node in `nodes`.
819 /// removed a node in `nodes`.
822 fn recur<'on_disk>(
820 fn recur<'on_disk>(
823 on_disk: &'on_disk [u8],
821 on_disk: &'on_disk [u8],
824 nodes: &mut ChildNodes<'on_disk>,
822 nodes: &mut ChildNodes<'on_disk>,
825 path: &HgPath,
823 path: &HgPath,
826 ) -> Result<Option<(Dropped, bool)>, DirstateV2ParseError> {
824 ) -> Result<Option<(Dropped, bool)>, DirstateV2ParseError> {
827 let (first_path_component, rest_of_path) =
825 let (first_path_component, rest_of_path) =
828 path.split_first_component();
826 path.split_first_component();
829 let node = if let Some(node) =
827 let node = if let Some(node) =
830 nodes.make_mut(on_disk)?.get_mut(first_path_component)
828 nodes.make_mut(on_disk)?.get_mut(first_path_component)
831 {
829 {
832 node
830 node
833 } else {
831 } else {
834 return Ok(None);
832 return Ok(None);
835 };
833 };
836 let dropped;
834 let dropped;
837 if let Some(rest) = rest_of_path {
835 if let Some(rest) = rest_of_path {
838 if let Some((d, removed)) =
836 if let Some((d, removed)) =
839 recur(on_disk, &mut node.children, rest)?
837 recur(on_disk, &mut node.children, rest)?
840 {
838 {
841 dropped = d;
839 dropped = d;
842 if dropped.had_entry {
840 if dropped.had_entry {
843 node.descendants_with_entry_count -= 1;
841 node.descendants_with_entry_count -= 1;
844 }
842 }
845 if dropped.was_tracked {
843 if dropped.was_tracked {
846 node.tracked_descendants_count -= 1;
844 node.tracked_descendants_count -= 1;
847 }
845 }
848
846
849 // Directory caches must be invalidated when removing a
847 // Directory caches must be invalidated when removing a
850 // child node
848 // child node
851 if removed {
849 if removed {
852 if let NodeData::CachedDirectory { .. } = &node.data {
850 if let NodeData::CachedDirectory { .. } = &node.data {
853 node.data = NodeData::None
851 node.data = NodeData::None
854 }
852 }
855 }
853 }
856 } else {
854 } else {
857 return Ok(None);
855 return Ok(None);
858 }
856 }
859 } else {
857 } else {
860 let had_entry = node.data.has_entry();
858 let had_entry = node.data.has_entry();
861 if had_entry {
859 if had_entry {
862 node.data = NodeData::None
860 node.data = NodeData::None
863 }
861 }
864 dropped = Dropped {
862 dropped = Dropped {
865 was_tracked: node
863 was_tracked: node
866 .data
864 .data
867 .as_entry()
865 .as_entry()
868 .map_or(false, |entry| entry.state.is_tracked()),
866 .map_or(false, |entry| entry.state.is_tracked()),
869 had_entry,
867 had_entry,
870 had_copy_source: node.copy_source.take().is_some(),
868 had_copy_source: node.copy_source.take().is_some(),
871 };
869 };
872 }
870 }
873 // After recursion, for both leaf (rest_of_path is None) nodes and
871 // After recursion, for both leaf (rest_of_path is None) nodes and
874 // parent nodes, remove a node if it just became empty.
872 // parent nodes, remove a node if it just became empty.
875 let remove = !node.data.has_entry()
873 let remove = !node.data.has_entry()
876 && node.copy_source.is_none()
874 && node.copy_source.is_none()
877 && node.children.is_empty();
875 && node.children.is_empty();
878 if remove {
876 if remove {
879 nodes.make_mut(on_disk)?.remove(first_path_component);
877 nodes.make_mut(on_disk)?.remove(first_path_component);
880 }
878 }
881 Ok(Some((dropped, remove)))
879 Ok(Some((dropped, remove)))
882 }
880 }
883
881
884 if let Some((dropped, _removed)) =
882 if let Some((dropped, _removed)) =
885 recur(self.on_disk, &mut self.root, filename)?
883 recur(self.on_disk, &mut self.root, filename)?
886 {
884 {
887 if dropped.had_entry {
885 if dropped.had_entry {
888 self.nodes_with_entry_count -= 1
886 self.nodes_with_entry_count -= 1
889 }
887 }
890 if dropped.had_copy_source {
888 if dropped.had_copy_source {
891 self.nodes_with_copy_source_count -= 1
889 self.nodes_with_copy_source_count -= 1
892 }
890 }
893 Ok(dropped.had_entry)
891 Ok(dropped.had_entry)
894 } else {
892 } else {
895 debug_assert!(!old_state.is_tracked());
893 debug_assert!(!old_state.is_tracked());
896 Ok(false)
894 Ok(false)
897 }
895 }
898 }
896 }
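The pruning rule computed at the end of `recur`, restated as a stand-alone predicate (illustrative; the fields checked are the ones on `Node` in this module):

// A node may be dropped from its parent's `children` map only once it
// carries no entry, no copy source, and no child nodes.
fn can_prune(has_entry: bool, has_copy_source: bool, child_count: usize) -> bool {
    !has_entry && !has_copy_source && child_count == 0
}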
899
897
900 fn clear_ambiguous_times(
898 fn clear_ambiguous_times(
901 &mut self,
899 &mut self,
902 filenames: Vec<HgPathBuf>,
900 filenames: Vec<HgPathBuf>,
903 now: i32,
901 now: i32,
904 ) -> Result<(), DirstateV2ParseError> {
902 ) -> Result<(), DirstateV2ParseError> {
905 for filename in filenames {
903 for filename in filenames {
906 if let Some(node) =
904 if let Some(node) =
907 Self::get_node_mut(self.on_disk, &mut self.root, &filename)?
905 Self::get_node_mut(self.on_disk, &mut self.root, &filename)?
908 {
906 {
909 if let NodeData::Entry(entry) = &mut node.data {
907 if let NodeData::Entry(entry) = &mut node.data {
910 entry.clear_ambiguous_mtime(now);
908 entry.clear_ambiguous_mtime(now);
911 }
909 }
912 }
910 }
913 }
911 }
914 Ok(())
912 Ok(())
915 }
913 }
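The ambiguity rule behind `clear_ambiguous_mtime`, sketched as a plain predicate (the real check lives on `DirstateEntry` as `mtime_is_ambiguous`; this free function is illustrative): an mtime equal to the current second proves nothing, since the file could still be rewritten within the same clock tick.

// 'n' is the state byte for Normal entries; only those are trusted by
// mtime, so only those can be ambiguous.
fn mtime_is_ambiguous(state: u8, mtime: i32, now: i32) -> bool {
    state == b'n' && mtime == now
}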
916
914
917 fn non_normal_entries_contains(
915 fn non_normal_entries_contains(
918 &mut self,
916 &mut self,
919 key: &HgPath,
917 key: &HgPath,
920 ) -> Result<bool, DirstateV2ParseError> {
918 ) -> Result<bool, DirstateV2ParseError> {
921 Ok(if let Some(node) = self.get_node(key)? {
919 Ok(if let Some(node) = self.get_node(key)? {
922 node.entry()?.map_or(false, |entry| entry.is_non_normal())
920 node.entry()?.map_or(false, |entry| entry.is_non_normal())
923 } else {
921 } else {
924 false
922 false
925 })
923 })
926 }
924 }
927
925
928 fn non_normal_entries_remove(&mut self, _key: &HgPath) {
926 fn non_normal_entries_remove(&mut self, _key: &HgPath) {
929 // Do nothing, this `DirstateMap` does not have a separate "non normal
927 // Do nothing, this `DirstateMap` does not have a separate "non normal
930 // entries" set that need to be kept up to date
928 // entries" set that need to be kept up to date
931 }
929 }
932
930
933 fn non_normal_or_other_parent_paths(
931 fn non_normal_or_other_parent_paths(
934 &mut self,
932 &mut self,
935 ) -> Box<dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + '_>
933 ) -> Box<dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + '_>
936 {
934 {
937 Box::new(self.filter_full_paths(|entry| {
935 Box::new(self.filter_full_paths(|entry| {
938 entry.is_non_normal() || entry.is_from_other_parent()
936 entry.is_non_normal() || entry.is_from_other_parent()
939 }))
937 }))
940 }
938 }
941
939
942 fn set_non_normal_other_parent_entries(&mut self, _force: bool) {
940 fn set_non_normal_other_parent_entries(&mut self, _force: bool) {
943 // Do nothing, this `DirstateMap` does not have a separate "non normal
941 // Do nothing, this `DirstateMap` does not have a separate "non normal
944 // entries" and "from other parent" sets that need to be recomputed
942 // entries" and "from other parent" sets that need to be recomputed
945 }
943 }
946
944
947 fn iter_non_normal_paths(
945 fn iter_non_normal_paths(
948 &mut self,
946 &mut self,
949 ) -> Box<
947 ) -> Box<
950 dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + Send + '_,
948 dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + Send + '_,
951 > {
949 > {
952 self.iter_non_normal_paths_panic()
950 self.iter_non_normal_paths_panic()
953 }
951 }
954
952
955 fn iter_non_normal_paths_panic(
953 fn iter_non_normal_paths_panic(
956 &self,
954 &self,
957 ) -> Box<
955 ) -> Box<
958 dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + Send + '_,
956 dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + Send + '_,
959 > {
957 > {
960 Box::new(self.filter_full_paths(|entry| entry.is_non_normal()))
958 Box::new(self.filter_full_paths(|entry| entry.is_non_normal()))
961 }
959 }
962
960
963 fn iter_other_parent_paths(
961 fn iter_other_parent_paths(
964 &mut self,
962 &mut self,
965 ) -> Box<
963 ) -> Box<
966 dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + Send + '_,
964 dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + Send + '_,
967 > {
965 > {
968 Box::new(self.filter_full_paths(|entry| entry.is_from_other_parent()))
966 Box::new(self.filter_full_paths(|entry| entry.is_from_other_parent()))
969 }
967 }
970
968
971 fn has_tracked_dir(
969 fn has_tracked_dir(
972 &mut self,
970 &mut self,
973 directory: &HgPath,
971 directory: &HgPath,
974 ) -> Result<bool, DirstateError> {
972 ) -> Result<bool, DirstateError> {
975 if let Some(node) = self.get_node(directory)? {
973 if let Some(node) = self.get_node(directory)? {
976 // A node without a `DirstateEntry` was created to hold child
974 // A node without a `DirstateEntry` was created to hold child
977 // nodes, and is therefore a directory.
975 // nodes, and is therefore a directory.
978 let state = node.state()?;
976 let state = node.state()?;
979 Ok(state.is_none() && node.tracked_descendants_count() > 0)
977 Ok(state.is_none() && node.tracked_descendants_count() > 0)
980 } else {
978 } else {
981 Ok(false)
979 Ok(false)
982 }
980 }
983 }
981 }
984
982
985 fn has_dir(&mut self, directory: &HgPath) -> Result<bool, DirstateError> {
983 fn has_dir(&mut self, directory: &HgPath) -> Result<bool, DirstateError> {
986 if let Some(node) = self.get_node(directory)? {
984 if let Some(node) = self.get_node(directory)? {
987 // A node without a `DirstateEntry` was created to hold child
985 // A node without a `DirstateEntry` was created to hold child
988 // nodes, and is therefore a directory.
986 // nodes, and is therefore a directory.
989 let state = node.state()?;
987 let state = node.state()?;
990 Ok(state.is_none() && node.descendants_with_entry_count() > 0)
988 Ok(state.is_none() && node.descendants_with_entry_count() > 0)
991 } else {
989 } else {
992 Ok(false)
990 Ok(false)
993 }
991 }
994 }
992 }
995
993
996 #[timed]
994 #[timed]
997 fn pack_v1(
995 fn pack_v1(
998 &mut self,
996 &mut self,
999 parents: DirstateParents,
997 parents: DirstateParents,
1000 now: Timestamp,
998 now: Timestamp,
1001 ) -> Result<Vec<u8>, DirstateError> {
999 ) -> Result<Vec<u8>, DirstateError> {
1002 let now: i32 = now.0.try_into().expect("time overflow");
1000 let now: i32 = now.0.try_into().expect("time overflow");
1003 let mut ambiguous_mtimes = Vec::new();
1001 let mut ambiguous_mtimes = Vec::new();
1004 // Optimization (to be measured?): pre-compute size to avoid `Vec`
1002 // Optimization (to be measured?): pre-compute size to avoid `Vec`
1005 // reallocations
1003 // reallocations
1006 let mut size = parents.as_bytes().len();
1004 let mut size = parents.as_bytes().len();
1007 for node in self.iter_nodes() {
1005 for node in self.iter_nodes() {
1008 let node = node?;
1006 let node = node?;
1009 if let Some(entry) = node.entry()? {
1007 if let Some(entry) = node.entry()? {
1010 size += packed_entry_size(
1008 size += packed_entry_size(
1011 node.full_path(self.on_disk)?,
1009 node.full_path(self.on_disk)?,
1012 node.copy_source(self.on_disk)?,
1010 node.copy_source(self.on_disk)?,
1013 );
1011 );
1014 if entry.mtime_is_ambiguous(now) {
1012 if entry.mtime_is_ambiguous(now) {
1015 ambiguous_mtimes.push(
1013 ambiguous_mtimes.push(
1016 node.full_path_borrowed(self.on_disk)?
1014 node.full_path_borrowed(self.on_disk)?
1017 .detach_from_tree(),
1015 .detach_from_tree(),
1018 )
1016 )
1019 }
1017 }
1020 }
1018 }
1021 }
1019 }
1022 self.clear_known_ambiguous_mtimes(&ambiguous_mtimes)?;
1020 self.clear_known_ambiguous_mtimes(&ambiguous_mtimes)?;
1023
1021
1024 let mut packed = Vec::with_capacity(size);
1022 let mut packed = Vec::with_capacity(size);
1025 packed.extend(parents.as_bytes());
1023 packed.extend(parents.as_bytes());
1026
1024
1027 for node in self.iter_nodes() {
1025 for node in self.iter_nodes() {
1028 let node = node?;
1026 let node = node?;
1029 if let Some(entry) = node.entry()? {
1027 if let Some(entry) = node.entry()? {
1030 pack_entry(
1028 pack_entry(
1031 node.full_path(self.on_disk)?,
1029 node.full_path(self.on_disk)?,
1032 &entry,
1030 &entry,
1033 node.copy_source(self.on_disk)?,
1031 node.copy_source(self.on_disk)?,
1034 &mut packed,
1032 &mut packed,
1035 );
1033 );
1036 }
1034 }
1037 }
1035 }
1038 Ok(packed)
1036 Ok(packed)
1039 }
1037 }
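For the pre-allocation above, a hedged sketch of what `packed_entry_size` computes, assuming the v1 on-disk entry layout of one state byte plus four big-endian 32-bit fields (mode, size, mtime, path length) followed by the path, with the copy source appended after a NUL byte when present:

// Illustrative reimplementation; the real helper lives with the v1 parsers.
fn packed_entry_size(path: &[u8], copy_source: Option<&[u8]>) -> usize {
    let fixed = 1 + 4 * 4; // state byte + mode, size, mtime, path length
    fixed + path.len() + copy_source.map_or(0, |c| 1 + c.len()) // 1 NUL byte
}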
1040
1038
1041 #[timed]
1039 #[timed]
1042 fn pack_v2(
1040 fn pack_v2(&mut self, now: Timestamp) -> Result<Vec<u8>, DirstateError> {
1043 &mut self,
1044 parents: DirstateParents,
1045 now: Timestamp,
1046 ) -> Result<Vec<u8>, DirstateError> {
1047 // TODO: how do we want to handle this in 2038?
1041 // TODO: how do we want to handle this in 2038?
1048 let now: i32 = now.0.try_into().expect("time overflow");
1042 let now: i32 = now.0.try_into().expect("time overflow");
1049 let mut paths = Vec::new();
1043 let mut paths = Vec::new();
1050 for node in self.iter_nodes() {
1044 for node in self.iter_nodes() {
1051 let node = node?;
1045 let node = node?;
1052 if let Some(entry) = node.entry()? {
1046 if let Some(entry) = node.entry()? {
1053 if entry.mtime_is_ambiguous(now) {
1047 if entry.mtime_is_ambiguous(now) {
1054 paths.push(
1048 paths.push(
1055 node.full_path_borrowed(self.on_disk)?
1049 node.full_path_borrowed(self.on_disk)?
1056 .detach_from_tree(),
1050 .detach_from_tree(),
1057 )
1051 )
1058 }
1052 }
1059 }
1053 }
1060 }
1054 }
1061 // Borrow of `self` ends here since we collect cloned paths
1055 // Borrow of `self` ends here since we collect cloned paths
1062
1056
1063 self.clear_known_ambiguous_mtimes(&paths)?;
1057 self.clear_known_ambiguous_mtimes(&paths)?;
1064
1058
1065 on_disk::write(self, parents)
1059 on_disk::write(self)
1066 }
1060 }
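Note the signature change in this hunk: `pack_v2` no longer receives `parents`, and `on_disk::write` no longer serializes them, because with the docket the parents now live in `.hg/dirstate` itself while the tree goes to a separate data file named after the uuid recorded in the docket. A hedged sketch of that file naming, following the docket's `dirstate.<uuid>.d` pattern (the helper itself is illustrative):

// Build the data file name, e.g. b"dirstate.<uuid>.d", from the docket uuid.
fn data_filename(uuid: &[u8]) -> Vec<u8> {
    let mut name =
        Vec::with_capacity(b"dirstate.".len() + uuid.len() + b".d".len());
    name.extend_from_slice(b"dirstate.");
    name.extend_from_slice(uuid);
    name.extend_from_slice(b".d");
    name
}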
1067
1061
1068 fn status<'a>(
1062 fn status<'a>(
1069 &'a mut self,
1063 &'a mut self,
1070 matcher: &'a (dyn Matcher + Sync),
1064 matcher: &'a (dyn Matcher + Sync),
1071 root_dir: PathBuf,
1065 root_dir: PathBuf,
1072 ignore_files: Vec<PathBuf>,
1066 ignore_files: Vec<PathBuf>,
1073 options: StatusOptions,
1067 options: StatusOptions,
1074 ) -> Result<(DirstateStatus<'a>, Vec<PatternFileWarning>), StatusError>
1068 ) -> Result<(DirstateStatus<'a>, Vec<PatternFileWarning>), StatusError>
1075 {
1069 {
1076 super::status::status(self, matcher, root_dir, ignore_files, options)
1070 super::status::status(self, matcher, root_dir, ignore_files, options)
1077 }
1071 }
1078
1072
1079 fn copy_map_len(&self) -> usize {
1073 fn copy_map_len(&self) -> usize {
1080 self.nodes_with_copy_source_count as usize
1074 self.nodes_with_copy_source_count as usize
1081 }
1075 }
1082
1076
1083 fn copy_map_iter(&self) -> CopyMapIter<'_> {
1077 fn copy_map_iter(&self) -> CopyMapIter<'_> {
1084 Box::new(filter_map_results(self.iter_nodes(), move |node| {
1078 Box::new(filter_map_results(self.iter_nodes(), move |node| {
1085 Ok(if let Some(source) = node.copy_source(self.on_disk)? {
1079 Ok(if let Some(source) = node.copy_source(self.on_disk)? {
1086 Some((node.full_path(self.on_disk)?, source))
1080 Some((node.full_path(self.on_disk)?, source))
1087 } else {
1081 } else {
1088 None
1082 None
1089 })
1083 })
1090 }))
1084 }))
1091 }
1085 }
1092
1086
1093 fn copy_map_contains_key(
1087 fn copy_map_contains_key(
1094 &self,
1088 &self,
1095 key: &HgPath,
1089 key: &HgPath,
1096 ) -> Result<bool, DirstateV2ParseError> {
1090 ) -> Result<bool, DirstateV2ParseError> {
1097 Ok(if let Some(node) = self.get_node(key)? {
1091 Ok(if let Some(node) = self.get_node(key)? {
1098 node.has_copy_source()
1092 node.has_copy_source()
1099 } else {
1093 } else {
1100 false
1094 false
1101 })
1095 })
1102 }
1096 }
1103
1097
1104 fn copy_map_get(
1098 fn copy_map_get(
1105 &self,
1099 &self,
1106 key: &HgPath,
1100 key: &HgPath,
1107 ) -> Result<Option<&HgPath>, DirstateV2ParseError> {
1101 ) -> Result<Option<&HgPath>, DirstateV2ParseError> {
1108 if let Some(node) = self.get_node(key)? {
1102 if let Some(node) = self.get_node(key)? {
1109 if let Some(source) = node.copy_source(self.on_disk)? {
1103 if let Some(source) = node.copy_source(self.on_disk)? {
1110 return Ok(Some(source));
1104 return Ok(Some(source));
1111 }
1105 }
1112 }
1106 }
1113 Ok(None)
1107 Ok(None)
1114 }
1108 }
1115
1109
1116 fn copy_map_remove(
1110 fn copy_map_remove(
1117 &mut self,
1111 &mut self,
1118 key: &HgPath,
1112 key: &HgPath,
1119 ) -> Result<Option<HgPathBuf>, DirstateV2ParseError> {
1113 ) -> Result<Option<HgPathBuf>, DirstateV2ParseError> {
1120 let count = &mut self.nodes_with_copy_source_count;
1114 let count = &mut self.nodes_with_copy_source_count;
1121 Ok(
1115 Ok(
1122 Self::get_node_mut(self.on_disk, &mut self.root, key)?.and_then(
1116 Self::get_node_mut(self.on_disk, &mut self.root, key)?.and_then(
1123 |node| {
1117 |node| {
1124 if node.copy_source.is_some() {
1118 if node.copy_source.is_some() {
1125 *count -= 1
1119 *count -= 1
1126 }
1120 }
1127 node.copy_source.take().map(Cow::into_owned)
1121 node.copy_source.take().map(Cow::into_owned)
1128 },
1122 },
1129 ),
1123 ),
1130 )
1124 )
1131 }
1125 }
1132
1126
1133 fn copy_map_insert(
1127 fn copy_map_insert(
1134 &mut self,
1128 &mut self,
1135 key: HgPathBuf,
1129 key: HgPathBuf,
1136 value: HgPathBuf,
1130 value: HgPathBuf,
1137 ) -> Result<Option<HgPathBuf>, DirstateV2ParseError> {
1131 ) -> Result<Option<HgPathBuf>, DirstateV2ParseError> {
1138 let node = Self::get_or_insert_node(
1132 let node = Self::get_or_insert_node(
1139 self.on_disk,
1133 self.on_disk,
1140 &mut self.root,
1134 &mut self.root,
1141 &key,
1135 &key,
1142 WithBasename::to_cow_owned,
1136 WithBasename::to_cow_owned,
1143 |_ancestor| {},
1137 |_ancestor| {},
1144 )?;
1138 )?;
1145 if node.copy_source.is_none() {
1139 if node.copy_source.is_none() {
1146 self.nodes_with_copy_source_count += 1
1140 self.nodes_with_copy_source_count += 1
1147 }
1141 }
1148 Ok(node.copy_source.replace(value.into()).map(Cow::into_owned))
1142 Ok(node.copy_source.replace(value.into()).map(Cow::into_owned))
1149 }
1143 }
1150
1144
1151 fn len(&self) -> usize {
1145 fn len(&self) -> usize {
1152 self.nodes_with_entry_count as usize
1146 self.nodes_with_entry_count as usize
1153 }
1147 }
1154
1148
1155 fn contains_key(
1149 fn contains_key(
1156 &self,
1150 &self,
1157 key: &HgPath,
1151 key: &HgPath,
1158 ) -> Result<bool, DirstateV2ParseError> {
1152 ) -> Result<bool, DirstateV2ParseError> {
1159 Ok(self.get(key)?.is_some())
1153 Ok(self.get(key)?.is_some())
1160 }
1154 }
1161
1155
1162 fn get(
1156 fn get(
1163 &self,
1157 &self,
1164 key: &HgPath,
1158 key: &HgPath,
1165 ) -> Result<Option<DirstateEntry>, DirstateV2ParseError> {
1159 ) -> Result<Option<DirstateEntry>, DirstateV2ParseError> {
1166 Ok(if let Some(node) = self.get_node(key)? {
1160 Ok(if let Some(node) = self.get_node(key)? {
1167 node.entry()?
1161 node.entry()?
1168 } else {
1162 } else {
1169 None
1163 None
1170 })
1164 })
1171 }
1165 }
1172
1166
1173 fn iter(&self) -> StateMapIter<'_> {
1167 fn iter(&self) -> StateMapIter<'_> {
1174 Box::new(filter_map_results(self.iter_nodes(), move |node| {
1168 Box::new(filter_map_results(self.iter_nodes(), move |node| {
1175 Ok(if let Some(entry) = node.entry()? {
1169 Ok(if let Some(entry) = node.entry()? {
1176 Some((node.full_path(self.on_disk)?, entry))
1170 Some((node.full_path(self.on_disk)?, entry))
1177 } else {
1171 } else {
1178 None
1172 None
1179 })
1173 })
1180 }))
1174 }))
1181 }
1175 }
1182
1176
1183 fn iter_directories(
1177 fn iter_directories(
1184 &self,
1178 &self,
1185 ) -> Box<
1179 ) -> Box<
1186 dyn Iterator<
1180 dyn Iterator<
1187 Item = Result<
1181 Item = Result<
1188 (&HgPath, Option<Timestamp>),
1182 (&HgPath, Option<Timestamp>),
1189 DirstateV2ParseError,
1183 DirstateV2ParseError,
1190 >,
1184 >,
1191 > + Send
1185 > + Send
1192 + '_,
1186 + '_,
1193 > {
1187 > {
1194 Box::new(filter_map_results(self.iter_nodes(), move |node| {
1188 Box::new(filter_map_results(self.iter_nodes(), move |node| {
1195 Ok(if node.state()?.is_none() {
1189 Ok(if node.state()?.is_none() {
1196 Some((
1190 Some((
1197 node.full_path(self.on_disk)?,
1191 node.full_path(self.on_disk)?,
1198 node.cached_directory_mtime()
1192 node.cached_directory_mtime()
1199 .map(|mtime| Timestamp(mtime.seconds())),
1193 .map(|mtime| Timestamp(mtime.seconds())),
1200 ))
1194 ))
1201 } else {
1195 } else {
1202 None
1196 None
1203 })
1197 })
1204 }))
1198 }))
1205 }
1199 }
1206 }
1200 }
@@ -1,487 +1,479 b''
1 use std::path::PathBuf;
1 use std::path::PathBuf;
2
2
3 use crate::dirstate::parsers::Timestamp;
3 use crate::dirstate::parsers::Timestamp;
4 use crate::dirstate_tree::on_disk::DirstateV2ParseError;
4 use crate::dirstate_tree::on_disk::DirstateV2ParseError;
5 use crate::matchers::Matcher;
5 use crate::matchers::Matcher;
6 use crate::utils::hg_path::{HgPath, HgPathBuf};
6 use crate::utils::hg_path::{HgPath, HgPathBuf};
7 use crate::CopyMapIter;
7 use crate::CopyMapIter;
8 use crate::DirstateEntry;
8 use crate::DirstateEntry;
9 use crate::DirstateError;
9 use crate::DirstateError;
10 use crate::DirstateMap;
10 use crate::DirstateMap;
11 use crate::DirstateParents;
11 use crate::DirstateParents;
12 use crate::DirstateStatus;
12 use crate::DirstateStatus;
13 use crate::PatternFileWarning;
13 use crate::PatternFileWarning;
14 use crate::StateMapIter;
14 use crate::StateMapIter;
15 use crate::StatusError;
15 use crate::StatusError;
16 use crate::StatusOptions;
16 use crate::StatusOptions;
17
17
18 /// `rust/hg-cpython/src/dirstate/dirstate_map.rs` implements in Rust a
18 /// `rust/hg-cpython/src/dirstate/dirstate_map.rs` implements in Rust a
19 /// `DirstateMap` Python class that wraps `Box<dyn DirstateMapMethods + Send>`,
19 /// `DirstateMap` Python class that wraps `Box<dyn DirstateMapMethods + Send>`,
20 /// a trait object of this trait. Except for constructors, this trait defines
20 /// a trait object of this trait. Except for constructors, this trait defines
21 /// all APIs that the class needs to interact with its inner dirstate map.
21 /// all APIs that the class needs to interact with its inner dirstate map.
22 ///
22 ///
23 /// A trait object is used to support two different concrete types:
23 /// A trait object is used to support two different concrete types:
24 ///
24 ///
25 /// * `rust/hg-core/src/dirstate/dirstate_map.rs` defines the "flat dirstate
25 /// * `rust/hg-core/src/dirstate/dirstate_map.rs` defines the "flat dirstate
26 /// map" which is based on a few large `HgPath`-keyed `HashMap` and `HashSet`
26 /// map" which is based on a few large `HgPath`-keyed `HashMap` and `HashSet`
27 /// fields.
27 /// fields.
28 /// * `rust/hg-core/src/dirstate_tree/dirstate_map.rs` defines the "tree
28 /// * `rust/hg-core/src/dirstate_tree/dirstate_map.rs` defines the "tree
29 /// dirstate map" based on a tree data struture with nodes for directories
29 /// dirstate map" based on a tree data struture with nodes for directories
30 /// containing child nodes for their files and sub-directories. This tree
30 /// containing child nodes for their files and sub-directories. This tree
31 /// enables a more efficient algorithm for `hg status`, but its details are
31 /// enables a more efficient algorithm for `hg status`, but its details are
32 /// abstracted in this trait.
32 /// abstracted in this trait.
33 ///
33 ///
34 /// The dirstate map associates paths of files in the working directory to
34 /// The dirstate map associates paths of files in the working directory to
35 /// various information about the state of those files.
35 /// various information about the state of those files.
36 pub trait DirstateMapMethods {
36 pub trait DirstateMapMethods {
37 /// Remove information about all files in this map
37 /// Remove information about all files in this map
38 fn clear(&mut self);
38 fn clear(&mut self);
39
39
40 /// Add or change the information associated to a given file.
40 /// Add or change the information associated to a given file.
41 ///
41 ///
42 /// `old_state` is the state in the entry that `get` would have returned
42 /// `old_state` is the state in the entry that `get` would have returned
43 /// before this call, or `EntryState::Unknown` if there was no such entry.
43 /// before this call, or `EntryState::Unknown` if there was no such entry.
44 ///
44 ///
45 /// `entry.state` should never be `EntryState::Unknown`.
45 /// `entry.state` should never be `EntryState::Unknown`.
46 fn add_file(
46 fn add_file(
47 &mut self,
47 &mut self,
48 filename: &HgPath,
48 filename: &HgPath,
49 entry: DirstateEntry,
49 entry: DirstateEntry,
50 added: bool,
50 added: bool,
51 merged: bool,
51 merged: bool,
52 from_p2: bool,
52 from_p2: bool,
53 possibly_dirty: bool,
53 possibly_dirty: bool,
54 ) -> Result<(), DirstateError>;
54 ) -> Result<(), DirstateError>;
55
55
56 /// Mark a file as "removed" (as in `hg rm`).
56 /// Mark a file as "removed" (as in `hg rm`).
57 ///
57 ///
58 /// `old_state` is the state in the entry that `get` would have returned
58 /// `old_state` is the state in the entry that `get` would have returned
59 /// before this call, or `EntryState::Unknown` if there was no such entry.
59 /// before this call, or `EntryState::Unknown` if there was no such entry.
60 ///
60 ///
61 /// `size` is not actually a size but the 0 or -1 or -2 value that would be
61 /// `size` is not actually a size but the 0 or -1 or -2 value that would be
62 /// put in the size field in the dirstate-v1 format.
62 /// put in the size field in the dirstate-v1 format.
63 fn remove_file(
63 fn remove_file(
64 &mut self,
64 &mut self,
65 filename: &HgPath,
65 filename: &HgPath,
66 in_merge: bool,
66 in_merge: bool,
67 ) -> Result<(), DirstateError>;
67 ) -> Result<(), DirstateError>;
68
68
69 /// Drop information about this file from the map if any, and return
69 /// Drop information about this file from the map if any, and return
70 /// whether there was any.
70 /// whether there was any.
71 ///
71 ///
72 /// `get` will now return `None` for this filename.
72 /// `get` will now return `None` for this filename.
73 ///
73 ///
74 /// `old_state` is the state in the entry that `get` would have returned
74 /// `old_state` is the state in the entry that `get` would have returned
75 /// before this call, or `EntryState::Unknown` if there was no such entry.
75 /// before this call, or `EntryState::Unknown` if there was no such entry.
76 fn drop_file(&mut self, filename: &HgPath) -> Result<bool, DirstateError>;
76 fn drop_file(&mut self, filename: &HgPath) -> Result<bool, DirstateError>;
77
77
78 /// Among given files, mark the stored `mtime` as ambiguous if there is one
78 /// Among given files, mark the stored `mtime` as ambiguous if there is one
79 /// (if `state == EntryState::Normal`) equal to the given current Unix
79 /// (if `state == EntryState::Normal`) equal to the given current Unix
80 /// timestamp.
80 /// timestamp.
81 fn clear_ambiguous_times(
81 fn clear_ambiguous_times(
82 &mut self,
82 &mut self,
83 filenames: Vec<HgPathBuf>,
83 filenames: Vec<HgPathBuf>,
84 now: i32,
84 now: i32,
85 ) -> Result<(), DirstateV2ParseError>;
85 ) -> Result<(), DirstateV2ParseError>;
86
86
87 /// Return whether the map has a "non-normal" entry for the given
87 /// Return whether the map has a "non-normal" entry for the given
88 /// filename. That is, any entry with a `state` other than
88 /// filename. That is, any entry with a `state` other than
89 /// `EntryState::Normal` or with an ambiguous `mtime`.
89 /// `EntryState::Normal` or with an ambiguous `mtime`.
90 fn non_normal_entries_contains(
90 fn non_normal_entries_contains(
91 &mut self,
91 &mut self,
92 key: &HgPath,
92 key: &HgPath,
93 ) -> Result<bool, DirstateV2ParseError>;
93 ) -> Result<bool, DirstateV2ParseError>;
94
94
95 /// Mark the given path as a "normal" file. This is only relevant in the flat
95 /// Mark the given path as a "normal" file. This is only relevant in the flat
96 /// dirstate map where there is a separate `HashSet` that needs to be kept
96 /// dirstate map where there is a separate `HashSet` that needs to be kept
97 /// up to date.
97 /// up to date.
98 fn non_normal_entries_remove(&mut self, key: &HgPath);
98 fn non_normal_entries_remove(&mut self, key: &HgPath);
99
99
100 /// Return an iterator of paths whose respective entries are either
100 /// Return an iterator of paths whose respective entries are either
101 /// "non-normal" (see `non_normal_entries_contains`) or "from other
101 /// "non-normal" (see `non_normal_entries_contains`) or "from other
102 /// parent".
102 /// parent".
103 ///
103 ///
104 /// If that information is cached, create the cache as needed.
104 /// If that information is cached, create the cache as needed.
105 ///
105 ///
106 /// "From other parent" is defined as `state == Normal && size == -2`.
106 /// "From other parent" is defined as `state == Normal && size == -2`.
107 ///
107 ///
108 /// Because parse errors can happen during iteration, the iterated items
108 /// Because parse errors can happen during iteration, the iterated items
109 /// are `Result`s.
109 /// are `Result`s.
110 fn non_normal_or_other_parent_paths(
110 fn non_normal_or_other_parent_paths(
111 &mut self,
111 &mut self,
112 ) -> Box<dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + '_>;
112 ) -> Box<dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + '_>;
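Both predicates named here can be read off this comment and the v1 encoding: "non-normal" is any state other than Normal, or a Normal entry whose ambiguous mtime was stored as -1; "from other parent" is Normal with size -2. An illustrative sketch:

// 'n' is the state byte for Normal in the v1 encoding.
fn is_non_normal(state: u8, mtime: i32) -> bool {
    state != b'n' || mtime == -1 // -1 marks an ambiguous mtime
}

fn is_from_other_parent(state: u8, size: i32) -> bool {
    state == b'n' && size == -2
}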
113
113
114 /// Create the cache for `non_normal_or_other_parent_paths` if needed.
114 /// Create the cache for `non_normal_or_other_parent_paths` if needed.
115 ///
115 ///
116 /// If `force` is true, the cache is re-created even if it already exists.
116 /// If `force` is true, the cache is re-created even if it already exists.
117 fn set_non_normal_other_parent_entries(&mut self, force: bool);
117 fn set_non_normal_other_parent_entries(&mut self, force: bool);
118
118
119 /// Return an iterator of paths whose respective entries are "non-normal"
119 /// Return an iterator of paths whose respective entries are "non-normal"
120 /// (see `non_normal_entries_contains`).
120 /// (see `non_normal_entries_contains`).
121 ///
121 ///
122 /// If that information is cached, create the cache as needed.
122 /// If that information is cached, create the cache as needed.
123 ///
123 ///
124 /// Because parse errors can happen during iteration, the iterated items
124 /// Because parse errors can happen during iteration, the iterated items
125 /// are `Result`s.
125 /// are `Result`s.
126 fn iter_non_normal_paths(
126 fn iter_non_normal_paths(
127 &mut self,
127 &mut self,
128 ) -> Box<
128 ) -> Box<
129 dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + Send + '_,
129 dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + Send + '_,
130 >;
130 >;
131
131
132 /// Same as `iter_non_normal_paths`, but takes `&self` instead of `&mut
132 /// Same as `iter_non_normal_paths`, but takes `&self` instead of `&mut
133 /// self`.
133 /// self`.
134 ///
134 ///
135 /// Panics if a cache is necessary but does not exist yet.
135 /// Panics if a cache is necessary but does not exist yet.
136 fn iter_non_normal_paths_panic(
136 fn iter_non_normal_paths_panic(
137 &self,
137 &self,
138 ) -> Box<
138 ) -> Box<
139 dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + Send + '_,
139 dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + Send + '_,
140 >;
140 >;
141
141
142 /// Return an iterator of paths whose respective entries are "from other
142 /// Return an iterator of paths whose respective entries are "from other
143 /// parent".
143 /// parent".
144 ///
144 ///
145 /// If that information is cached, create the cache as needed.
145 /// If that information is cached, create the cache as needed.
146 ///
146 ///
147 /// "From other parent" is defined as `state == Normal && size == -2`.
147 /// "From other parent" is defined as `state == Normal && size == -2`.
148 ///
148 ///
149 /// Because parse errors can happen during iteration, the iterated items
149 /// Because parse errors can happen during iteration, the iterated items
150 /// are `Result`s.
150 /// are `Result`s.
151 fn iter_other_parent_paths(
151 fn iter_other_parent_paths(
152 &mut self,
152 &mut self,
153 ) -> Box<
153 ) -> Box<
154 dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + Send + '_,
154 dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + Send + '_,
155 >;
155 >;
156
156
157 /// Returns whether the sub-tree rooted at the given directory contains any
157 /// Returns whether the sub-tree rooted at the given directory contains any
158 /// tracked file.
158 /// tracked file.
159 ///
159 ///
160 /// A file is tracked if it has a `state` other than `EntryState::Removed`.
160 /// A file is tracked if it has a `state` other than `EntryState::Removed`.
161 fn has_tracked_dir(
161 fn has_tracked_dir(
162 &mut self,
162 &mut self,
163 directory: &HgPath,
163 directory: &HgPath,
164 ) -> Result<bool, DirstateError>;
164 ) -> Result<bool, DirstateError>;
165
165
166 /// Returns whether the sub-tree rooted at the given directory contains any
166 /// Returns whether the sub-tree rooted at the given directory contains any
167 /// file with a dirstate entry.
167 /// file with a dirstate entry.
168 fn has_dir(&mut self, directory: &HgPath) -> Result<bool, DirstateError>;
168 fn has_dir(&mut self, directory: &HgPath) -> Result<bool, DirstateError>;
169
169
170 /// Clear mtimes that are ambiguous with `now` (similar to
170 /// Clear mtimes that are ambiguous with `now` (similar to
171 /// `clear_ambiguous_times` but for all files in the dirstate map), and
171 /// `clear_ambiguous_times` but for all files in the dirstate map), and
172 /// serialize bytes to write the `.hg/dirstate` file to disk in dirstate-v1
172 /// serialize bytes to write the `.hg/dirstate` file to disk in dirstate-v1
173 /// format.
173 /// format.
174 fn pack_v1(
174 fn pack_v1(
175 &mut self,
175 &mut self,
176 parents: DirstateParents,
176 parents: DirstateParents,
177 now: Timestamp,
177 now: Timestamp,
178 ) -> Result<Vec<u8>, DirstateError>;
178 ) -> Result<Vec<u8>, DirstateError>;
179
179
180 /// Clear mtimes that are ambiguous with `now` (similar to
180 /// Clear mtimes that are ambiguous with `now` (similar to
181 /// `clear_ambiguous_times` but for all files in the dirstate map), and
181 /// `clear_ambiguous_times` but for all files in the dirstate map), and
182 /// serialize bytes to write the `.hg/dirstate` file to disk in dirstate-v2
182 /// serialize bytes to write the `.hg/dirstate` file to disk in dirstate-v2
183 /// format.
183 /// format.
184 ///
184 ///
185 /// Note: this is only supported by the tree dirstate map.
185 /// Note: this is only supported by the tree dirstate map.
186 fn pack_v2(
186 fn pack_v2(&mut self, now: Timestamp) -> Result<Vec<u8>, DirstateError>;
187 &mut self,
188 parents: DirstateParents,
189 now: Timestamp,
190 ) -> Result<Vec<u8>, DirstateError>;
191
187
192 /// Run the status algorithm.
188 /// Run the status algorithm.
193 ///
189 ///
194 /// This is not semantically a method of the dirstate map, but a different
190 /// This is not semantically a method of the dirstate map, but a different
195 /// algorithm is used for the flat vs. tree dirstate map, so having it in
191 /// algorithm is used for the flat vs. tree dirstate map, so having it in
196 /// this trait enables the same dynamic dispatch as with other methods.
192 /// this trait enables the same dynamic dispatch as with other methods.
197 fn status<'a>(
193 fn status<'a>(
198 &'a mut self,
194 &'a mut self,
199 matcher: &'a (dyn Matcher + Sync),
195 matcher: &'a (dyn Matcher + Sync),
200 root_dir: PathBuf,
196 root_dir: PathBuf,
201 ignore_files: Vec<PathBuf>,
197 ignore_files: Vec<PathBuf>,
202 options: StatusOptions,
198 options: StatusOptions,
203 ) -> Result<(DirstateStatus<'a>, Vec<PatternFileWarning>), StatusError>;
199 ) -> Result<(DirstateStatus<'a>, Vec<PatternFileWarning>), StatusError>;
204
200
205 /// Returns how many files in the dirstate map have a recorded copy source.
201 /// Returns how many files in the dirstate map have a recorded copy source.
206 fn copy_map_len(&self) -> usize;
202 fn copy_map_len(&self) -> usize;
207
203
208 /// Returns an iterator of `(path, copy_source)` for all files that have a
204 /// Returns an iterator of `(path, copy_source)` for all files that have a
209 /// copy source.
205 /// copy source.
210 fn copy_map_iter(&self) -> CopyMapIter<'_>;
206 fn copy_map_iter(&self) -> CopyMapIter<'_>;
211
207
212 /// Returns whether the given file has a copy source.
208 /// Returns whether the given file has a copy source.
213 fn copy_map_contains_key(
209 fn copy_map_contains_key(
214 &self,
210 &self,
215 key: &HgPath,
211 key: &HgPath,
216 ) -> Result<bool, DirstateV2ParseError>;
212 ) -> Result<bool, DirstateV2ParseError>;
217
213
218 /// Returns the copy source for the given file.
214 /// Returns the copy source for the given file.
219 fn copy_map_get(
215 fn copy_map_get(
220 &self,
216 &self,
221 key: &HgPath,
217 key: &HgPath,
222 ) -> Result<Option<&HgPath>, DirstateV2ParseError>;
218 ) -> Result<Option<&HgPath>, DirstateV2ParseError>;
223
219
224 /// Removes the recorded copy source if any for the given file, and returns
220 /// Removes the recorded copy source if any for the given file, and returns
225 /// it.
221 /// it.
226 fn copy_map_remove(
222 fn copy_map_remove(
227 &mut self,
223 &mut self,
228 key: &HgPath,
224 key: &HgPath,
229 ) -> Result<Option<HgPathBuf>, DirstateV2ParseError>;
225 ) -> Result<Option<HgPathBuf>, DirstateV2ParseError>;
230
226
231 /// Set the given `value` copy source for the given `key` file.
227 /// Set the given `value` copy source for the given `key` file.
232 fn copy_map_insert(
228 fn copy_map_insert(
233 &mut self,
229 &mut self,
234 key: HgPathBuf,
230 key: HgPathBuf,
235 value: HgPathBuf,
231 value: HgPathBuf,
236 ) -> Result<Option<HgPathBuf>, DirstateV2ParseError>;
232 ) -> Result<Option<HgPathBuf>, DirstateV2ParseError>;
237
233
238 /// Returns the number of files that have an entry.
234 /// Returns the number of files that have an entry.
239 fn len(&self) -> usize;
235 fn len(&self) -> usize;
240
236
241 /// Returns whether the given file has an entry.
237 /// Returns whether the given file has an entry.
242 fn contains_key(&self, key: &HgPath)
238 fn contains_key(&self, key: &HgPath)
243 -> Result<bool, DirstateV2ParseError>;
239 -> Result<bool, DirstateV2ParseError>;
244
240
245 /// Returns the entry, if any, for the given file.
241 /// Returns the entry, if any, for the given file.
246 fn get(
242 fn get(
247 &self,
243 &self,
248 key: &HgPath,
244 key: &HgPath,
249 ) -> Result<Option<DirstateEntry>, DirstateV2ParseError>;
245 ) -> Result<Option<DirstateEntry>, DirstateV2ParseError>;
250
246
251 /// Returns a `(path, entry)` iterator of files that have an entry.
247 /// Returns a `(path, entry)` iterator of files that have an entry.
252 ///
248 ///
253 /// Because parse errors can happen during iteration, the iterated items
249 /// Because parse errors can happen during iteration, the iterated items
254 /// are `Result`s.
250 /// are `Result`s.
255 fn iter(&self) -> StateMapIter<'_>;
251 fn iter(&self) -> StateMapIter<'_>;
256
252
257 /// In the tree dirstate, return an iterator of "directory" (entry-less)
253 /// In the tree dirstate, return an iterator of "directory" (entry-less)
258 /// nodes with the data stored for them. This is for `hg debugdirstate
254 /// nodes with the data stored for them. This is for `hg debugdirstate
259 /// --dirs`.
255 /// --dirs`.
260 ///
256 ///
261 /// In the flat dirstate, returns an empty iterator.
257 /// In the flat dirstate, returns an empty iterator.
262 ///
258 ///
263 /// Because parse errors can happen during iteration, the iterated items
259 /// Because parse errors can happen during iteration, the iterated items
264 /// are `Result`s.
260 /// are `Result`s.
265 fn iter_directories(
261 fn iter_directories(
266 &self,
262 &self,
267 ) -> Box<
263 ) -> Box<
268 dyn Iterator<
264 dyn Iterator<
269 Item = Result<
265 Item = Result<
270 (&HgPath, Option<Timestamp>),
266 (&HgPath, Option<Timestamp>),
271 DirstateV2ParseError,
267 DirstateV2ParseError,
272 >,
268 >,
273 > + Send
269 > + Send
274 + '_,
270 + '_,
275 >;
271 >;
276 }
272 }
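As the doc comment at the top of this trait describes, both concrete maps are used behind `Box<dyn DirstateMapMethods + Send>`. A minimal sketch of that type erasure (the helper is illustrative, not hg-core API):

// Either concrete map can be boxed into the trait object that the
// Python-facing `DirstateMap` class wraps.
fn boxed<M>(map: M) -> Box<dyn DirstateMapMethods + Send>
where
    M: DirstateMapMethods + Send + 'static,
{
    Box::new(map)
}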
277
273
278 impl DirstateMapMethods for DirstateMap {
274 impl DirstateMapMethods for DirstateMap {
279 fn clear(&mut self) {
275 fn clear(&mut self) {
280 self.clear()
276 self.clear()
281 }
277 }
282
278
283 fn add_file(
279 fn add_file(
284 &mut self,
280 &mut self,
285 filename: &HgPath,
281 filename: &HgPath,
286 entry: DirstateEntry,
282 entry: DirstateEntry,
287 added: bool,
283 added: bool,
288 merged: bool,
284 merged: bool,
289 from_p2: bool,
285 from_p2: bool,
290 possibly_dirty: bool,
286 possibly_dirty: bool,
291 ) -> Result<(), DirstateError> {
287 ) -> Result<(), DirstateError> {
292 self.add_file(filename, entry, added, merged, from_p2, possibly_dirty)
288 self.add_file(filename, entry, added, merged, from_p2, possibly_dirty)
293 }
289 }
294
290
295 fn remove_file(
291 fn remove_file(
296 &mut self,
292 &mut self,
297 filename: &HgPath,
293 filename: &HgPath,
298 in_merge: bool,
294 in_merge: bool,
299 ) -> Result<(), DirstateError> {
295 ) -> Result<(), DirstateError> {
300 self.remove_file(filename, in_merge)
296 self.remove_file(filename, in_merge)
301 }
297 }
302
298
303 fn drop_file(&mut self, filename: &HgPath) -> Result<bool, DirstateError> {
299 fn drop_file(&mut self, filename: &HgPath) -> Result<bool, DirstateError> {
304 self.drop_file(filename)
300 self.drop_file(filename)
305 }
301 }
306
302
307 fn clear_ambiguous_times(
303 fn clear_ambiguous_times(
308 &mut self,
304 &mut self,
309 filenames: Vec<HgPathBuf>,
305 filenames: Vec<HgPathBuf>,
310 now: i32,
306 now: i32,
311 ) -> Result<(), DirstateV2ParseError> {
307 ) -> Result<(), DirstateV2ParseError> {
312 Ok(self.clear_ambiguous_times(filenames, now))
308 Ok(self.clear_ambiguous_times(filenames, now))
313 }
309 }
314
310
315 fn non_normal_entries_contains(
311 fn non_normal_entries_contains(
316 &mut self,
312 &mut self,
317 key: &HgPath,
313 key: &HgPath,
318 ) -> Result<bool, DirstateV2ParseError> {
314 ) -> Result<bool, DirstateV2ParseError> {
319 let (non_normal, _other_parent) =
315 let (non_normal, _other_parent) =
320 self.get_non_normal_other_parent_entries();
316 self.get_non_normal_other_parent_entries();
321 Ok(non_normal.contains(key))
317 Ok(non_normal.contains(key))
322 }
318 }
323
319
324 fn non_normal_entries_remove(&mut self, key: &HgPath) {
320 fn non_normal_entries_remove(&mut self, key: &HgPath) {
325 self.non_normal_entries_remove(key)
321 self.non_normal_entries_remove(key)
326 }
322 }
327
323
328 fn non_normal_or_other_parent_paths(
324 fn non_normal_or_other_parent_paths(
329 &mut self,
325 &mut self,
330 ) -> Box<dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + '_>
326 ) -> Box<dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + '_>
331 {
327 {
332 let (non_normal, other_parent) =
328 let (non_normal, other_parent) =
333 self.get_non_normal_other_parent_entries();
329 self.get_non_normal_other_parent_entries();
334 Box::new(non_normal.union(other_parent).map(|p| Ok(&**p)))
330 Box::new(non_normal.union(other_parent).map(|p| Ok(&**p)))
335 }
331 }
336
332
337 fn set_non_normal_other_parent_entries(&mut self, force: bool) {
333 fn set_non_normal_other_parent_entries(&mut self, force: bool) {
338 self.set_non_normal_other_parent_entries(force)
334 self.set_non_normal_other_parent_entries(force)
339 }
335 }
340
336
341 fn iter_non_normal_paths(
337 fn iter_non_normal_paths(
342 &mut self,
338 &mut self,
343 ) -> Box<
339 ) -> Box<
344 dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + Send + '_,
340 dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + Send + '_,
345 > {
341 > {
346 let (non_normal, _other_parent) =
342 let (non_normal, _other_parent) =
347 self.get_non_normal_other_parent_entries();
343 self.get_non_normal_other_parent_entries();
348 Box::new(non_normal.iter().map(|p| Ok(&**p)))
344 Box::new(non_normal.iter().map(|p| Ok(&**p)))
349 }
345 }
350
346
351 fn iter_non_normal_paths_panic(
347 fn iter_non_normal_paths_panic(
352 &self,
348 &self,
353 ) -> Box<
349 ) -> Box<
354 dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + Send + '_,
350 dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + Send + '_,
355 > {
351 > {
356 let (non_normal, _other_parent) =
352 let (non_normal, _other_parent) =
357 self.get_non_normal_other_parent_entries_panic();
353 self.get_non_normal_other_parent_entries_panic();
358 Box::new(non_normal.iter().map(|p| Ok(&**p)))
354 Box::new(non_normal.iter().map(|p| Ok(&**p)))
359 }
355 }
360
356
361 fn iter_other_parent_paths(
357 fn iter_other_parent_paths(
362 &mut self,
358 &mut self,
363 ) -> Box<
359 ) -> Box<
364 dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + Send + '_,
360 dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + Send + '_,
365 > {
361 > {
366 let (_non_normal, other_parent) =
362 let (_non_normal, other_parent) =
367 self.get_non_normal_other_parent_entries();
363 self.get_non_normal_other_parent_entries();
368 Box::new(other_parent.iter().map(|p| Ok(&**p)))
364 Box::new(other_parent.iter().map(|p| Ok(&**p)))
369 }
365 }
370
366
371 fn has_tracked_dir(
367 fn has_tracked_dir(
372 &mut self,
368 &mut self,
373 directory: &HgPath,
369 directory: &HgPath,
374 ) -> Result<bool, DirstateError> {
370 ) -> Result<bool, DirstateError> {
375 self.has_tracked_dir(directory)
371 self.has_tracked_dir(directory)
376 }
372 }
377
373
378 fn has_dir(&mut self, directory: &HgPath) -> Result<bool, DirstateError> {
374 fn has_dir(&mut self, directory: &HgPath) -> Result<bool, DirstateError> {
379 self.has_dir(directory)
375 self.has_dir(directory)
380 }
376 }
381
377
382 fn pack_v1(
378 fn pack_v1(
383 &mut self,
379 &mut self,
384 parents: DirstateParents,
380 parents: DirstateParents,
385 now: Timestamp,
381 now: Timestamp,
386 ) -> Result<Vec<u8>, DirstateError> {
382 ) -> Result<Vec<u8>, DirstateError> {
387 self.pack(parents, now)
383 self.pack(parents, now)
388 }
384 }
389
385
390 fn pack_v2(
386 fn pack_v2(&mut self, _now: Timestamp) -> Result<Vec<u8>, DirstateError> {
391 &mut self,
392 _parents: DirstateParents,
393 _now: Timestamp,
394 ) -> Result<Vec<u8>, DirstateError> {
395 panic!(
387 panic!(
396 "should have used dirstate_tree::DirstateMap to use the v2 format"
388 "should have used dirstate_tree::DirstateMap to use the v2 format"
397 )
389 )
398 }
390 }
399
391
400 fn status<'a>(
392 fn status<'a>(
401 &'a mut self,
393 &'a mut self,
402 matcher: &'a (dyn Matcher + Sync),
394 matcher: &'a (dyn Matcher + Sync),
403 root_dir: PathBuf,
395 root_dir: PathBuf,
404 ignore_files: Vec<PathBuf>,
396 ignore_files: Vec<PathBuf>,
405 options: StatusOptions,
397 options: StatusOptions,
406 ) -> Result<(DirstateStatus<'a>, Vec<PatternFileWarning>), StatusError>
398 ) -> Result<(DirstateStatus<'a>, Vec<PatternFileWarning>), StatusError>
407 {
399 {
408 crate::status(self, matcher, root_dir, ignore_files, options)
400 crate::status(self, matcher, root_dir, ignore_files, options)
409 }
401 }
410
402
411 fn copy_map_len(&self) -> usize {
403 fn copy_map_len(&self) -> usize {
412 self.copy_map.len()
404 self.copy_map.len()
413 }
405 }
414
406
415 fn copy_map_iter(&self) -> CopyMapIter<'_> {
407 fn copy_map_iter(&self) -> CopyMapIter<'_> {
416 Box::new(
408 Box::new(
417 self.copy_map
409 self.copy_map
418 .iter()
410 .iter()
419 .map(|(key, value)| Ok((&**key, &**value))),
411 .map(|(key, value)| Ok((&**key, &**value))),
420 )
412 )
421 }
413 }
422
414
423 fn copy_map_contains_key(
415 fn copy_map_contains_key(
424 &self,
416 &self,
425 key: &HgPath,
417 key: &HgPath,
426 ) -> Result<bool, DirstateV2ParseError> {
418 ) -> Result<bool, DirstateV2ParseError> {
427 Ok(self.copy_map.contains_key(key))
419 Ok(self.copy_map.contains_key(key))
428 }
420 }
429
421
430 fn copy_map_get(
422 fn copy_map_get(
431 &self,
423 &self,
432 key: &HgPath,
424 key: &HgPath,
433 ) -> Result<Option<&HgPath>, DirstateV2ParseError> {
425 ) -> Result<Option<&HgPath>, DirstateV2ParseError> {
434 Ok(self.copy_map.get(key).map(|p| &**p))
426 Ok(self.copy_map.get(key).map(|p| &**p))
435 }
427 }
436
428
437 fn copy_map_remove(
429 fn copy_map_remove(
438 &mut self,
430 &mut self,
439 key: &HgPath,
431 key: &HgPath,
440 ) -> Result<Option<HgPathBuf>, DirstateV2ParseError> {
432 ) -> Result<Option<HgPathBuf>, DirstateV2ParseError> {
441 Ok(self.copy_map.remove(key))
433 Ok(self.copy_map.remove(key))
442 }
434 }
443
435
444 fn copy_map_insert(
436 fn copy_map_insert(
445 &mut self,
437 &mut self,
446 key: HgPathBuf,
438 key: HgPathBuf,
447 value: HgPathBuf,
439 value: HgPathBuf,
448 ) -> Result<Option<HgPathBuf>, DirstateV2ParseError> {
440 ) -> Result<Option<HgPathBuf>, DirstateV2ParseError> {
449 Ok(self.copy_map.insert(key, value))
441 Ok(self.copy_map.insert(key, value))
450 }
442 }
451
443
452 fn len(&self) -> usize {
444 fn len(&self) -> usize {
453 (&**self).len()
445 (&**self).len()
454 }
446 }
455
447
456 fn contains_key(
448 fn contains_key(
457 &self,
449 &self,
458 key: &HgPath,
450 key: &HgPath,
459 ) -> Result<bool, DirstateV2ParseError> {
451 ) -> Result<bool, DirstateV2ParseError> {
460 Ok((&**self).contains_key(key))
452 Ok((&**self).contains_key(key))
461 }
453 }
462
454
463 fn get(
455 fn get(
464 &self,
456 &self,
465 key: &HgPath,
457 key: &HgPath,
466 ) -> Result<Option<DirstateEntry>, DirstateV2ParseError> {
458 ) -> Result<Option<DirstateEntry>, DirstateV2ParseError> {
467 Ok((&**self).get(key).cloned())
459 Ok((&**self).get(key).cloned())
468 }
460 }
469
461
470 fn iter(&self) -> StateMapIter<'_> {
462 fn iter(&self) -> StateMapIter<'_> {
471 Box::new((&**self).iter().map(|(key, value)| Ok((&**key, *value))))
463 Box::new((&**self).iter().map(|(key, value)| Ok((&**key, *value))))
472 }
464 }
473
465
474 fn iter_directories(
466 fn iter_directories(
475 &self,
467 &self,
476 ) -> Box<
468 ) -> Box<
477 dyn Iterator<
469 dyn Iterator<
478 Item = Result<
470 Item = Result<
479 (&HgPath, Option<Timestamp>),
471 (&HgPath, Option<Timestamp>),
480 DirstateV2ParseError,
472 DirstateV2ParseError,
481 >,
473 >,
482 > + Send
474 > + Send
483 + '_,
475 + '_,
484 > {
476 > {
485 Box::new(std::iter::empty())
477 Box::new(std::iter::empty())
486 }
478 }
487 }
479 }
@@ -1,609 +1,636 b''
1 //! The "version 2" disk representation of the dirstate
1 //! The "version 2" disk representation of the dirstate
2 //!
2 //!
3 //! # File format
3 //! # File format
4 //!
4 //!
5 //! The file starts with a fixed-sized header, whose layout is defined by the
5 //! The file starts with a fixed-sized header, whose layout is defined by the
6 //! `Header` struct. Its `root` field contains the slice (offset and length) to
6 //! `Header` struct. Its `root` field contains the slice (offset and length) to
7 //! the nodes representing the files and directories at the root of the
7 //! the nodes representing the files and directories at the root of the
8 //! repository. Each node is also fixed-size, defined by the `Node` struct.
8 //! repository. Each node is also fixed-size, defined by the `Node` struct.
9 //! Nodes in turn contain slices to variable-size paths, and to their own child
9 //! Nodes in turn contain slices to variable-size paths, and to their own child
10 //! nodes (if any) for nested files and directories.
10 //! nodes (if any) for nested files and directories.
11
11
12 use crate::dirstate_tree::dirstate_map::{self, DirstateMap, NodeRef};
12 use crate::dirstate_tree::dirstate_map::{self, DirstateMap, NodeRef};
13 use crate::dirstate_tree::path_with_basename::WithBasename;
13 use crate::dirstate_tree::path_with_basename::WithBasename;
14 use crate::errors::HgError;
14 use crate::errors::HgError;
15 use crate::utils::hg_path::HgPath;
15 use crate::utils::hg_path::HgPath;
16 use crate::DirstateEntry;
16 use crate::DirstateEntry;
17 use crate::DirstateError;
17 use crate::DirstateError;
18 use crate::DirstateParents;
18 use crate::DirstateParents;
19 use crate::EntryState;
19 use crate::EntryState;
20 use bytes_cast::unaligned::{I32Be, I64Be, U32Be};
20 use bytes_cast::unaligned::{I32Be, I64Be, U32Be};
21 use bytes_cast::BytesCast;
21 use bytes_cast::BytesCast;
22 use format_bytes::format_bytes;
22 use std::borrow::Cow;
23 use std::borrow::Cow;
23 use std::convert::TryFrom;
24 use std::convert::TryFrom;
24 use std::time::{Duration, SystemTime, UNIX_EPOCH};
25 use std::time::{Duration, SystemTime, UNIX_EPOCH};
25
26
26 /// Added at the start of `.hg/dirstate` when the "v2" format is used.
27 /// Added at the start of `.hg/dirstate` when the "v2" format is used.
27 /// This is a redundant sanity check more than an actual "magic number" since
28 /// This is a redundant sanity check more than an actual "magic number" since
28 /// `.hg/requires` already governs which format should be used.
29 /// `.hg/requires` already governs which format should be used.
29 pub const V2_FORMAT_MARKER: &[u8; 12] = b"dirstate-v2\n";
30 pub const V2_FORMAT_MARKER: &[u8; 12] = b"dirstate-v2\n";
30
31
32 /// Keep space for 256-bit hashes
33 const STORED_NODE_ID_BYTES: usize = 32;
34
35 /// … even though only 160 bits are used for now, with SHA-1
36 const USED_NODE_ID_BYTES: usize = 20;
37
31 pub(super) const IGNORE_PATTERNS_HASH_LEN: usize = 20;
38 pub(super) const IGNORE_PATTERNS_HASH_LEN: usize = 20;
32 pub(super) type IgnorePatternsHash = [u8; IGNORE_PATTERNS_HASH_LEN];
39 pub(super) type IgnorePatternsHash = [u8; IGNORE_PATTERNS_HASH_LEN];
33
40
41 // Must match `HEADER` in `mercurial/dirstateutils/docket.py`
42 #[derive(BytesCast)]
43 #[repr(C)]
44 struct DocketHeader {
45 marker: [u8; V2_FORMAT_MARKER.len()],
46 parent_1: [u8; STORED_NODE_ID_BYTES],
47 parent_2: [u8; STORED_NODE_ID_BYTES],
48 data_size: Size,
49 uuid_size: u8,
50 }
51
52 pub struct Docket<'on_disk> {
53 header: &'on_disk DocketHeader,
54 uuid: &'on_disk [u8],
55 }
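A hedged sketch of how these bytes are read back with `bytes_cast` (the actual read path follows later in this file; this version assumes `DirstateV2ParseError` stays the unit error type defined in this module):

pub fn read_docket(
    on_disk: &[u8],
) -> Result<Docket<'_>, DirstateV2ParseError> {
    // Overlay the fixed-size header on the start of the buffer; the uuid
    // is the remainder and must be exactly `uuid_size` bytes long.
    let (header, uuid) = DocketHeader::from_bytes(on_disk)
        .map_err(|_| DirstateV2ParseError)?;
    if header.marker == *V2_FORMAT_MARKER
        && uuid.len() == header.uuid_size as usize
    {
        Ok(Docket { header, uuid })
    } else {
        Err(DirstateV2ParseError)
    }
}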
56
34 #[derive(BytesCast)]
57 #[derive(BytesCast)]
35 #[repr(C)]
58 #[repr(C)]
36 struct Header {
59 struct Header {
37 marker: [u8; V2_FORMAT_MARKER.len()],
38
39 /// `dirstatemap.parents()` in `mercurial/dirstate.py` relies on this
40 /// `parents` field being at this offset, immediately after `marker`.
41 parents: DirstateParents,
42
43 root: ChildNodes,
60 root: ChildNodes,
44 nodes_with_entry_count: Size,
61 nodes_with_entry_count: Size,
45 nodes_with_copy_source_count: Size,
62 nodes_with_copy_source_count: Size,
46
63
47 /// If non-zero, a hash of ignore files that were used for some previous
64 /// If non-zero, a hash of ignore files that were used for some previous
48 /// run of the `status` algorithm.
65 /// run of the `status` algorithm.
49 ///
66 ///
50 /// We define:
67 /// We define:
51 ///
68 ///
52 /// * "Root" ignore files are `.hgignore` at the root of the repository if
69 /// * "Root" ignore files are `.hgignore` at the root of the repository if
53 /// it exists, and files from `ui.ignore.*` config. This set of files is
70 /// it exists, and files from `ui.ignore.*` config. This set of files is
54 /// then sorted by the string representation of their path.
71 /// then sorted by the string representation of their path.
55 /// * The "expanded contents" of an ignore file is the byte string made
72 /// * The "expanded contents" of an ignore file is the byte string made
56 /// by concatenating its contents with the "expanded contents" of other
73 /// by concatenating its contents with the "expanded contents" of other
57 /// files included with `include:` or `subinclude:` files, in inclusion
74 /// files included with `include:` or `subinclude:` files, in inclusion
58 /// order. This definition is recursive, as included files can
75 /// order. This definition is recursive, as included files can
59 /// themselves include more files.
76 /// themselves include more files.
60 ///
77 ///
61 /// This hash is defined as the SHA-1 of the concatenation (in sorted
78 /// This hash is defined as the SHA-1 of the concatenation (in sorted
62 /// order) of the "expanded contents" of each "root" ignore file.
79 /// order) of the "expanded contents" of each "root" ignore file.
63 /// (Note that computing this does not require actually concatenating byte
80 /// (Note that computing this does not require actually concatenating byte
64 /// strings into contiguous memory, instead SHA-1 hashing can be done
81 /// strings into contiguous memory, instead SHA-1 hashing can be done
65 /// incrementally.)
82 /// incrementally.)
66 ignore_patterns_hash: IgnorePatternsHash,
83 ignore_patterns_hash: IgnorePatternsHash,
67 }
84 }
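A hedged sketch of the incremental hashing that the parenthetical above allows, using the RustCrypto `sha-1` crate for illustration (hg-core's actual hashing code may differ):

use sha1::{Digest, Sha1};

// Feed each root ignore file's "expanded contents" in sorted order; no
// contiguous concatenation is ever materialized.
fn hash_ignore_patterns(expanded_contents: &[Vec<u8>]) -> IgnorePatternsHash {
    let mut hasher = Sha1::new();
    for contents in expanded_contents {
        hasher.update(contents);
    }
    hasher.finalize().into()
}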

 #[derive(BytesCast)]
 #[repr(C)]
 pub(super) struct Node {
     full_path: PathSlice,

     /// In bytes from `self.full_path.start`
     base_name_start: Size,

     copy_source: OptPathSlice,
     children: ChildNodes,
     pub(super) descendants_with_entry_count: Size,
     pub(super) tracked_descendants_count: Size,

     /// Depending on the value of `state`:
     ///
     /// * A null byte: `data` is not used.
     ///
     /// * A `n`, `a`, `r`, or `m` ASCII byte: `state` and `data` together
     ///   represent a dirstate entry like in the v1 format.
     ///
     /// * A `d` ASCII byte: the bytes of `data` should instead be interpreted
     ///   as the `Timestamp` for the mtime of a cached directory.
     ///
     ///   The presence of this state means that at some point, this path in
     ///   the working directory was observed:
     ///
     ///   - To be a directory
     ///   - With the modification time as given by `Timestamp`
     ///   - That timestamp was already strictly in the past when observed,
     ///     meaning that later changes cannot happen in the same clock tick
     ///     and must cause a different modification time (unless the system
     ///     clock jumps back and we get unlucky, which is not impossible but
     ///     deemed unlikely enough).
     ///   - All direct children of this directory (as returned by
     ///     `std::fs::read_dir`) either have a corresponding dirstate node,
     ///     or are ignored by ignore patterns whose hash is in
     ///     `Header::ignore_patterns_hash`.
     ///
     ///   This means that if `std::fs::symlink_metadata` later reports the
     ///   same modification time and ignore patterns haven’t changed, a run
     ///   of status that is not listing ignored files can skip calling
     ///   `std::fs::read_dir` again for this directory, iterating child
     ///   dirstate nodes instead.
     state: u8,
     data: Entry,
 }
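A sketch of how a status run might exploit such a cached entry; names are illustrative and the real check lives in the status implementation, but it follows the contract documented above:

    use std::fs;
    use std::path::Path;
    use std::time::SystemTime;

    // Sketch only: `cached` comes from a `d`-state node, e.g. via
    // `cached_directory_mtime()` below, converted with the `From` impl.
    fn can_skip_read_dir(path: &Path, cached: &Timestamp) -> bool {
        let cached: SystemTime = cached.into();
        match fs::symlink_metadata(path).and_then(|m| m.modified()) {
            // Unchanged mtime: the directory listing cannot have changed
            // (modulo clock jumps), so status may iterate the child dirstate
            // nodes instead of calling read_dir again, ignore patterns
            // permitting.
            Ok(current) => current == cached,
            Err(_) => false,
        }
    }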

 #[derive(BytesCast, Copy, Clone)]
 #[repr(C)]
 struct Entry {
     mode: I32Be,
     mtime: I32Be,
     size: I32Be,
 }

 /// Duration since the Unix epoch
 #[derive(BytesCast, Copy, Clone, PartialEq)]
 #[repr(C)]
 pub(super) struct Timestamp {
     seconds: I64Be,

     /// In `0 .. 1_000_000_000`.
     ///
     /// This timestamp is later or earlier than `(seconds, 0)` by this many
     /// nanoseconds, if `seconds` is non-negative or negative, respectively.
     nanoseconds: U32Be,
 }
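A worked example of the sign rule, matching what the `From<&Timestamp> for SystemTime` conversion further down computes:

    (seconds:  1, nanoseconds: 500_000_000)  =>  UNIX_EPOCH + 1.5 s
    (seconds: -1, nanoseconds: 500_000_000)  =>  UNIX_EPOCH - 1.5 s

That is, the nanoseconds always push the timestamp further away from `(seconds, 0)` in the direction given by the sign of `seconds`.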

 /// Counted in bytes from the start of the file
 ///
 /// NOTE: not supporting `.hg/dirstate` files larger than 4 GiB.
 type Offset = U32Be;

 /// Counted in number of items
 ///
 /// NOTE: not supporting directories with more than 4 billion direct
 /// children, or filenames longer than 4 GiB.
 type Size = U32Be;

 /// Location of consecutive, fixed-size items.
 ///
 /// An item can be a single byte for paths, or a struct with
 /// `derive(BytesCast)`.
 #[derive(BytesCast, Copy, Clone)]
 #[repr(C)]
 struct Slice {
     start: Offset,
     len: Size,
 }

 /// A contiguous sequence of `len` times `Node`, representing the child nodes
 /// of either some other node or of the repository root.
 ///
 /// Always sorted by ascending `full_path`, to allow binary search.
 /// Since nodes with the same parent node also have the same parent path,
 /// only the `base_name`s need to be compared during binary search.
 type ChildNodes = Slice;

 /// A `HgPath` of `len` bytes
 type PathSlice = Slice;

 /// Either nothing if `start == 0`, or a `HgPath` of `len` bytes
 type OptPathSlice = Slice;

 /// Make sure that size-affecting changes are made knowingly
 fn _static_assert_size_of() {
-    let _ = std::mem::transmute::<Header, [u8; 88]>;
+    let _ = std::mem::transmute::<DocketHeader, [u8; 81]>;
+    let _ = std::mem::transmute::<Header, [u8; 36]>;
     let _ = std::mem::transmute::<Node, [u8; 49]>;
 }
178
196
179 /// Unexpected file format found in `.hg/dirstate` with the "v2" format.
197 /// Unexpected file format found in `.hg/dirstate` with the "v2" format.
180 ///
198 ///
181 /// This should only happen if Mercurial is buggy or a repository is corrupted.
199 /// This should only happen if Mercurial is buggy or a repository is corrupted.
182 #[derive(Debug)]
200 #[derive(Debug)]
183 pub struct DirstateV2ParseError;
201 pub struct DirstateV2ParseError;
184
202
185 impl From<DirstateV2ParseError> for HgError {
203 impl From<DirstateV2ParseError> for HgError {
186 fn from(_: DirstateV2ParseError) -> Self {
204 fn from(_: DirstateV2ParseError) -> Self {
187 HgError::corrupted("dirstate-v2 parse error")
205 HgError::corrupted("dirstate-v2 parse error")
188 }
206 }
189 }
207 }
190
208
191 impl From<DirstateV2ParseError> for crate::DirstateError {
209 impl From<DirstateV2ParseError> for crate::DirstateError {
192 fn from(error: DirstateV2ParseError) -> Self {
210 fn from(error: DirstateV2ParseError) -> Self {
193 HgError::from(error).into()
211 HgError::from(error).into()
194 }
212 }
195 }
213 }
196
214
197 fn read_header(on_disk: &[u8]) -> Result<&Header, DirstateV2ParseError> {
215 impl<'on_disk> Docket<'on_disk> {
198 let (header, _) =
216 pub fn parents(&self) -> DirstateParents {
199 Header::from_bytes(on_disk).map_err(|_| DirstateV2ParseError)?;
217 use crate::Node;
200 if header.marker == *V2_FORMAT_MARKER {
218 let p1 = Node::try_from(&self.header.parent_1[..USED_NODE_ID_BYTES])
201 Ok(header)
219 .unwrap()
220 .clone();
221 let p2 = Node::try_from(&self.header.parent_2[..USED_NODE_ID_BYTES])
222 .unwrap()
223 .clone();
224 DirstateParents { p1, p2 }
225 }
226
227 pub fn data_filename(&self) -> String {
228 String::from_utf8(format_bytes!(b"dirstate.{}.d", self.uuid)).unwrap()
229 }
230 }
231
232 pub fn read_docket(
233 on_disk: &[u8],
234 ) -> Result<Docket<'_>, DirstateV2ParseError> {
235 let (header, uuid) =
236 DocketHeader::from_bytes(on_disk).map_err(|_| DirstateV2ParseError)?;
237 let uuid_size = header.uuid_size as usize;
238 if header.marker == *V2_FORMAT_MARKER && uuid.len() == uuid_size {
239 Ok(Docket { header, uuid })
202 } else {
240 } else {
203 Err(DirstateV2ParseError)
241 Err(DirstateV2ParseError)
204 }
242 }
205 }
243 }
206
244
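A hedged sketch of how a caller is expected to chain these pieces, reading the docket first and the data file second (the real wiring lives in `repo.rs` and `list_tracked_files.rs` below; the reader closure stands in for `Vfs`):

    // Sketch: locate the dirstate-v2 data file through the docket.
    // Error plumbing is simplified for illustration.
    fn load_v2_data(
        read: impl Fn(&str) -> std::io::Result<Vec<u8>>,
    ) -> std::io::Result<Vec<u8>> {
        let docket_bytes = read("dirstate")?;
        // `read_docket` borrows from `docket_bytes`
        let docket =
            read_docket(&docket_bytes).expect("valid dirstate-v2 docket");
        read(&docket.data_filename())
    }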
 pub(super) fn read<'on_disk>(
     on_disk: &'on_disk [u8],
-) -> Result<
-    (DirstateMap<'on_disk>, Option<DirstateParents>),
-    DirstateV2ParseError,
-> {
+) -> Result<DirstateMap<'on_disk>, DirstateV2ParseError> {
     if on_disk.is_empty() {
-        return Ok((DirstateMap::empty(on_disk), None));
+        return Ok(DirstateMap::empty(on_disk));
     }
-    let header = read_header(on_disk)?;
+    let (header, _) =
+        Header::from_bytes(on_disk).map_err(|_| DirstateV2ParseError)?;
     let dirstate_map = DirstateMap {
         on_disk,
         root: dirstate_map::ChildNodes::OnDisk(read_slice::<Node>(
             on_disk,
             header.root,
         )?),
         nodes_with_entry_count: header.nodes_with_entry_count.get(),
         nodes_with_copy_source_count: header
             .nodes_with_copy_source_count
             .get(),
         ignore_patterns_hash: header.ignore_patterns_hash,
     };
-    let parents = Some(header.parents.clone());
-    Ok((dirstate_map, parents))
+    Ok(dirstate_map)
 }

 impl Node {
     pub(super) fn full_path<'on_disk>(
         &self,
         on_disk: &'on_disk [u8],
     ) -> Result<&'on_disk HgPath, DirstateV2ParseError> {
         read_hg_path(on_disk, self.full_path)
     }

     pub(super) fn base_name_start<'on_disk>(
         &self,
     ) -> Result<usize, DirstateV2ParseError> {
         let start = self.base_name_start.get();
         if start < self.full_path.len.get() {
             let start = usize::try_from(start)
                 // u32 -> usize, could only panic on a 16-bit CPU
                 .expect("dirstate-v2 base_name_start out of bounds");
             Ok(start)
         } else {
             Err(DirstateV2ParseError)
         }
     }

     pub(super) fn base_name<'on_disk>(
         &self,
         on_disk: &'on_disk [u8],
     ) -> Result<&'on_disk HgPath, DirstateV2ParseError> {
         let full_path = self.full_path(on_disk)?;
         let base_name_start = self.base_name_start()?;
         Ok(HgPath::new(&full_path.as_bytes()[base_name_start..]))
     }

     pub(super) fn path<'on_disk>(
         &self,
         on_disk: &'on_disk [u8],
     ) -> Result<dirstate_map::NodeKey<'on_disk>, DirstateV2ParseError> {
         Ok(WithBasename::from_raw_parts(
             Cow::Borrowed(self.full_path(on_disk)?),
             self.base_name_start()?,
         ))
     }

     pub(super) fn has_copy_source<'on_disk>(&self) -> bool {
         self.copy_source.start.get() != 0
     }

     pub(super) fn copy_source<'on_disk>(
         &self,
         on_disk: &'on_disk [u8],
     ) -> Result<Option<&'on_disk HgPath>, DirstateV2ParseError> {
         Ok(if self.has_copy_source() {
             Some(read_hg_path(on_disk, self.copy_source)?)
         } else {
             None
         })
     }

     pub(super) fn node_data(
         &self,
     ) -> Result<dirstate_map::NodeData, DirstateV2ParseError> {
         let entry = |state| {
             dirstate_map::NodeData::Entry(self.entry_with_given_state(state))
         };

         match self.state {
             b'\0' => Ok(dirstate_map::NodeData::None),
             b'd' => Ok(dirstate_map::NodeData::CachedDirectory {
                 mtime: *self.data.as_timestamp(),
             }),
             b'n' => Ok(entry(EntryState::Normal)),
             b'a' => Ok(entry(EntryState::Added)),
             b'r' => Ok(entry(EntryState::Removed)),
             b'm' => Ok(entry(EntryState::Merged)),
             _ => Err(DirstateV2ParseError),
         }
     }

     pub(super) fn cached_directory_mtime(&self) -> Option<&Timestamp> {
         if self.state == b'd' {
             Some(self.data.as_timestamp())
         } else {
             None
         }
     }

     pub(super) fn state(
         &self,
     ) -> Result<Option<EntryState>, DirstateV2ParseError> {
         match self.state {
             b'\0' | b'd' => Ok(None),
             b'n' => Ok(Some(EntryState::Normal)),
             b'a' => Ok(Some(EntryState::Added)),
             b'r' => Ok(Some(EntryState::Removed)),
             b'm' => Ok(Some(EntryState::Merged)),
             _ => Err(DirstateV2ParseError),
         }
     }

     fn entry_with_given_state(&self, state: EntryState) -> DirstateEntry {
         DirstateEntry {
             state,
             mode: self.data.mode.get(),
             mtime: self.data.mtime.get(),
             size: self.data.size.get(),
         }
     }

     pub(super) fn entry(
         &self,
     ) -> Result<Option<DirstateEntry>, DirstateV2ParseError> {
         Ok(self
             .state()?
             .map(|state| self.entry_with_given_state(state)))
     }

     pub(super) fn children<'on_disk>(
         &self,
         on_disk: &'on_disk [u8],
     ) -> Result<&'on_disk [Node], DirstateV2ParseError> {
         read_slice::<Node>(on_disk, self.children)
     }

     pub(super) fn to_in_memory_node<'on_disk>(
         &self,
         on_disk: &'on_disk [u8],
     ) -> Result<dirstate_map::Node<'on_disk>, DirstateV2ParseError> {
         Ok(dirstate_map::Node {
             children: dirstate_map::ChildNodes::OnDisk(
                 self.children(on_disk)?,
             ),
             copy_source: self.copy_source(on_disk)?.map(Cow::Borrowed),
             data: self.node_data()?,
             descendants_with_entry_count: self
                 .descendants_with_entry_count
                 .get(),
             tracked_descendants_count: self.tracked_descendants_count.get(),
         })
     }
 }

 impl Entry {
     fn from_timestamp(timestamp: Timestamp) -> Self {
         // Safety: both types implement the `BytesCast` trait, so we could
         // safely use `as_bytes` and `from_bytes` to do this conversion.
         // Using `transmute` instead makes the compiler check that the two
         // types have the same size, which eliminates the error case of
         // `from_bytes`.
         unsafe { std::mem::transmute::<Timestamp, Entry>(timestamp) }
     }

     fn as_timestamp(&self) -> &Timestamp {
         // Safety: same as above in `from_timestamp`
         unsafe { &*(self as *const Entry as *const Timestamp) }
     }
 }

 impl Timestamp {
     pub fn seconds(&self) -> i64 {
         self.seconds.get()
     }
 }

 impl From<SystemTime> for Timestamp {
     fn from(system_time: SystemTime) -> Self {
         let (secs, nanos) = match system_time.duration_since(UNIX_EPOCH) {
             Ok(duration) => {
                 (duration.as_secs() as i64, duration.subsec_nanos())
             }
             Err(error) => {
                 let negative = error.duration();
                 (-(negative.as_secs() as i64), negative.subsec_nanos())
             }
         };
         Timestamp {
             seconds: secs.into(),
             nanoseconds: nanos.into(),
         }
     }
 }

 impl From<&'_ Timestamp> for SystemTime {
     fn from(timestamp: &'_ Timestamp) -> Self {
         let secs = timestamp.seconds.get();
         let nanos = timestamp.nanoseconds.get();
         if secs >= 0 {
             UNIX_EPOCH + Duration::new(secs as u64, nanos)
         } else {
             UNIX_EPOCH - Duration::new((-secs) as u64, nanos)
         }
     }
 }

 fn read_hg_path(
     on_disk: &[u8],
     slice: Slice,
 ) -> Result<&HgPath, DirstateV2ParseError> {
     let bytes = read_slice::<u8>(on_disk, slice)?;
     Ok(HgPath::new(bytes))
 }

 fn read_slice<T>(
     on_disk: &[u8],
     slice: Slice,
 ) -> Result<&[T], DirstateV2ParseError>
 where
     T: BytesCast,
 {
     // Either conversion can only fail on a platform where `usize` is
     // narrower than 32 bits; falling back to `usize::MAX` then still yields
     // an "out of bounds" error, since a single `&[u8]` cannot occupy the
     // entire address space.
     let start = usize::try_from(slice.start.get()).unwrap_or(std::usize::MAX);
     let len = usize::try_from(slice.len.get()).unwrap_or(std::usize::MAX);
     on_disk
         .get(start..)
         .and_then(|bytes| T::slice_from_bytes(bytes, len).ok())
         .map(|(slice, _rest)| slice)
         .ok_or_else(|| DirstateV2ParseError)
 }
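A minimal sketch of the intended bounds behaviour, written as a module-local test (illustrative only, assuming it sits next to the private `Slice` and `read_slice` definitions):

    #[test]
    fn read_slice_rejects_out_of_bounds() {
        let on_disk = [0u8; 8];
        // Entirely inside the buffer: bytes 2..6 are readable.
        let ok = Slice { start: 2.into(), len: 4.into() };
        assert!(read_slice::<u8>(&on_disk, ok).is_ok());
        // start + len runs past the end of the buffer: parse error.
        let bad = Slice { start: 6.into(), len: 4.into() };
        assert!(read_slice::<u8>(&on_disk, bad).is_err());
    }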

-pub(crate) fn parse_dirstate_parents(
-    on_disk: &[u8],
-) -> Result<&DirstateParents, HgError> {
-    Ok(&read_header(on_disk)?.parents)
-}
-
 pub(crate) fn for_each_tracked_path<'on_disk>(
     on_disk: &'on_disk [u8],
     mut f: impl FnMut(&'on_disk HgPath),
 ) -> Result<(), DirstateV2ParseError> {
-    let header = read_header(on_disk)?;
+    let (header, _) =
+        Header::from_bytes(on_disk).map_err(|_| DirstateV2ParseError)?;
     fn recur<'on_disk>(
         on_disk: &'on_disk [u8],
         nodes: Slice,
         f: &mut impl FnMut(&'on_disk HgPath),
     ) -> Result<(), DirstateV2ParseError> {
         for node in read_slice::<Node>(on_disk, nodes)? {
             if let Some(state) = node.state()? {
                 if state.is_tracked() {
                     f(node.full_path(on_disk)?)
                 }
             }
             recur(on_disk, node.children, f)?
         }
         Ok(())
     }
     recur(on_disk, header.root, &mut f)
 }

 pub(super) fn write(
     dirstate_map: &mut DirstateMap,
-    parents: DirstateParents,
 ) -> Result<Vec<u8>, DirstateError> {
     let header_len = std::mem::size_of::<Header>();

     // This ignores the space for paths, and for nodes without an entry.
     // TODO: better estimate? Skip the `Vec` and write to a file directly?
     let size_guess = header_len
         + std::mem::size_of::<Node>()
             * dirstate_map.nodes_with_entry_count as usize;
     let mut out = Vec::with_capacity(size_guess);

     // Keep space for the header. We’ll fill it out at the end when we know
     // the actual offset for the root nodes.
     out.resize(header_len, 0_u8);

     let root =
         write_nodes(dirstate_map, dirstate_map.root.as_ref(), &mut out)?;

     let header = Header {
-        marker: *V2_FORMAT_MARKER,
-        parents: parents,
         root,
         nodes_with_entry_count: dirstate_map.nodes_with_entry_count.into(),
         nodes_with_copy_source_count: dirstate_map
             .nodes_with_copy_source_count
             .into(),
         ignore_patterns_hash: dirstate_map.ignore_patterns_hash,
     };
     out[..header_len].copy_from_slice(header.as_bytes());
     Ok(out)
 }
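The reserve-then-backpatch pattern above generalizes to any header whose fields are only known after the body is serialized; a stripped-down sketch under hypothetical types, not the real ones:

    // Sketch: reserve header space, write the body, then patch the header in.
    fn serialize_with_header(body: &[u8]) -> Vec<u8> {
        const HEADER_LEN: usize = 4;
        let mut out = vec![0u8; HEADER_LEN]; // placeholder header bytes
        out.extend_from_slice(body);
        // Now that the body layout is known, overwrite the placeholder.
        let body_offset = (HEADER_LEN as u32).to_be_bytes();
        out[..HEADER_LEN].copy_from_slice(&body_offset);
        out
    }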

 fn write_nodes(
     dirstate_map: &DirstateMap,
     nodes: dirstate_map::ChildNodesRef,
     out: &mut Vec<u8>,
 ) -> Result<ChildNodes, DirstateError> {
     // `dirstate_map::ChildNodes` is a `HashMap` with undefined iteration
     // order. Sort to enable binary search in the written file.
     let nodes = nodes.sorted();

     // First accumulate serialized nodes in a `Vec`
     let mut on_disk_nodes = Vec::with_capacity(nodes.len());
     for node in nodes {
         let children = write_nodes(
             dirstate_map,
             node.children(dirstate_map.on_disk)?,
             out,
         )?;
         let full_path = node.full_path(dirstate_map.on_disk)?;
         let full_path = write_slice::<u8>(full_path.as_bytes(), out);
         let copy_source =
             if let Some(source) = node.copy_source(dirstate_map.on_disk)? {
                 write_slice::<u8>(source.as_bytes(), out)
             } else {
                 Slice {
                     start: 0.into(),
                     len: 0.into(),
                 }
             };
         on_disk_nodes.push(match node {
             NodeRef::InMemory(path, node) => {
                 let (state, data) = match &node.data {
                     dirstate_map::NodeData::Entry(entry) => (
                         entry.state.into(),
                         Entry {
                             mode: entry.mode.into(),
                             mtime: entry.mtime.into(),
                             size: entry.size.into(),
                         },
                     ),
                     dirstate_map::NodeData::CachedDirectory { mtime } => {
                         (b'd', Entry::from_timestamp(*mtime))
                     }
                     dirstate_map::NodeData::None => (
                         b'\0',
                         Entry {
                             mode: 0.into(),
                             mtime: 0.into(),
                             size: 0.into(),
                         },
                     ),
                 };
                 Node {
                     children,
                     copy_source,
                     full_path,
                     base_name_start: u32::try_from(path.base_name_start())
                         // Could only panic for paths over 4 GiB
                         .expect("dirstate-v2 offset overflow")
                         .into(),
                     descendants_with_entry_count: node
                         .descendants_with_entry_count
                         .into(),
                     tracked_descendants_count: node
                         .tracked_descendants_count
                         .into(),
                     state,
                     data,
                 }
             }
             NodeRef::OnDisk(node) => Node {
                 children,
                 copy_source,
                 full_path,
                 ..*node
             },
         })
     }
     // … so we can write them contiguously
     Ok(write_slice::<Node>(&on_disk_nodes, out))
 }

 fn write_slice<T>(slice: &[T], out: &mut Vec<u8>) -> Slice
 where
     T: BytesCast,
 {
     let start = u32::try_from(out.len())
         // Could only panic for a dirstate file larger than 4 GiB
         .expect("dirstate-v2 offset overflow")
         .into();
     let len = u32::try_from(slice.len())
         // Could only panic for paths over 4 GiB or nodes with over
         // 4 billion child nodes
         .expect("dirstate-v2 offset overflow")
         .into();
     out.extend(slice.as_bytes());
     Slice { start, len }
 }
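A quick property worth keeping in mind, sketched as an illustrative test: whatever `write_slice` appends can be recovered by `read_slice` from the same buffer, because `Slice.start` records the buffer length at the time of the write.

    #[test]
    fn write_then_read_slice_round_trips() {
        let mut out = vec![0u8; 7]; // pretend a header was reserved
        let written = write_slice::<u8>(b"abc", &mut out);
        let read = read_slice::<u8>(&out, written).unwrap();
        assert_eq!(read, b"abc");
    }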
@@ -1,79 +1,84 @@
 // list_tracked_files.rs
 //
 // Copyright 2020 Antoine Cezar <antoine.cezar@octobus.net>
 //
 // This software may be used and distributed according to the terms of the
 // GNU General Public License version 2 or any later version.

 use crate::dirstate::parsers::parse_dirstate_entries;
-use crate::dirstate_tree::on_disk::for_each_tracked_path;
+use crate::dirstate_tree::on_disk::{for_each_tracked_path, read_docket};
 use crate::errors::HgError;
 use crate::repo::Repo;
 use crate::revlog::changelog::Changelog;
 use crate::revlog::manifest::{Manifest, ManifestEntry};
 use crate::revlog::node::Node;
 use crate::revlog::revlog::RevlogError;
 use crate::utils::hg_path::HgPath;
 use crate::DirstateError;
 use rayon::prelude::*;

 /// List files under Mercurial control in the working directory
 /// by reading the dirstate
 pub struct Dirstate {
     /// The `dirstate` content.
     content: Vec<u8>,
     dirstate_v2: bool,
 }

 impl Dirstate {
     pub fn new(repo: &Repo) -> Result<Self, HgError> {
+        let mut content = repo.hg_vfs().read("dirstate")?;
+        if repo.has_dirstate_v2() {
+            let docket = read_docket(&content)?;
+            content = repo.hg_vfs().read(docket.data_filename())?;
+        }
         Ok(Self {
-            content: repo.hg_vfs().read("dirstate")?,
+            content,
             dirstate_v2: repo.has_dirstate_v2(),
         })
    }
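A hedged sketch of the caller side, assuming a `Repo` has already been located (see `repo.rs` below); both dirstate formats go through the same two calls, since the docket indirection is resolved inside `Dirstate::new`:

    // Sketch only: print every tracked path in the working directory.
    fn print_tracked(repo: &Repo) -> Result<(), DirstateError> {
        let dirstate = Dirstate::new(repo)?;
        for path in dirstate.tracked_files()? {
            println!("{}", path);
        }
        Ok(())
    }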

     pub fn tracked_files(&self) -> Result<Vec<&HgPath>, DirstateError> {
         let mut files = Vec::new();
         if !self.content.is_empty() {
             if self.dirstate_v2 {
                 for_each_tracked_path(&self.content, |path| files.push(path))?
             } else {
                 let _parents = parse_dirstate_entries(
                     &self.content,
                     |path, entry, _copy_source| {
                         if entry.state.is_tracked() {
                             files.push(path)
                         }
                         Ok(())
                     },
                 )?;
             }
         }
         files.par_sort_unstable();
         Ok(files)
     }
 }

 /// List files under Mercurial control at a given revision.
 pub fn list_rev_tracked_files(
     repo: &Repo,
     revset: &str,
 ) -> Result<FilesForRev, RevlogError> {
     let rev = crate::revset::resolve_single(revset, repo)?;
     let changelog = Changelog::open(repo)?;
     let manifest = Manifest::open(repo)?;
     let changelog_entry = changelog.get_rev(rev)?;
     let manifest_node =
         Node::from_hex_for_repo(&changelog_entry.manifest_node()?)?;
     let manifest_entry = manifest.get_node(manifest_node.into())?;
     Ok(FilesForRev(manifest_entry))
 }

 pub struct FilesForRev(ManifestEntry);

 impl FilesForRev {
     pub fn iter(&self) -> impl Iterator<Item = &HgPath> {
         self.0.files()
     }
 }
@@ -1,287 +1,288 @@
 use crate::config::{Config, ConfigError, ConfigParseError};
 use crate::errors::{HgError, IoErrorContext, IoResultExt};
 use crate::exit_codes;
 use crate::requirements;
 use crate::utils::files::get_path_from_bytes;
 use crate::utils::SliceExt;
 use memmap::{Mmap, MmapOptions};
 use std::collections::HashSet;
 use std::path::{Path, PathBuf};

 /// A repository on disk
 pub struct Repo {
     working_directory: PathBuf,
     dot_hg: PathBuf,
     store: PathBuf,
     requirements: HashSet<String>,
     config: Config,
 }

 #[derive(Debug, derive_more::From)]
 pub enum RepoError {
     NotFound {
         at: PathBuf,
     },
     #[from]
     ConfigParseError(ConfigParseError),
     #[from]
     Other(HgError),
 }

 impl From<ConfigError> for RepoError {
     fn from(error: ConfigError) -> Self {
         match error {
             ConfigError::Parse(error) => error.into(),
             ConfigError::Other(error) => error.into(),
         }
     }
 }

 /// Filesystem access abstraction for the contents of a given "base"
 /// directory
 #[derive(Clone, Copy)]
 pub struct Vfs<'a> {
     pub(crate) base: &'a Path,
 }

 impl Repo {
     /// Tries to find the nearest repository root in the current working
     /// directory or its ancestors.
     pub fn find_repo_root() -> Result<PathBuf, RepoError> {
         let current_directory = crate::utils::current_dir()?;
         // ancestors() is inclusive: it first yields `current_directory`
         // as-is.
         for ancestor in current_directory.ancestors() {
             if ancestor.join(".hg").is_dir() {
                 return Ok(ancestor.to_path_buf());
             }
         }
         return Err(RepoError::NotFound {
             at: current_directory,
         });
     }

     /// Find a repository, either at the given path (which must contain a
     /// `.hg` sub-directory) or by searching the current directory and its
     /// ancestors.
     ///
     /// A method with two very different "modes" like this is usually a code
     /// smell that calls for two separate methods, but in this case an
     /// `Option` is what rhg sub-commands get from Clap for the `-R` /
     /// `--repository` CLI argument. Having two methods would just move that
     /// `if` to almost all callers.
     pub fn find(
         config: &Config,
         explicit_path: Option<PathBuf>,
     ) -> Result<Self, RepoError> {
         if let Some(root) = explicit_path {
             if root.join(".hg").is_dir() {
                 Self::new_at_path(root.to_owned(), config)
             } else if root.is_file() {
                 Err(HgError::unsupported("bundle repository").into())
             } else {
                 Err(RepoError::NotFound {
                     at: root.to_owned(),
                 })
             }
         } else {
             let root = Self::find_repo_root()?;
             Self::new_at_path(root, config)
         }
     }

     /// To be called after checking that `.hg` is a sub-directory
     fn new_at_path(
         working_directory: PathBuf,
         config: &Config,
     ) -> Result<Self, RepoError> {
         let dot_hg = working_directory.join(".hg");

         let mut repo_config_files = Vec::new();
         repo_config_files.push(dot_hg.join("hgrc"));
         repo_config_files.push(dot_hg.join("hgrc-not-shared"));

         let hg_vfs = Vfs { base: &dot_hg };
         let mut reqs = requirements::load_if_exists(hg_vfs)?;
         let relative =
             reqs.contains(requirements::RELATIVE_SHARED_REQUIREMENT);
         let shared =
             reqs.contains(requirements::SHARED_REQUIREMENT) || relative;

         // From `mercurial/localrepo.py`:
         //
         // if .hg/requires contains the sharesafe requirement, it means
         // there exists a `.hg/store/requires` too and we should read it
         // NOTE: presence of SHARESAFE_REQUIREMENT implies that a store
         // requirement is present. We never write SHARESAFE_REQUIREMENT for
         // a repo if store is not present; refer to
         // checkrequirementscompat() for that.
         //
         // However, if SHARESAFE_REQUIREMENT is not present, it means that
         // the repository was shared the old way. We check the share source
         // .hg/requires for SHARESAFE_REQUIREMENT to detect whether the
         // current repository needs to be reshared
         let share_safe = reqs.contains(requirements::SHARESAFE_REQUIREMENT);

         let store_path;
         if !shared {
             store_path = dot_hg.join("store");
         } else {
             let bytes = hg_vfs.read("sharedpath")?;
             let mut shared_path =
                 get_path_from_bytes(bytes.trim_end_newlines()).to_owned();
             if relative {
                 shared_path = dot_hg.join(shared_path)
             }
             if !shared_path.is_dir() {
                 return Err(HgError::corrupted(format!(
                     ".hg/sharedpath points to nonexistent directory {}",
                     shared_path.display()
                 ))
                 .into());
             }

             store_path = shared_path.join("store");

             let source_is_share_safe =
                 requirements::load(Vfs { base: &shared_path })?
                     .contains(requirements::SHARESAFE_REQUIREMENT);

             if share_safe && !source_is_share_safe {
                 return Err(match config
                     .get(b"share", b"safe-mismatch.source-not-safe")
                 {
                     Some(b"abort") | None => HgError::abort(
                         "abort: share source does not support share-safe requirement\n\
                         (see `hg help config.format.use-share-safe` for more information)",
                         exit_codes::ABORT,
                     ),
                     _ => HgError::unsupported("share-safe downgrade"),
                 }
                 .into());
             } else if source_is_share_safe && !share_safe {
                 return Err(
                     match config.get(b"share", b"safe-mismatch.source-safe") {
                         Some(b"abort") | None => HgError::abort(
                             "abort: version mismatch: source uses share-safe \
                             functionality while the current share does not\n\
                             (see `hg help config.format.use-share-safe` for more information)",
                             exit_codes::ABORT,
                         ),
                         _ => HgError::unsupported("share-safe upgrade"),
                     }
                     .into(),
                 );
             }

             if share_safe {
                 repo_config_files.insert(0, shared_path.join("hgrc"))
             }
         }
         if share_safe {
             reqs.extend(requirements::load(Vfs { base: &store_path })?);
         }

         let repo_config = if std::env::var_os("HGRCSKIPREPO").is_none() {
             config.combine_with_repo(&repo_config_files)?
         } else {
             config.clone()
         };

         let repo = Self {
             requirements: reqs,
             working_directory,
             store: store_path,
             dot_hg,
             config: repo_config,
         };

         requirements::check(&repo)?;

         Ok(repo)
     }

     pub fn working_directory_path(&self) -> &Path {
         &self.working_directory
     }

     pub fn requirements(&self) -> &HashSet<String> {
         &self.requirements
     }

     pub fn config(&self) -> &Config {
         &self.config
     }

     /// For accessing repository files (in `.hg`), except for the store
     /// (`.hg/store`).
     pub fn hg_vfs(&self) -> Vfs<'_> {
         Vfs { base: &self.dot_hg }
     }

     /// For accessing repository store files (in `.hg/store`)
     pub fn store_vfs(&self) -> Vfs<'_> {
         Vfs { base: &self.store }
     }

     /// For accessing the working copy
     pub fn working_directory_vfs(&self) -> Vfs<'_> {
         Vfs {
             base: &self.working_directory,
         }
     }

     pub fn has_dirstate_v2(&self) -> bool {
         self.requirements
             .contains(requirements::DIRSTATE_V2_REQUIREMENT)
     }

     pub fn dirstate_parents(
         &self,
     ) -> Result<crate::dirstate::DirstateParents, HgError> {
         let dirstate = self.hg_vfs().mmap_open("dirstate")?;
         if dirstate.is_empty() {
             return Ok(crate::dirstate::DirstateParents::NULL);
         }
         let parents = if self.has_dirstate_v2() {
-            crate::dirstate_tree::on_disk::parse_dirstate_parents(&dirstate)?
+            crate::dirstate_tree::on_disk::read_docket(&dirstate)?.parents()
         } else {
             crate::dirstate::parsers::parse_dirstate_parents(&dirstate)?
+                .clone()
         };
-        Ok(parents.clone())
+        Ok(parents)
     }
 }
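A small hedged sketch of a caller: with the docket in place, code never needs to know which dirstate format is in use, since `dirstate_parents()` dispatches internally (this assumes `DirstateParents` implements `PartialEq`, which the `NULL` comparison below relies on):

    // Sketch only: detect a working directory with no checked-out parent.
    // DirstateParents::NULL is what an empty dirstate returns above.
    fn working_dir_is_empty(repo: &Repo) -> Result<bool, HgError> {
        Ok(repo.dirstate_parents()? == crate::dirstate::DirstateParents::NULL)
    }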

 impl Vfs<'_> {
     pub fn join(&self, relative_path: impl AsRef<Path>) -> PathBuf {
         self.base.join(relative_path)
     }

     pub fn read(
         &self,
         relative_path: impl AsRef<Path>,
     ) -> Result<Vec<u8>, HgError> {
         let path = self.join(relative_path);
         std::fs::read(&path).when_reading_file(&path)
     }

     pub fn mmap_open(
         &self,
         relative_path: impl AsRef<Path>,
     ) -> Result<Mmap, HgError> {
         let path = self.base.join(relative_path);
         let file = std::fs::File::open(&path).when_reading_file(&path)?;
         // TODO: what are the safety requirements here?
         let mmap = unsafe { MmapOptions::new().map(&file) }
             .when_reading_file(&path)?;
         Ok(mmap)
     }

     pub fn rename(
         &self,
         relative_from: impl AsRef<Path>,
         relative_to: impl AsRef<Path>,
     ) -> Result<(), HgError> {
         let from = self.join(relative_from);
         let to = self.join(relative_to);
         std::fs::rename(&from, &to)
             .with_context(|| IoErrorContext::RenamingFile { from, to })
     }
 }
@@ -1,582 +1,606 @@
1 // dirstate_map.rs
1 // dirstate_map.rs
2 //
2 //
3 // Copyright 2019 Raphaël Gomès <rgomes@octobus.net>
3 // Copyright 2019 Raphaël Gomès <rgomes@octobus.net>
4 //
4 //
5 // This software may be used and distributed according to the terms of the
5 // This software may be used and distributed according to the terms of the
6 // GNU General Public License version 2 or any later version.
6 // GNU General Public License version 2 or any later version.
7
7
8 //! Bindings for the `hg::dirstate::dirstate_map` file provided by the
8 //! Bindings for the `hg::dirstate::dirstate_map` file provided by the
9 //! `hg-core` package.
9 //! `hg-core` package.
10
10
11 use std::cell::{RefCell, RefMut};
11 use std::cell::{RefCell, RefMut};
12 use std::convert::TryInto;
12 use std::convert::TryInto;
13
13
14 use cpython::{
14 use cpython::{
15 exc, ObjectProtocol, PyBool, PyBytes, PyClone, PyDict, PyErr, PyList,
15 exc, ObjectProtocol, PyBool, PyBytes, PyClone, PyDict, PyErr, PyList,
16 PyObject, PyResult, PySet, PyString, Python, PythonObject, ToPyObject,
16 PyObject, PyResult, PySet, PyString, Python, PythonObject, ToPyObject,
17 UnsafePyLeaked,
17 UnsafePyLeaked,
18 };
18 };
19
19
20 use crate::{
20 use crate::{
21 dirstate::copymap::{CopyMap, CopyMapItemsIterator, CopyMapKeysIterator},
21 dirstate::copymap::{CopyMap, CopyMapItemsIterator, CopyMapKeysIterator},
22 dirstate::make_directory_item,
22 dirstate::make_directory_item,
23 dirstate::make_dirstate_item,
23 dirstate::make_dirstate_item,
24 dirstate::non_normal_entries::{
24 dirstate::non_normal_entries::{
25 NonNormalEntries, NonNormalEntriesIterator,
25 NonNormalEntries, NonNormalEntriesIterator,
26 },
26 },
27 dirstate::owning::OwningDirstateMap,
27 dirstate::owning::OwningDirstateMap,
28 parsers::dirstate_parents_to_pytuple,
28 parsers::dirstate_parents_to_pytuple,
29 };
29 };
30 use hg::{
30 use hg::{
31 dirstate::parsers::Timestamp,
31 dirstate::parsers::Timestamp,
32 dirstate::MTIME_UNSET,
32 dirstate::MTIME_UNSET,
33 dirstate::SIZE_NON_NORMAL,
33 dirstate::SIZE_NON_NORMAL,
34 dirstate_tree::dispatch::DirstateMapMethods,
34 dirstate_tree::dispatch::DirstateMapMethods,
35 dirstate_tree::on_disk::DirstateV2ParseError,
35 dirstate_tree::on_disk::DirstateV2ParseError,
36 revlog::Node,
36 revlog::Node,
37 utils::files::normalize_case,
37 utils::files::normalize_case,
38 utils::hg_path::{HgPath, HgPathBuf},
38 utils::hg_path::{HgPath, HgPathBuf},
39 DirstateEntry, DirstateError, DirstateMap as RustDirstateMap,
39 DirstateEntry, DirstateError, DirstateMap as RustDirstateMap,
40 DirstateParents, EntryState, StateMapIter,
40 DirstateParents, EntryState, StateMapIter,
41 };
41 };
42
42
43 // TODO
43 // TODO
44 // This object needs to share references to multiple members of its Rust
44 // This object needs to share references to multiple members of its Rust
45 // inner struct, namely `copy_map`, `dirs` and `all_dirs`.
45 // inner struct, namely `copy_map`, `dirs` and `all_dirs`.
46 // Right now `CopyMap` is done, but it needs to have an explicit reference
46 // Right now `CopyMap` is done, but it needs to have an explicit reference
47 // to `RustDirstateMap`, which itself needs to provide a wrapper for
47 // to `RustDirstateMap`, which itself needs to provide a wrapper for
48 // every method in `CopyMap` (copymapcopy, etc.).
48 // every method in `CopyMap` (copymapcopy, etc.).
49 // This is ugly and hard to maintain.
49 // This is ugly and hard to maintain.
50 // The same logic applies to `dirs` and `all_dirs`; however, the `Dirs`
50 // The same logic applies to `dirs` and `all_dirs`; however, the `Dirs`
51 // `py_class!` is already implemented and does not mention
51 // `py_class!` is already implemented and does not mention
52 // `RustDirstateMap`, rightfully so.
52 // `RustDirstateMap`, rightfully so.
53 // All attributes also have to have a separate refcount data attribute for
53 // All attributes also have to have a separate refcount data attribute for
54 // leaks, with all methods that go along for reference sharing.
54 // leaks, with all methods that go along for reference sharing.
55 py_class!(pub class DirstateMap |py| {
55 py_class!(pub class DirstateMap |py| {
56 @shared data inner: Box<dyn DirstateMapMethods + Send>;
56 @shared data inner: Box<dyn DirstateMapMethods + Send>;
57
57
58 /// Returns a `(dirstate_map, parents)` tuple
58 /// Returns a `(dirstate_map, parents)` tuple
59 @staticmethod
59 @staticmethod
60 def new(
60 def new_v1(
61 use_dirstate_tree: bool,
61 use_dirstate_tree: bool,
62 use_dirstate_v2: bool,
63 on_disk: PyBytes,
62 on_disk: PyBytes,
64 ) -> PyResult<PyObject> {
63 ) -> PyResult<PyObject> {
65 let dirstate_error = |e: DirstateError| {
64 let dirstate_error = |e: DirstateError| {
66 PyErr::new::<exc::OSError, _>(py, format!("Dirstate error: {:?}", e))
65 PyErr::new::<exc::OSError, _>(py, format!("Dirstate error: {:?}", e))
67 };
66 };
68 let (inner, parents) = if use_dirstate_tree || use_dirstate_v2 {
67 let (inner, parents) = if use_dirstate_tree {
69 let (map, parents) =
68 let (map, parents) = OwningDirstateMap::new_v1(py, on_disk)
70 OwningDirstateMap::new(py, on_disk, use_dirstate_v2)
71 .map_err(dirstate_error)?;
69 .map_err(dirstate_error)?;
72 (Box::new(map) as _, parents)
70 (Box::new(map) as _, parents)
73 } else {
71 } else {
74 let bytes = on_disk.data(py);
72 let bytes = on_disk.data(py);
75 let mut map = RustDirstateMap::default();
73 let mut map = RustDirstateMap::default();
76 let parents = map.read(bytes).map_err(dirstate_error)?;
74 let parents = map.read(bytes).map_err(dirstate_error)?;
77 (Box::new(map) as _, parents)
75 (Box::new(map) as _, parents)
78 };
76 };
79 let map = Self::create_instance(py, inner)?;
77 let map = Self::create_instance(py, inner)?;
80 let parents = parents.map(|p| dirstate_parents_to_pytuple(py, &p));
78 let parents = parents.map(|p| dirstate_parents_to_pytuple(py, &p));
81 Ok((map, parents).to_py_object(py).into_object())
79 Ok((map, parents).to_py_object(py).into_object())
82 }
80 }
83
81
82 /// Returns a DirstateMap
83 @staticmethod
84 def new_v2(
85 on_disk: PyBytes,
86 ) -> PyResult<PyObject> {
87 let dirstate_error = |e: DirstateError| {
88 PyErr::new::<exc::OSError, _>(py, format!("Dirstate error: {:?}", e))
89 };
90 let inner = OwningDirstateMap::new_v2(py, on_disk)
91 .map_err(dirstate_error)?;
92 let map = Self::create_instance(py, Box::new(inner))?;
93 Ok(map.into_object())
94 }
95
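The `new` constructor is split into `new_v1` and `new_v2` because only the v1 format stores the working-directory parents in the same bytes as the map; with v2 the parents live in the docket, so `new_v2` has no parents to return. A standalone sketch of that difference, using plain byte slices and invented types rather than the real hg-core ones (the v1 header is 40 bytes for 20-byte SHA-1 nodes):

struct Parents {
    p1: [u8; 20],
    p2: [u8; 20],
}

// v1: the file starts with the two parent node IDs, so opening the map
// also yields the parents.
fn open_v1(on_disk: &[u8]) -> (&[u8], Option<Parents>) {
    if on_disk.len() < 40 {
        return (on_disk, None);
    }
    let mut p1 = [0u8; 20];
    let mut p2 = [0u8; 20];
    p1.copy_from_slice(&on_disk[..20]);
    p2.copy_from_slice(&on_disk[20..40]);
    (&on_disk[40..], Some(Parents { p1, p2 }))
}

// v2: the data file holds only the tree, so there is nothing to return
// besides the map bytes; parents are read from the docket separately.
fn open_v2(data_file: &[u8]) -> &[u8] {
    data_file
}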
84 def clear(&self) -> PyResult<PyObject> {
96 def clear(&self) -> PyResult<PyObject> {
85 self.inner(py).borrow_mut().clear();
97 self.inner(py).borrow_mut().clear();
86 Ok(py.None())
98 Ok(py.None())
87 }
99 }
88
100
89 def get(
101 def get(
90 &self,
102 &self,
91 key: PyObject,
103 key: PyObject,
92 default: Option<PyObject> = None
104 default: Option<PyObject> = None
93 ) -> PyResult<Option<PyObject>> {
105 ) -> PyResult<Option<PyObject>> {
94 let key = key.extract::<PyBytes>(py)?;
106 let key = key.extract::<PyBytes>(py)?;
95 match self
107 match self
96 .inner(py)
108 .inner(py)
97 .borrow()
109 .borrow()
98 .get(HgPath::new(key.data(py)))
110 .get(HgPath::new(key.data(py)))
99 .map_err(|e| v2_error(py, e))?
111 .map_err(|e| v2_error(py, e))?
100 {
112 {
101 Some(entry) => {
113 Some(entry) => {
102 Ok(Some(make_dirstate_item(py, &entry)?))
114 Ok(Some(make_dirstate_item(py, &entry)?))
103 },
115 },
104 None => Ok(default)
116 None => Ok(default)
105 }
117 }
106 }
118 }
107
119
108 def addfile(
120 def addfile(
109 &self,
121 &self,
110 f: PyObject,
122 f: PyObject,
111 mode: PyObject,
123 mode: PyObject,
112 size: PyObject,
124 size: PyObject,
113 mtime: PyObject,
125 mtime: PyObject,
114 added: PyObject,
126 added: PyObject,
115 merged: PyObject,
127 merged: PyObject,
116 from_p2: PyObject,
128 from_p2: PyObject,
117 possibly_dirty: PyObject,
129 possibly_dirty: PyObject,
118 ) -> PyResult<PyObject> {
130 ) -> PyResult<PyObject> {
119 let f = f.extract::<PyBytes>(py)?;
131 let f = f.extract::<PyBytes>(py)?;
120 let filename = HgPath::new(f.data(py));
132 let filename = HgPath::new(f.data(py));
121 let mode = if mode.is_none(py) {
133 let mode = if mode.is_none(py) {
122 // fallback default value
134 // fallback default value
123 0
135 0
124 } else {
136 } else {
125 mode.extract(py)?
137 mode.extract(py)?
126 };
138 };
127 let size = if size.is_none(py) {
139 let size = if size.is_none(py) {
128 // fallback default value
140 // fallback default value
129 SIZE_NON_NORMAL
141 SIZE_NON_NORMAL
130 } else {
142 } else {
131 size.extract(py)?
143 size.extract(py)?
132 };
144 };
133 let mtime = if mtime.is_none(py) {
145 let mtime = if mtime.is_none(py) {
134 // fallback default value
146 // fallback default value
135 MTIME_UNSET
147 MTIME_UNSET
136 } else {
148 } else {
137 mtime.extract(py)?
149 mtime.extract(py)?
138 };
150 };
139 let entry = DirstateEntry {
151 let entry = DirstateEntry {
140 // XXX Arbitrary default value since the value is determined later
152 // XXX Arbitrary default value since the value is determined later
141 state: EntryState::Normal,
153 state: EntryState::Normal,
142 mode: mode,
154 mode: mode,
143 size: size,
155 size: size,
144 mtime: mtime,
156 mtime: mtime,
145 };
157 };
146 let added = added.extract::<PyBool>(py)?.is_true();
158 let added = added.extract::<PyBool>(py)?.is_true();
147 let merged = merged.extract::<PyBool>(py)?.is_true();
159 let merged = merged.extract::<PyBool>(py)?.is_true();
148 let from_p2 = from_p2.extract::<PyBool>(py)?.is_true();
160 let from_p2 = from_p2.extract::<PyBool>(py)?.is_true();
149 let possibly_dirty = possibly_dirty.extract::<PyBool>(py)?.is_true();
161 let possibly_dirty = possibly_dirty.extract::<PyBool>(py)?.is_true();
150 self.inner(py).borrow_mut().add_file(
162 self.inner(py).borrow_mut().add_file(
151 filename,
163 filename,
152 entry,
164 entry,
153 added,
165 added,
154 merged,
166 merged,
155 from_p2,
167 from_p2,
156 possibly_dirty
168 possibly_dirty
157 ).and(Ok(py.None())).or_else(|e: DirstateError| {
169 ).and(Ok(py.None())).or_else(|e: DirstateError| {
158 Err(PyErr::new::<exc::ValueError, _>(py, e.to_string()))
170 Err(PyErr::new::<exc::ValueError, _>(py, e.to_string()))
159 })
171 })
160 }
172 }
161
173
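`addfile` maps a Python `None` for `mode`, `size` or `mtime` to a sentinel before building the `DirstateEntry`. A minimal restatement of that fallback, with the sentinel values assumed to mirror hg-core's `MTIME_UNSET` and `SIZE_NON_NORMAL` constants:

// Assumed sentinel values; the real constants are imported from hg-core.
const MTIME_UNSET: i32 = -1;
const SIZE_NON_NORMAL: i32 = -1;

// A Python `None` arrives here as `Option::None`; the real binding checks
// `obj.is_none(py)` before extracting an integer.
fn mtime_or_default(raw: Option<i32>) -> i32 {
    raw.unwrap_or(MTIME_UNSET)
}

fn size_or_default(raw: Option<i32>) -> i32 {
    raw.unwrap_or(SIZE_NON_NORMAL)
}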
162 def removefile(
174 def removefile(
163 &self,
175 &self,
164 f: PyObject,
176 f: PyObject,
165 in_merge: PyObject
177 in_merge: PyObject
166 ) -> PyResult<PyObject> {
178 ) -> PyResult<PyObject> {
167 self.inner(py).borrow_mut()
179 self.inner(py).borrow_mut()
168 .remove_file(
180 .remove_file(
169 HgPath::new(f.extract::<PyBytes>(py)?.data(py)),
181 HgPath::new(f.extract::<PyBytes>(py)?.data(py)),
170 in_merge.extract::<PyBool>(py)?.is_true(),
182 in_merge.extract::<PyBool>(py)?.is_true(),
171 )
183 )
172 .or_else(|_| {
184 .or_else(|_| {
173 Err(PyErr::new::<exc::OSError, _>(
185 Err(PyErr::new::<exc::OSError, _>(
174 py,
186 py,
175 "Dirstate error".to_string(),
187 "Dirstate error".to_string(),
176 ))
188 ))
177 })?;
189 })?;
178 Ok(py.None())
190 Ok(py.None())
179 }
191 }
180
192
181 def dropfile(
193 def dropfile(
182 &self,
194 &self,
183 f: PyObject,
195 f: PyObject,
184 ) -> PyResult<PyBool> {
196 ) -> PyResult<PyBool> {
185 self.inner(py).borrow_mut()
197 self.inner(py).borrow_mut()
186 .drop_file(
198 .drop_file(
187 HgPath::new(f.extract::<PyBytes>(py)?.data(py)),
199 HgPath::new(f.extract::<PyBytes>(py)?.data(py)),
188 )
200 )
189 .and_then(|b| Ok(b.to_py_object(py)))
201 .and_then(|b| Ok(b.to_py_object(py)))
190 .or_else(|e| {
202 .or_else(|e| {
191 Err(PyErr::new::<exc::OSError, _>(
203 Err(PyErr::new::<exc::OSError, _>(
192 py,
204 py,
193 format!("Dirstate error: {}", e.to_string()),
205 format!("Dirstate error: {}", e.to_string()),
194 ))
206 ))
195 })
207 })
196 }
208 }
197
209
198 def clearambiguoustimes(
210 def clearambiguoustimes(
199 &self,
211 &self,
200 files: PyObject,
212 files: PyObject,
201 now: PyObject
213 now: PyObject
202 ) -> PyResult<PyObject> {
214 ) -> PyResult<PyObject> {
203 let files: PyResult<Vec<HgPathBuf>> = files
215 let files: PyResult<Vec<HgPathBuf>> = files
204 .iter(py)?
216 .iter(py)?
205 .map(|filename| {
217 .map(|filename| {
206 Ok(HgPathBuf::from_bytes(
218 Ok(HgPathBuf::from_bytes(
207 filename?.extract::<PyBytes>(py)?.data(py),
219 filename?.extract::<PyBytes>(py)?.data(py),
208 ))
220 ))
209 })
221 })
210 .collect();
222 .collect();
211 self.inner(py)
223 self.inner(py)
212 .borrow_mut()
224 .borrow_mut()
213 .clear_ambiguous_times(files?, now.extract(py)?)
225 .clear_ambiguous_times(files?, now.extract(py)?)
214 .map_err(|e| v2_error(py, e))?;
226 .map_err(|e| v2_error(py, e))?;
215 Ok(py.None())
227 Ok(py.None())
216 }
228 }
217
229
218 def other_parent_entries(&self) -> PyResult<PyObject> {
230 def other_parent_entries(&self) -> PyResult<PyObject> {
219 let mut inner_shared = self.inner(py).borrow_mut();
231 let mut inner_shared = self.inner(py).borrow_mut();
220 let set = PySet::empty(py)?;
232 let set = PySet::empty(py)?;
221 for path in inner_shared.iter_other_parent_paths() {
233 for path in inner_shared.iter_other_parent_paths() {
222 let path = path.map_err(|e| v2_error(py, e))?;
234 let path = path.map_err(|e| v2_error(py, e))?;
223 set.add(py, PyBytes::new(py, path.as_bytes()))?;
235 set.add(py, PyBytes::new(py, path.as_bytes()))?;
224 }
236 }
225 Ok(set.into_object())
237 Ok(set.into_object())
226 }
238 }
227
239
228 def non_normal_entries(&self) -> PyResult<NonNormalEntries> {
240 def non_normal_entries(&self) -> PyResult<NonNormalEntries> {
229 NonNormalEntries::from_inner(py, self.clone_ref(py))
241 NonNormalEntries::from_inner(py, self.clone_ref(py))
230 }
242 }
231
243
232 def non_normal_entries_contains(&self, key: PyObject) -> PyResult<bool> {
244 def non_normal_entries_contains(&self, key: PyObject) -> PyResult<bool> {
233 let key = key.extract::<PyBytes>(py)?;
245 let key = key.extract::<PyBytes>(py)?;
234 self.inner(py)
246 self.inner(py)
235 .borrow_mut()
247 .borrow_mut()
236 .non_normal_entries_contains(HgPath::new(key.data(py)))
248 .non_normal_entries_contains(HgPath::new(key.data(py)))
237 .map_err(|e| v2_error(py, e))
249 .map_err(|e| v2_error(py, e))
238 }
250 }
239
251
240 def non_normal_entries_display(&self) -> PyResult<PyString> {
252 def non_normal_entries_display(&self) -> PyResult<PyString> {
241 let mut inner = self.inner(py).borrow_mut();
253 let mut inner = self.inner(py).borrow_mut();
242 let paths = inner
254 let paths = inner
243 .iter_non_normal_paths()
255 .iter_non_normal_paths()
244 .collect::<Result<Vec<_>, _>>()
256 .collect::<Result<Vec<_>, _>>()
245 .map_err(|e| v2_error(py, e))?;
257 .map_err(|e| v2_error(py, e))?;
246 let formatted = format!("NonNormalEntries: {}", hg::utils::join_display(paths, ", "));
258 let formatted = format!("NonNormalEntries: {}", hg::utils::join_display(paths, ", "));
247 Ok(PyString::new(py, &formatted))
259 Ok(PyString::new(py, &formatted))
248 }
260 }
249
261
250 def non_normal_entries_remove(&self, key: PyObject) -> PyResult<PyObject> {
262 def non_normal_entries_remove(&self, key: PyObject) -> PyResult<PyObject> {
251 let key = key.extract::<PyBytes>(py)?;
263 let key = key.extract::<PyBytes>(py)?;
252 self
264 self
253 .inner(py)
265 .inner(py)
254 .borrow_mut()
266 .borrow_mut()
255 .non_normal_entries_remove(HgPath::new(key.data(py)));
267 .non_normal_entries_remove(HgPath::new(key.data(py)));
256 Ok(py.None())
268 Ok(py.None())
257 }
269 }
258
270
259 def non_normal_or_other_parent_paths(&self) -> PyResult<PyList> {
271 def non_normal_or_other_parent_paths(&self) -> PyResult<PyList> {
260 let mut inner = self.inner(py).borrow_mut();
272 let mut inner = self.inner(py).borrow_mut();
261
273
262 let ret = PyList::new(py, &[]);
274 let ret = PyList::new(py, &[]);
263 for filename in inner.non_normal_or_other_parent_paths() {
275 for filename in inner.non_normal_or_other_parent_paths() {
264 let filename = filename.map_err(|e| v2_error(py, e))?;
276 let filename = filename.map_err(|e| v2_error(py, e))?;
265 let as_pystring = PyBytes::new(py, filename.as_bytes());
277 let as_pystring = PyBytes::new(py, filename.as_bytes());
266 ret.append(py, as_pystring.into_object());
278 ret.append(py, as_pystring.into_object());
267 }
279 }
268 Ok(ret)
280 Ok(ret)
269 }
281 }
270
282
271 def non_normal_entries_iter(&self) -> PyResult<NonNormalEntriesIterator> {
283 def non_normal_entries_iter(&self) -> PyResult<NonNormalEntriesIterator> {
272 // Make sure the sets are defined before we no longer have a mutable
284 // Make sure the sets are defined before we no longer have a mutable
273 // reference to the dmap.
285 // reference to the dmap.
274 self.inner(py)
286 self.inner(py)
275 .borrow_mut()
287 .borrow_mut()
276 .set_non_normal_other_parent_entries(false);
288 .set_non_normal_other_parent_entries(false);
277
289
278 let leaked_ref = self.inner(py).leak_immutable();
290 let leaked_ref = self.inner(py).leak_immutable();
279
291
280 NonNormalEntriesIterator::from_inner(py, unsafe {
292 NonNormalEntriesIterator::from_inner(py, unsafe {
281 leaked_ref.map(py, |o| {
293 leaked_ref.map(py, |o| {
282 o.iter_non_normal_paths_panic()
294 o.iter_non_normal_paths_panic()
283 })
295 })
284 })
296 })
285 }
297 }
286
298
287 def hastrackeddir(&self, d: PyObject) -> PyResult<PyBool> {
299 def hastrackeddir(&self, d: PyObject) -> PyResult<PyBool> {
288 let d = d.extract::<PyBytes>(py)?;
300 let d = d.extract::<PyBytes>(py)?;
289 Ok(self.inner(py).borrow_mut()
301 Ok(self.inner(py).borrow_mut()
290 .has_tracked_dir(HgPath::new(d.data(py)))
302 .has_tracked_dir(HgPath::new(d.data(py)))
291 .map_err(|e| {
303 .map_err(|e| {
292 PyErr::new::<exc::ValueError, _>(py, e.to_string())
304 PyErr::new::<exc::ValueError, _>(py, e.to_string())
293 })?
305 })?
294 .to_py_object(py))
306 .to_py_object(py))
295 }
307 }
296
308
297 def hasdir(&self, d: PyObject) -> PyResult<PyBool> {
309 def hasdir(&self, d: PyObject) -> PyResult<PyBool> {
298 let d = d.extract::<PyBytes>(py)?;
310 let d = d.extract::<PyBytes>(py)?;
299 Ok(self.inner(py).borrow_mut()
311 Ok(self.inner(py).borrow_mut()
300 .has_dir(HgPath::new(d.data(py)))
312 .has_dir(HgPath::new(d.data(py)))
301 .map_err(|e| {
313 .map_err(|e| {
302 PyErr::new::<exc::ValueError, _>(py, e.to_string())
314 PyErr::new::<exc::ValueError, _>(py, e.to_string())
303 })?
315 })?
304 .to_py_object(py))
316 .to_py_object(py))
305 }
317 }
306
318
307 def write(
319 def write_v1(
308 &self,
320 &self,
309 use_dirstate_v2: bool,
310 p1: PyObject,
321 p1: PyObject,
311 p2: PyObject,
322 p2: PyObject,
312 now: PyObject
323 now: PyObject
313 ) -> PyResult<PyBytes> {
324 ) -> PyResult<PyBytes> {
314 let now = Timestamp(now.extract(py)?);
325 let now = Timestamp(now.extract(py)?);
326
327 let mut inner = self.inner(py).borrow_mut();
315 let parents = DirstateParents {
328 let parents = DirstateParents {
316 p1: extract_node_id(py, &p1)?,
329 p1: extract_node_id(py, &p1)?,
317 p2: extract_node_id(py, &p2)?,
330 p2: extract_node_id(py, &p2)?,
318 };
331 };
332 let result = inner.pack_v1(parents, now);
333 match result {
334 Ok(packed) => Ok(PyBytes::new(py, &packed)),
335 Err(_) => Err(PyErr::new::<exc::OSError, _>(
336 py,
337 "Dirstate error".to_string(),
338 )),
339 }
340 }
341
342 def write_v2(
343 &self,
344 now: PyObject
345 ) -> PyResult<PyBytes> {
346 let now = Timestamp(now.extract(py)?);
319
347
320 let mut inner = self.inner(py).borrow_mut();
348 let mut inner = self.inner(py).borrow_mut();
321 let result = if use_dirstate_v2 {
349 let result = inner.pack_v2(now);
322 inner.pack_v2(parents, now)
323 } else {
324 inner.pack_v1(parents, now)
325 };
326 match result {
350 match result {
327 Ok(packed) => Ok(PyBytes::new(py, &packed)),
351 Ok(packed) => Ok(PyBytes::new(py, &packed)),
328 Err(_) => Err(PyErr::new::<exc::OSError, _>(
352 Err(_) => Err(PyErr::new::<exc::OSError, _>(
329 py,
353 py,
330 "Dirstate error".to_string(),
354 "Dirstate error".to_string(),
331 )),
355 )),
332 }
356 }
333 }
357 }
334
358
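Splitting `write` into `write_v1` and `write_v2` also changes the calling convention: only the v1 serializer takes the parents, because a v2 data file does not embed them; the Python layer records them in the docket instead. The two contracts restated as a simplified trait, with plain types standing in for the PyObject-based signatures above:

type Node = [u8; 20];

trait DirstateWriter {
    // v1: parents are serialized into the returned bytes themselves.
    fn write_v1(&mut self, p1: Node, p2: Node, now: i32) -> Vec<u8>;
    // v2: only the timestamp is needed; the caller stores the parents
    // in the docket file next to the data file.
    fn write_v2(&mut self, now: i32) -> Vec<u8>;
}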
335 def filefoldmapasdict(&self) -> PyResult<PyDict> {
359 def filefoldmapasdict(&self) -> PyResult<PyDict> {
336 let dict = PyDict::new(py);
360 let dict = PyDict::new(py);
337 for item in self.inner(py).borrow_mut().iter() {
361 for item in self.inner(py).borrow_mut().iter() {
338 let (path, entry) = item.map_err(|e| v2_error(py, e))?;
362 let (path, entry) = item.map_err(|e| v2_error(py, e))?;
339 if entry.state != EntryState::Removed {
363 if entry.state != EntryState::Removed {
340 let key = normalize_case(path);
364 let key = normalize_case(path);
341 let value = path;
365 let value = path;
342 dict.set_item(
366 dict.set_item(
343 py,
367 py,
344 PyBytes::new(py, key.as_bytes()).into_object(),
368 PyBytes::new(py, key.as_bytes()).into_object(),
345 PyBytes::new(py, value.as_bytes()).into_object(),
369 PyBytes::new(py, value.as_bytes()).into_object(),
346 )?;
370 )?;
347 }
371 }
348 }
372 }
349 Ok(dict)
373 Ok(dict)
350 }
374 }
351
375
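`filefoldmapasdict` builds a case-folding map from each tracked (non-removed) path's normalized form back to its on-disk spelling. A standalone sketch, with ASCII lowercasing standing in for hg's platform-dependent `normalize_case`:

use std::collections::HashMap;

fn file_fold_map<'a>(
    tracked_paths: impl Iterator<Item = &'a str>,
) -> HashMap<String, &'a str> {
    let mut map = HashMap::new();
    for path in tracked_paths {
        // Later insertions win on case collisions, matching the repeated
        // `dict.set_item` calls in the binding above.
        map.insert(path.to_ascii_lowercase(), path);
    }
    map
}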
352 def __len__(&self) -> PyResult<usize> {
376 def __len__(&self) -> PyResult<usize> {
353 Ok(self.inner(py).borrow().len())
377 Ok(self.inner(py).borrow().len())
354 }
378 }
355
379
356 def __contains__(&self, key: PyObject) -> PyResult<bool> {
380 def __contains__(&self, key: PyObject) -> PyResult<bool> {
357 let key = key.extract::<PyBytes>(py)?;
381 let key = key.extract::<PyBytes>(py)?;
358 self.inner(py)
382 self.inner(py)
359 .borrow()
383 .borrow()
360 .contains_key(HgPath::new(key.data(py)))
384 .contains_key(HgPath::new(key.data(py)))
361 .map_err(|e| v2_error(py, e))
385 .map_err(|e| v2_error(py, e))
362 }
386 }
363
387
364 def __getitem__(&self, key: PyObject) -> PyResult<PyObject> {
388 def __getitem__(&self, key: PyObject) -> PyResult<PyObject> {
365 let key = key.extract::<PyBytes>(py)?;
389 let key = key.extract::<PyBytes>(py)?;
366 let key = HgPath::new(key.data(py));
390 let key = HgPath::new(key.data(py));
367 match self
391 match self
368 .inner(py)
392 .inner(py)
369 .borrow()
393 .borrow()
370 .get(key)
394 .get(key)
371 .map_err(|e| v2_error(py, e))?
395 .map_err(|e| v2_error(py, e))?
372 {
396 {
373 Some(entry) => {
397 Some(entry) => {
374 Ok(make_dirstate_item(py, &entry)?)
398 Ok(make_dirstate_item(py, &entry)?)
375 },
399 },
376 None => Err(PyErr::new::<exc::KeyError, _>(
400 None => Err(PyErr::new::<exc::KeyError, _>(
377 py,
401 py,
378 String::from_utf8_lossy(key.as_bytes()),
402 String::from_utf8_lossy(key.as_bytes()),
379 )),
403 )),
380 }
404 }
381 }
405 }
382
406
383 def keys(&self) -> PyResult<DirstateMapKeysIterator> {
407 def keys(&self) -> PyResult<DirstateMapKeysIterator> {
384 let leaked_ref = self.inner(py).leak_immutable();
408 let leaked_ref = self.inner(py).leak_immutable();
385 DirstateMapKeysIterator::from_inner(
409 DirstateMapKeysIterator::from_inner(
386 py,
410 py,
387 unsafe { leaked_ref.map(py, |o| o.iter()) },
411 unsafe { leaked_ref.map(py, |o| o.iter()) },
388 )
412 )
389 }
413 }
390
414
391 def items(&self) -> PyResult<DirstateMapItemsIterator> {
415 def items(&self) -> PyResult<DirstateMapItemsIterator> {
392 let leaked_ref = self.inner(py).leak_immutable();
416 let leaked_ref = self.inner(py).leak_immutable();
393 DirstateMapItemsIterator::from_inner(
417 DirstateMapItemsIterator::from_inner(
394 py,
418 py,
395 unsafe { leaked_ref.map(py, |o| o.iter()) },
419 unsafe { leaked_ref.map(py, |o| o.iter()) },
396 )
420 )
397 }
421 }
398
422
399 def __iter__(&self) -> PyResult<DirstateMapKeysIterator> {
423 def __iter__(&self) -> PyResult<DirstateMapKeysIterator> {
400 let leaked_ref = self.inner(py).leak_immutable();
424 let leaked_ref = self.inner(py).leak_immutable();
401 DirstateMapKeysIterator::from_inner(
425 DirstateMapKeysIterator::from_inner(
402 py,
426 py,
403 unsafe { leaked_ref.map(py, |o| o.iter()) },
427 unsafe { leaked_ref.map(py, |o| o.iter()) },
404 )
428 )
405 }
429 }
406
430
407 // TODO all copymap* methods, see docstring above
431 // TODO all copymap* methods, see docstring above
408 def copymapcopy(&self) -> PyResult<PyDict> {
432 def copymapcopy(&self) -> PyResult<PyDict> {
409 let dict = PyDict::new(py);
433 let dict = PyDict::new(py);
410 for item in self.inner(py).borrow().copy_map_iter() {
434 for item in self.inner(py).borrow().copy_map_iter() {
411 let (key, value) = item.map_err(|e| v2_error(py, e))?;
435 let (key, value) = item.map_err(|e| v2_error(py, e))?;
412 dict.set_item(
436 dict.set_item(
413 py,
437 py,
414 PyBytes::new(py, key.as_bytes()),
438 PyBytes::new(py, key.as_bytes()),
415 PyBytes::new(py, value.as_bytes()),
439 PyBytes::new(py, value.as_bytes()),
416 )?;
440 )?;
417 }
441 }
418 Ok(dict)
442 Ok(dict)
419 }
443 }
420
444
421 def copymapgetitem(&self, key: PyObject) -> PyResult<PyBytes> {
445 def copymapgetitem(&self, key: PyObject) -> PyResult<PyBytes> {
422 let key = key.extract::<PyBytes>(py)?;
446 let key = key.extract::<PyBytes>(py)?;
423 match self
447 match self
424 .inner(py)
448 .inner(py)
425 .borrow()
449 .borrow()
426 .copy_map_get(HgPath::new(key.data(py)))
450 .copy_map_get(HgPath::new(key.data(py)))
427 .map_err(|e| v2_error(py, e))?
451 .map_err(|e| v2_error(py, e))?
428 {
452 {
429 Some(copy) => Ok(PyBytes::new(py, copy.as_bytes())),
453 Some(copy) => Ok(PyBytes::new(py, copy.as_bytes())),
430 None => Err(PyErr::new::<exc::KeyError, _>(
454 None => Err(PyErr::new::<exc::KeyError, _>(
431 py,
455 py,
432 String::from_utf8_lossy(key.data(py)),
456 String::from_utf8_lossy(key.data(py)),
433 )),
457 )),
434 }
458 }
435 }
459 }
436 def copymap(&self) -> PyResult<CopyMap> {
460 def copymap(&self) -> PyResult<CopyMap> {
437 CopyMap::from_inner(py, self.clone_ref(py))
461 CopyMap::from_inner(py, self.clone_ref(py))
438 }
462 }
439
463
440 def copymaplen(&self) -> PyResult<usize> {
464 def copymaplen(&self) -> PyResult<usize> {
441 Ok(self.inner(py).borrow().copy_map_len())
465 Ok(self.inner(py).borrow().copy_map_len())
442 }
466 }
443 def copymapcontains(&self, key: PyObject) -> PyResult<bool> {
467 def copymapcontains(&self, key: PyObject) -> PyResult<bool> {
444 let key = key.extract::<PyBytes>(py)?;
468 let key = key.extract::<PyBytes>(py)?;
445 self.inner(py)
469 self.inner(py)
446 .borrow()
470 .borrow()
447 .copy_map_contains_key(HgPath::new(key.data(py)))
471 .copy_map_contains_key(HgPath::new(key.data(py)))
448 .map_err(|e| v2_error(py, e))
472 .map_err(|e| v2_error(py, e))
449 }
473 }
450 def copymapget(
474 def copymapget(
451 &self,
475 &self,
452 key: PyObject,
476 key: PyObject,
453 default: Option<PyObject>
477 default: Option<PyObject>
454 ) -> PyResult<Option<PyObject>> {
478 ) -> PyResult<Option<PyObject>> {
455 let key = key.extract::<PyBytes>(py)?;
479 let key = key.extract::<PyBytes>(py)?;
456 match self
480 match self
457 .inner(py)
481 .inner(py)
458 .borrow()
482 .borrow()
459 .copy_map_get(HgPath::new(key.data(py)))
483 .copy_map_get(HgPath::new(key.data(py)))
460 .map_err(|e| v2_error(py, e))?
484 .map_err(|e| v2_error(py, e))?
461 {
485 {
462 Some(copy) => Ok(Some(
486 Some(copy) => Ok(Some(
463 PyBytes::new(py, copy.as_bytes()).into_object(),
487 PyBytes::new(py, copy.as_bytes()).into_object(),
464 )),
488 )),
465 None => Ok(default),
489 None => Ok(default),
466 }
490 }
467 }
491 }
468 def copymapsetitem(
492 def copymapsetitem(
469 &self,
493 &self,
470 key: PyObject,
494 key: PyObject,
471 value: PyObject
495 value: PyObject
472 ) -> PyResult<PyObject> {
496 ) -> PyResult<PyObject> {
473 let key = key.extract::<PyBytes>(py)?;
497 let key = key.extract::<PyBytes>(py)?;
474 let value = value.extract::<PyBytes>(py)?;
498 let value = value.extract::<PyBytes>(py)?;
475 self.inner(py)
499 self.inner(py)
476 .borrow_mut()
500 .borrow_mut()
477 .copy_map_insert(
501 .copy_map_insert(
478 HgPathBuf::from_bytes(key.data(py)),
502 HgPathBuf::from_bytes(key.data(py)),
479 HgPathBuf::from_bytes(value.data(py)),
503 HgPathBuf::from_bytes(value.data(py)),
480 )
504 )
481 .map_err(|e| v2_error(py, e))?;
505 .map_err(|e| v2_error(py, e))?;
482 Ok(py.None())
506 Ok(py.None())
483 }
507 }
484 def copymappop(
508 def copymappop(
485 &self,
509 &self,
486 key: PyObject,
510 key: PyObject,
487 default: Option<PyObject>
511 default: Option<PyObject>
488 ) -> PyResult<Option<PyObject>> {
512 ) -> PyResult<Option<PyObject>> {
489 let key = key.extract::<PyBytes>(py)?;
513 let key = key.extract::<PyBytes>(py)?;
490 match self
514 match self
491 .inner(py)
515 .inner(py)
492 .borrow_mut()
516 .borrow_mut()
493 .copy_map_remove(HgPath::new(key.data(py)))
517 .copy_map_remove(HgPath::new(key.data(py)))
494 .map_err(|e| v2_error(py, e))?
518 .map_err(|e| v2_error(py, e))?
495 {
519 {
496 Some(_) => Ok(None),
520 Some(_) => Ok(None),
497 None => Ok(default),
521 None => Ok(default),
498 }
522 }
499 }
523 }
500
524
501 def copymapiter(&self) -> PyResult<CopyMapKeysIterator> {
525 def copymapiter(&self) -> PyResult<CopyMapKeysIterator> {
502 let leaked_ref = self.inner(py).leak_immutable();
526 let leaked_ref = self.inner(py).leak_immutable();
503 CopyMapKeysIterator::from_inner(
527 CopyMapKeysIterator::from_inner(
504 py,
528 py,
505 unsafe { leaked_ref.map(py, |o| o.copy_map_iter()) },
529 unsafe { leaked_ref.map(py, |o| o.copy_map_iter()) },
506 )
530 )
507 }
531 }
508
532
509 def copymapitemsiter(&self) -> PyResult<CopyMapItemsIterator> {
533 def copymapitemsiter(&self) -> PyResult<CopyMapItemsIterator> {
510 let leaked_ref = self.inner(py).leak_immutable();
534 let leaked_ref = self.inner(py).leak_immutable();
511 CopyMapItemsIterator::from_inner(
535 CopyMapItemsIterator::from_inner(
512 py,
536 py,
513 unsafe { leaked_ref.map(py, |o| o.copy_map_iter()) },
537 unsafe { leaked_ref.map(py, |o| o.copy_map_iter()) },
514 )
538 )
515 }
539 }
516
540
517 def directories(&self) -> PyResult<PyList> {
541 def directories(&self) -> PyResult<PyList> {
518 let dirs = PyList::new(py, &[]);
542 let dirs = PyList::new(py, &[]);
519 for item in self.inner(py).borrow().iter_directories() {
543 for item in self.inner(py).borrow().iter_directories() {
520 let (path, mtime) = item.map_err(|e| v2_error(py, e))?;
544 let (path, mtime) = item.map_err(|e| v2_error(py, e))?;
521 let path = PyBytes::new(py, path.as_bytes());
545 let path = PyBytes::new(py, path.as_bytes());
522 let mtime = mtime.map(|t| t.0).unwrap_or(-1);
546 let mtime = mtime.map(|t| t.0).unwrap_or(-1);
523 let item = make_directory_item(py, mtime as i32)?;
547 let item = make_directory_item(py, mtime as i32)?;
524 let tuple = (path, item);
548 let tuple = (path, item);
525 dirs.append(py, tuple.to_py_object(py).into_object())
549 dirs.append(py, tuple.to_py_object(py).into_object())
526 }
550 }
527 Ok(dirs)
551 Ok(dirs)
528 }
552 }
529
553
530 });
554 });
531
555
532 impl DirstateMap {
556 impl DirstateMap {
533 pub fn get_inner_mut<'a>(
557 pub fn get_inner_mut<'a>(
534 &'a self,
558 &'a self,
535 py: Python<'a>,
559 py: Python<'a>,
536 ) -> RefMut<'a, Box<dyn DirstateMapMethods + Send>> {
560 ) -> RefMut<'a, Box<dyn DirstateMapMethods + Send>> {
537 self.inner(py).borrow_mut()
561 self.inner(py).borrow_mut()
538 }
562 }
539 fn translate_key(
563 fn translate_key(
540 py: Python,
564 py: Python,
541 res: Result<(&HgPath, DirstateEntry), DirstateV2ParseError>,
565 res: Result<(&HgPath, DirstateEntry), DirstateV2ParseError>,
542 ) -> PyResult<Option<PyBytes>> {
566 ) -> PyResult<Option<PyBytes>> {
543 let (f, _entry) = res.map_err(|e| v2_error(py, e))?;
567 let (f, _entry) = res.map_err(|e| v2_error(py, e))?;
544 Ok(Some(PyBytes::new(py, f.as_bytes())))
568 Ok(Some(PyBytes::new(py, f.as_bytes())))
545 }
569 }
546 fn translate_key_value(
570 fn translate_key_value(
547 py: Python,
571 py: Python,
548 res: Result<(&HgPath, DirstateEntry), DirstateV2ParseError>,
572 res: Result<(&HgPath, DirstateEntry), DirstateV2ParseError>,
549 ) -> PyResult<Option<(PyBytes, PyObject)>> {
573 ) -> PyResult<Option<(PyBytes, PyObject)>> {
550 let (f, entry) = res.map_err(|e| v2_error(py, e))?;
574 let (f, entry) = res.map_err(|e| v2_error(py, e))?;
551 Ok(Some((
575 Ok(Some((
552 PyBytes::new(py, f.as_bytes()),
576 PyBytes::new(py, f.as_bytes()),
553 make_dirstate_item(py, &entry)?,
577 make_dirstate_item(py, &entry)?,
554 )))
578 )))
555 }
579 }
556 }
580 }
557
581
558 py_shared_iterator!(
582 py_shared_iterator!(
559 DirstateMapKeysIterator,
583 DirstateMapKeysIterator,
560 UnsafePyLeaked<StateMapIter<'static>>,
584 UnsafePyLeaked<StateMapIter<'static>>,
561 DirstateMap::translate_key,
585 DirstateMap::translate_key,
562 Option<PyBytes>
586 Option<PyBytes>
563 );
587 );
564
588
565 py_shared_iterator!(
589 py_shared_iterator!(
566 DirstateMapItemsIterator,
590 DirstateMapItemsIterator,
567 UnsafePyLeaked<StateMapIter<'static>>,
591 UnsafePyLeaked<StateMapIter<'static>>,
568 DirstateMap::translate_key_value,
592 DirstateMap::translate_key_value,
569 Option<(PyBytes, PyObject)>
593 Option<(PyBytes, PyObject)>
570 );
594 );
571
595
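Each `py_shared_iterator!` invocation pairs a lifetime-erased Rust iterator with a translation function (`translate_key` or `translate_key_value` above) that converts each item into Python objects. Stripped of the cpython machinery, the underlying shape is just `Iterator::map`, spelled out here as a sketch:

// A source iterator plus a per-item translation closure; the macro adds
// the Python wrapper type and the leak bookkeeping around this same idea.
struct Translated<I, F> {
    source: I,
    translate: F,
}

impl<I, F, T> Iterator for Translated<I, F>
where
    I: Iterator,
    F: FnMut(I::Item) -> T,
{
    type Item = T;

    fn next(&mut self) -> Option<T> {
        self.source.next().map(&mut self.translate)
    }
}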
572 fn extract_node_id(py: Python, obj: &PyObject) -> PyResult<Node> {
596 fn extract_node_id(py: Python, obj: &PyObject) -> PyResult<Node> {
573 let bytes = obj.extract::<PyBytes>(py)?;
597 let bytes = obj.extract::<PyBytes>(py)?;
574 match bytes.data(py).try_into() {
598 match bytes.data(py).try_into() {
575 Ok(s) => Ok(s),
599 Ok(s) => Ok(s),
576 Err(e) => Err(PyErr::new::<exc::ValueError, _>(py, e.to_string())),
600 Err(e) => Err(PyErr::new::<exc::ValueError, _>(py, e.to_string())),
577 }
601 }
578 }
602 }
579
603
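`extract_node_id` leans on the standard slice-to-array `try_into` conversion, which fails when the byte string has the wrong length. A minimal standalone equivalent, assuming 20-byte (binary SHA-1) nodes:

use std::convert::TryInto;

fn to_node(bytes: &[u8]) -> Result<[u8; 20], String> {
    bytes
        .try_into()
        .map_err(|_| format!("expected 20 bytes, got {}", bytes.len()))
}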
580 pub(super) fn v2_error(py: Python<'_>, _: DirstateV2ParseError) -> PyErr {
604 pub(super) fn v2_error(py: Python<'_>, _: DirstateV2ParseError) -> PyErr {
581 PyErr::new::<exc::ValueError, _>(py, "corrupted dirstate-v2")
605 PyErr::new::<exc::ValueError, _>(py, "corrupted dirstate-v2")
582 }
606 }
@@ -1,219 +1,215 @@
1 use crate::dirstate::owning::OwningDirstateMap;
1 use crate::dirstate::owning::OwningDirstateMap;
2 use hg::dirstate::parsers::Timestamp;
2 use hg::dirstate::parsers::Timestamp;
3 use hg::dirstate_tree::dispatch::DirstateMapMethods;
3 use hg::dirstate_tree::dispatch::DirstateMapMethods;
4 use hg::dirstate_tree::on_disk::DirstateV2ParseError;
4 use hg::dirstate_tree::on_disk::DirstateV2ParseError;
5 use hg::matchers::Matcher;
5 use hg::matchers::Matcher;
6 use hg::utils::hg_path::{HgPath, HgPathBuf};
6 use hg::utils::hg_path::{HgPath, HgPathBuf};
7 use hg::CopyMapIter;
7 use hg::CopyMapIter;
8 use hg::DirstateEntry;
8 use hg::DirstateEntry;
9 use hg::DirstateError;
9 use hg::DirstateError;
10 use hg::DirstateParents;
10 use hg::DirstateParents;
11 use hg::DirstateStatus;
11 use hg::DirstateStatus;
12 use hg::PatternFileWarning;
12 use hg::PatternFileWarning;
13 use hg::StateMapIter;
13 use hg::StateMapIter;
14 use hg::StatusError;
14 use hg::StatusError;
15 use hg::StatusOptions;
15 use hg::StatusOptions;
16 use std::path::PathBuf;
16 use std::path::PathBuf;
17
17
18 impl DirstateMapMethods for OwningDirstateMap {
18 impl DirstateMapMethods for OwningDirstateMap {
19 fn clear(&mut self) {
19 fn clear(&mut self) {
20 self.get_mut().clear()
20 self.get_mut().clear()
21 }
21 }
22
22
23 fn add_file(
23 fn add_file(
24 &mut self,
24 &mut self,
25 filename: &HgPath,
25 filename: &HgPath,
26 entry: DirstateEntry,
26 entry: DirstateEntry,
27 added: bool,
27 added: bool,
28 merged: bool,
28 merged: bool,
29 from_p2: bool,
29 from_p2: bool,
30 possibly_dirty: bool,
30 possibly_dirty: bool,
31 ) -> Result<(), DirstateError> {
31 ) -> Result<(), DirstateError> {
32 self.get_mut().add_file(
32 self.get_mut().add_file(
33 filename,
33 filename,
34 entry,
34 entry,
35 added,
35 added,
36 merged,
36 merged,
37 from_p2,
37 from_p2,
38 possibly_dirty,
38 possibly_dirty,
39 )
39 )
40 }
40 }
41
41
42 fn remove_file(
42 fn remove_file(
43 &mut self,
43 &mut self,
44 filename: &HgPath,
44 filename: &HgPath,
45 in_merge: bool,
45 in_merge: bool,
46 ) -> Result<(), DirstateError> {
46 ) -> Result<(), DirstateError> {
47 self.get_mut().remove_file(filename, in_merge)
47 self.get_mut().remove_file(filename, in_merge)
48 }
48 }
49
49
50 fn drop_file(&mut self, filename: &HgPath) -> Result<bool, DirstateError> {
50 fn drop_file(&mut self, filename: &HgPath) -> Result<bool, DirstateError> {
51 self.get_mut().drop_file(filename)
51 self.get_mut().drop_file(filename)
52 }
52 }
53
53
54 fn clear_ambiguous_times(
54 fn clear_ambiguous_times(
55 &mut self,
55 &mut self,
56 filenames: Vec<HgPathBuf>,
56 filenames: Vec<HgPathBuf>,
57 now: i32,
57 now: i32,
58 ) -> Result<(), DirstateV2ParseError> {
58 ) -> Result<(), DirstateV2ParseError> {
59 self.get_mut().clear_ambiguous_times(filenames, now)
59 self.get_mut().clear_ambiguous_times(filenames, now)
60 }
60 }
61
61
62 fn non_normal_entries_contains(
62 fn non_normal_entries_contains(
63 &mut self,
63 &mut self,
64 key: &HgPath,
64 key: &HgPath,
65 ) -> Result<bool, DirstateV2ParseError> {
65 ) -> Result<bool, DirstateV2ParseError> {
66 self.get_mut().non_normal_entries_contains(key)
66 self.get_mut().non_normal_entries_contains(key)
67 }
67 }
68
68
69 fn non_normal_entries_remove(&mut self, key: &HgPath) {
69 fn non_normal_entries_remove(&mut self, key: &HgPath) {
70 self.get_mut().non_normal_entries_remove(key)
70 self.get_mut().non_normal_entries_remove(key)
71 }
71 }
72
72
73 fn non_normal_or_other_parent_paths(
73 fn non_normal_or_other_parent_paths(
74 &mut self,
74 &mut self,
75 ) -> Box<dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + '_>
75 ) -> Box<dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + '_>
76 {
76 {
77 self.get_mut().non_normal_or_other_parent_paths()
77 self.get_mut().non_normal_or_other_parent_paths()
78 }
78 }
79
79
80 fn set_non_normal_other_parent_entries(&mut self, force: bool) {
80 fn set_non_normal_other_parent_entries(&mut self, force: bool) {
81 self.get_mut().set_non_normal_other_parent_entries(force)
81 self.get_mut().set_non_normal_other_parent_entries(force)
82 }
82 }
83
83
84 fn iter_non_normal_paths(
84 fn iter_non_normal_paths(
85 &mut self,
85 &mut self,
86 ) -> Box<
86 ) -> Box<
87 dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + Send + '_,
87 dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + Send + '_,
88 > {
88 > {
89 self.get_mut().iter_non_normal_paths()
89 self.get_mut().iter_non_normal_paths()
90 }
90 }
91
91
92 fn iter_non_normal_paths_panic(
92 fn iter_non_normal_paths_panic(
93 &self,
93 &self,
94 ) -> Box<
94 ) -> Box<
95 dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + Send + '_,
95 dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + Send + '_,
96 > {
96 > {
97 self.get().iter_non_normal_paths_panic()
97 self.get().iter_non_normal_paths_panic()
98 }
98 }
99
99
100 fn iter_other_parent_paths(
100 fn iter_other_parent_paths(
101 &mut self,
101 &mut self,
102 ) -> Box<
102 ) -> Box<
103 dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + Send + '_,
103 dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + Send + '_,
104 > {
104 > {
105 self.get_mut().iter_other_parent_paths()
105 self.get_mut().iter_other_parent_paths()
106 }
106 }
107
107
108 fn has_tracked_dir(
108 fn has_tracked_dir(
109 &mut self,
109 &mut self,
110 directory: &HgPath,
110 directory: &HgPath,
111 ) -> Result<bool, DirstateError> {
111 ) -> Result<bool, DirstateError> {
112 self.get_mut().has_tracked_dir(directory)
112 self.get_mut().has_tracked_dir(directory)
113 }
113 }
114
114
115 fn has_dir(&mut self, directory: &HgPath) -> Result<bool, DirstateError> {
115 fn has_dir(&mut self, directory: &HgPath) -> Result<bool, DirstateError> {
116 self.get_mut().has_dir(directory)
116 self.get_mut().has_dir(directory)
117 }
117 }
118
118
119 fn pack_v1(
119 fn pack_v1(
120 &mut self,
120 &mut self,
121 parents: DirstateParents,
121 parents: DirstateParents,
122 now: Timestamp,
122 now: Timestamp,
123 ) -> Result<Vec<u8>, DirstateError> {
123 ) -> Result<Vec<u8>, DirstateError> {
124 self.get_mut().pack_v1(parents, now)
124 self.get_mut().pack_v1(parents, now)
125 }
125 }
126
126
127 fn pack_v2(
127 fn pack_v2(&mut self, now: Timestamp) -> Result<Vec<u8>, DirstateError> {
128 &mut self,
128 self.get_mut().pack_v2(now)
129 parents: DirstateParents,
130 now: Timestamp,
131 ) -> Result<Vec<u8>, DirstateError> {
132 self.get_mut().pack_v2(parents, now)
133 }
129 }
134
130
135 fn status<'a>(
131 fn status<'a>(
136 &'a mut self,
132 &'a mut self,
137 matcher: &'a (dyn Matcher + Sync),
133 matcher: &'a (dyn Matcher + Sync),
138 root_dir: PathBuf,
134 root_dir: PathBuf,
139 ignore_files: Vec<PathBuf>,
135 ignore_files: Vec<PathBuf>,
140 options: StatusOptions,
136 options: StatusOptions,
141 ) -> Result<(DirstateStatus<'a>, Vec<PatternFileWarning>), StatusError>
137 ) -> Result<(DirstateStatus<'a>, Vec<PatternFileWarning>), StatusError>
142 {
138 {
143 self.get_mut()
139 self.get_mut()
144 .status(matcher, root_dir, ignore_files, options)
140 .status(matcher, root_dir, ignore_files, options)
145 }
141 }
146
142
147 fn copy_map_len(&self) -> usize {
143 fn copy_map_len(&self) -> usize {
148 self.get().copy_map_len()
144 self.get().copy_map_len()
149 }
145 }
150
146
151 fn copy_map_iter(&self) -> CopyMapIter<'_> {
147 fn copy_map_iter(&self) -> CopyMapIter<'_> {
152 self.get().copy_map_iter()
148 self.get().copy_map_iter()
153 }
149 }
154
150
155 fn copy_map_contains_key(
151 fn copy_map_contains_key(
156 &self,
152 &self,
157 key: &HgPath,
153 key: &HgPath,
158 ) -> Result<bool, DirstateV2ParseError> {
154 ) -> Result<bool, DirstateV2ParseError> {
159 self.get().copy_map_contains_key(key)
155 self.get().copy_map_contains_key(key)
160 }
156 }
161
157
162 fn copy_map_get(
158 fn copy_map_get(
163 &self,
159 &self,
164 key: &HgPath,
160 key: &HgPath,
165 ) -> Result<Option<&HgPath>, DirstateV2ParseError> {
161 ) -> Result<Option<&HgPath>, DirstateV2ParseError> {
166 self.get().copy_map_get(key)
162 self.get().copy_map_get(key)
167 }
163 }
168
164
169 fn copy_map_remove(
165 fn copy_map_remove(
170 &mut self,
166 &mut self,
171 key: &HgPath,
167 key: &HgPath,
172 ) -> Result<Option<HgPathBuf>, DirstateV2ParseError> {
168 ) -> Result<Option<HgPathBuf>, DirstateV2ParseError> {
173 self.get_mut().copy_map_remove(key)
169 self.get_mut().copy_map_remove(key)
174 }
170 }
175
171
176 fn copy_map_insert(
172 fn copy_map_insert(
177 &mut self,
173 &mut self,
178 key: HgPathBuf,
174 key: HgPathBuf,
179 value: HgPathBuf,
175 value: HgPathBuf,
180 ) -> Result<Option<HgPathBuf>, DirstateV2ParseError> {
176 ) -> Result<Option<HgPathBuf>, DirstateV2ParseError> {
181 self.get_mut().copy_map_insert(key, value)
177 self.get_mut().copy_map_insert(key, value)
182 }
178 }
183
179
184 fn len(&self) -> usize {
180 fn len(&self) -> usize {
185 self.get().len()
181 self.get().len()
186 }
182 }
187
183
188 fn contains_key(
184 fn contains_key(
189 &self,
185 &self,
190 key: &HgPath,
186 key: &HgPath,
191 ) -> Result<bool, DirstateV2ParseError> {
187 ) -> Result<bool, DirstateV2ParseError> {
192 self.get().contains_key(key)
188 self.get().contains_key(key)
193 }
189 }
194
190
195 fn get(
191 fn get(
196 &self,
192 &self,
197 key: &HgPath,
193 key: &HgPath,
198 ) -> Result<Option<DirstateEntry>, DirstateV2ParseError> {
194 ) -> Result<Option<DirstateEntry>, DirstateV2ParseError> {
199 self.get().get(key)
195 self.get().get(key)
200 }
196 }
201
197
202 fn iter(&self) -> StateMapIter<'_> {
198 fn iter(&self) -> StateMapIter<'_> {
203 self.get().iter()
199 self.get().iter()
204 }
200 }
205
201
206 fn iter_directories(
202 fn iter_directories(
207 &self,
203 &self,
208 ) -> Box<
204 ) -> Box<
209 dyn Iterator<
205 dyn Iterator<
210 Item = Result<
206 Item = Result<
211 (&HgPath, Option<Timestamp>),
207 (&HgPath, Option<Timestamp>),
212 DirstateV2ParseError,
208 DirstateV2ParseError,
213 >,
209 >,
214 > + Send
210 > + Send
215 + '_,
211 + '_,
216 > {
212 > {
217 self.get().iter_directories()
213 self.get().iter_directories()
218 }
214 }
219 }
215 }
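The whole `impl DirstateMapMethods for OwningDirstateMap` above is mechanical forwarding: every method re-borrows the inner map through `get()` or `get_mut()` and delegates. The pattern in miniature, with invented types:

trait Counted {
    fn len(&self) -> usize;
    fn clear(&mut self);
}

struct Inner(Vec<u8>);

impl Counted for Inner {
    fn len(&self) -> usize {
        self.0.len()
    }
    fn clear(&mut self) {
        self.0.clear()
    }
}

struct Owning {
    inner: Inner,
}

impl Owning {
    fn get(&self) -> &Inner {
        &self.inner
    }
    fn get_mut(&mut self) -> &mut Inner {
        &mut self.inner
    }
}

// The owner exposes the same trait by delegating every method.
impl Counted for Owning {
    fn len(&self) -> usize {
        self.get().len()
    }
    fn clear(&mut self) {
        self.get_mut().clear()
    }
}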
@@ -1,102 +1,114 @@
1 use cpython::PyBytes;
1 use cpython::PyBytes;
2 use cpython::Python;
2 use cpython::Python;
3 use hg::dirstate_tree::dirstate_map::DirstateMap;
3 use hg::dirstate_tree::dirstate_map::DirstateMap;
4 use hg::DirstateError;
4 use hg::DirstateError;
5 use hg::DirstateParents;
5 use hg::DirstateParents;
6
6
7 /// Keep a `DirstateMap<'on_disk>` next to the `on_disk` buffer that it
7 /// Keep a `DirstateMap<'on_disk>` next to the `on_disk` buffer that it
8 /// borrows. This is similar to the owning-ref crate.
8 /// borrows. This is similar to the owning-ref crate.
9 ///
9 ///
10 /// [`OwningRef`] is more limited, however, in that it
10 /// [`OwningRef`] is more limited, however, in that it
11 /// represents exactly one `&T` reference next to the value it borrows, as
11 /// represents exactly one `&T` reference next to the value it borrows, as
12 /// opposed to a struct that may contain an arbitrary number of references in
12 /// opposed to a struct that may contain an arbitrary number of references in
13 /// arbitrarily-nested data structures.
13 /// arbitrarily-nested data structures.
14 ///
14 ///
15 /// [`OwningRef`]: https://docs.rs/owning_ref/0.4.1/owning_ref/struct.OwningRef.html
15 /// [`OwningRef`]: https://docs.rs/owning_ref/0.4.1/owning_ref/struct.OwningRef.html
16 pub(super) struct OwningDirstateMap {
16 pub(super) struct OwningDirstateMap {
17 /// Owned handle to a bytes buffer with a stable address.
17 /// Owned handle to a bytes buffer with a stable address.
18 ///
18 ///
19 /// See <https://docs.rs/owning_ref/0.4.1/owning_ref/trait.StableAddress.html>.
19 /// See <https://docs.rs/owning_ref/0.4.1/owning_ref/trait.StableAddress.html>.
20 on_disk: PyBytes,
20 on_disk: PyBytes,
21
21
22 /// Pointer for `Box<DirstateMap<'on_disk>>`, type-erased because the
22 /// Pointer for `Box<DirstateMap<'on_disk>>`, type-erased because the
23 /// language cannot represent a lifetime referencing a sibling field.
23 /// language cannot represent a lifetime referencing a sibling field.
24 /// This is not quite a self-referential struct (moving this struct is not
24 /// This is not quite a self-referential struct (moving this struct is not
25 /// a problem as it doesn’t change the address of the bytes buffer owned
25 /// a problem as it doesn’t change the address of the bytes buffer owned
26 /// by `PyBytes`) but touches similar borrow-checker limitations.
26 /// by `PyBytes`) but touches similar borrow-checker limitations.
27 ptr: *mut (),
27 ptr: *mut (),
28 }
28 }
29
29
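A standalone model of the same trick with simpler types may help: own a heap buffer and a type-erased pointer to a value that borrows it. All names here are invented; this is an illustrative sketch, not hg code:

struct OwningFirstWord {
    // Stable address: a String's bytes live on the heap and do not move
    // when the struct itself is moved.
    buffer: String,
    // Erased `Box<&'buffer str>`.
    ptr: *mut (),
}

impl OwningFirstWord {
    fn new(buffer: String) -> Self {
        let word: &str = buffer.split_whitespace().next().unwrap_or("");
        let ptr: *mut &str = Box::into_raw(Box::new(word));
        // Erase the lifetime before `buffer` is moved into the struct,
        // just as the constructors below cast to `*mut ()`.
        let ptr: *mut () = ptr.cast();
        OwningFirstWord { buffer, ptr }
    }

    fn get<'a>(&'a self) -> &'a str {
        // SAFETY: `ptr` points at a `&str` borrowing `self.buffer`, whose
        // heap allocation is still alive and at the same address.
        let ptr = self.ptr as *const &'a str;
        unsafe { *ptr }
    }
}

impl Drop for OwningFirstWord {
    fn drop(&mut self) {
        // SAFETY: reclaim the box exactly once; `buffer` drops afterwards.
        unsafe { drop(Box::from_raw(self.ptr as *mut &str)) }
    }
}

Moving an `OwningFirstWord` only moves the `String`'s stack handle; the pointed-to bytes stay put, which is the same stable-address argument the SAFETY comments below make for `PyBytes`.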
30 impl OwningDirstateMap {
30 impl OwningDirstateMap {
31 pub fn new(
31 pub fn new_v1(
32 py: Python,
32 py: Python,
33 on_disk: PyBytes,
33 on_disk: PyBytes,
34 use_dirstate_v2: bool,
35 ) -> Result<(Self, Option<DirstateParents>), DirstateError> {
34 ) -> Result<(Self, Option<DirstateParents>), DirstateError> {
36 let bytes: &'_ [u8] = on_disk.data(py);
35 let bytes: &'_ [u8] = on_disk.data(py);
37 let (map, parents) = if use_dirstate_v2 {
36 let (map, parents) = DirstateMap::new_v1(bytes)?;
38 DirstateMap::new_v2(bytes)?
39 } else {
40 DirstateMap::new_v1(bytes)?
41 };
42
37
43 // Like in `bytes` above, this `'_` lifetime parameter borrows from
38 // Like in `bytes` above, this `'_` lifetime parameter borrows from
44 // the bytes buffer owned by `on_disk`.
39 // the bytes buffer owned by `on_disk`.
45 let ptr: *mut DirstateMap<'_> = Box::into_raw(Box::new(map));
40 let ptr: *mut DirstateMap<'_> = Box::into_raw(Box::new(map));
46
41
47 // Erase the pointed type entirely in order to erase the lifetime.
42 // Erase the pointed type entirely in order to erase the lifetime.
48 let ptr: *mut () = ptr.cast();
43 let ptr: *mut () = ptr.cast();
49
44
50 Ok((Self { on_disk, ptr }, parents))
45 Ok((Self { on_disk, ptr }, parents))
51 }
46 }
52
47
48 pub fn new_v2(
49 py: Python,
50 on_disk: PyBytes,
51 ) -> Result<Self, DirstateError> {
52 let bytes: &'_ [u8] = on_disk.data(py);
53 let map = DirstateMap::new_v2(bytes)?;
54
55 // Like in `bytes` above, this `'_` lifetime parameter borrows from
56 // the bytes buffer owned by `on_disk`.
57 let ptr: *mut DirstateMap<'_> = Box::into_raw(Box::new(map));
58
59 // Erase the pointed type entirely in order to erase the lifetime.
60 let ptr: *mut () = ptr.cast();
61
62 Ok(Self { on_disk, ptr })
63 }
64
53 pub fn get_mut<'a>(&'a mut self) -> &'a mut DirstateMap<'a> {
65 pub fn get_mut<'a>(&'a mut self) -> &'a mut DirstateMap<'a> {
54 // SAFETY: We cast the type-erased pointer back to the same type it had
66 // SAFETY: We cast the type-erased pointer back to the same type it had
55 // in `new`, except with a different lifetime parameter. This time we
67 // in `new`, except with a different lifetime parameter. This time we
56 // connect the lifetime to that of `self`. This cast is valid because
68 // connect the lifetime to that of `self`. This cast is valid because
57 // `self` owns the same `PyBytes` whose buffer `DirstateMap`
69 // `self` owns the same `PyBytes` whose buffer `DirstateMap`
58 // references. That buffer has a stable memory address because the byte
70 // references. That buffer has a stable memory address because the byte
59 // string value of a `PyBytes` is immutable.
71 // string value of a `PyBytes` is immutable.
60 let ptr: *mut DirstateMap<'a> = self.ptr.cast();
72 let ptr: *mut DirstateMap<'a> = self.ptr.cast();
61 // SAFETY: we dereference that pointer, connecting the lifetime of the
73 // SAFETY: we dereference that pointer, connecting the lifetime of the
62 // new `&mut` to that of `self`. This is valid because the
74 // new `&mut` to that of `self`. This is valid because the
63 // raw pointer is to a boxed value, and `self` owns that box.
75 // raw pointer is to a boxed value, and `self` owns that box.
64 unsafe { &mut *ptr }
76 unsafe { &mut *ptr }
65 }
77 }
66
78
67 pub fn get<'a>(&'a self) -> &'a DirstateMap<'a> {
79 pub fn get<'a>(&'a self) -> &'a DirstateMap<'a> {
68 // SAFETY: same reasoning as in `get_mut` above.
80 // SAFETY: same reasoning as in `get_mut` above.
69 let ptr: *mut DirstateMap<'a> = self.ptr.cast();
81 let ptr: *mut DirstateMap<'a> = self.ptr.cast();
70 unsafe { &*ptr }
82 unsafe { &*ptr }
71 }
83 }
72 }
84 }
73
85
74 impl Drop for OwningDirstateMap {
86 impl Drop for OwningDirstateMap {
75 fn drop(&mut self) {
87 fn drop(&mut self) {
76 // Silence a "field is never read" warning, and demonstrate that this
88 // Silence a "field is never read" warning, and demonstrate that this
77 // value is still alive.
89 // value is still alive.
78 let _ = &self.on_disk;
90 let _ = &self.on_disk;
79 // SAFETY: this cast is the same as in `get_mut`, and is valid for the
91 // SAFETY: this cast is the same as in `get_mut`, and is valid for the
80 // same reason. `self.on_disk` still exists at this point, drop glue
92 // same reason. `self.on_disk` still exists at this point, drop glue
81 // will drop it implicitly after this `drop` method returns.
93 // will drop it implicitly after this `drop` method returns.
82 let ptr: *mut DirstateMap<'_> = self.ptr.cast();
94 let ptr: *mut DirstateMap<'_> = self.ptr.cast();
83 // SAFETY: `Box::from_raw` takes ownership of the box away from `self`.
95 // SAFETY: `Box::from_raw` takes ownership of the box away from `self`.
84 // This is fine because drop glue does nothing for `*mut ()` and we’re
96 // This is fine because drop glue does nothing for `*mut ()` and we’re
85 // in `drop`, so `get` and `get_mut` cannot be called again.
97 // in `drop`, so `get` and `get_mut` cannot be called again.
86 unsafe { drop(Box::from_raw(ptr)) }
98 unsafe { drop(Box::from_raw(ptr)) }
87 }
99 }
88 }
100 }
89
101
90 fn _static_assert_is_send<T: Send>() {}
102 fn _static_assert_is_send<T: Send>() {}
91
103
92 fn _static_assert_fields_are_send() {
104 fn _static_assert_fields_are_send() {
93 _static_assert_is_send::<PyBytes>();
105 _static_assert_is_send::<PyBytes>();
94 _static_assert_is_send::<Box<DirstateMap<'_>>>();
106 _static_assert_is_send::<Box<DirstateMap<'_>>>();
95 }
107 }
96
108
97 // SAFETY: we don’t get this impl implicitly because `*mut (): !Send`, since
109 // SAFETY: we don’t get this impl implicitly because `*mut (): !Send`, since
98 // thread-safety of raw pointers is unknown in the general case. However this
110 // thread-safety of raw pointers is unknown in the general case. However this
99 // particular raw pointer represents a `Box<DirstateMap<'on_disk>>` that we
111 // particular raw pointer represents a `Box<DirstateMap<'on_disk>>` that we
100 // own. Since that `Box` and `PyBytes` are both `Send` as shown above, it
112 // own. Since that `Box` and `PyBytes` are both `Send` as shown above, it
101 // is sound to mark this struct as `Send` too.
113 // is sound to mark this struct as `Send` too.
102 unsafe impl Send for OwningDirstateMap {}
114 unsafe impl Send for OwningDirstateMap {}
@@ -1,322 +1,339 @@
1 // status.rs
1 // status.rs
2 //
2 //
3 // Copyright 2020, Georges Racinet <georges.racinets@octobus.net>
3 // Copyright 2020, Georges Racinet <georges.racinets@octobus.net>
4 //
4 //
5 // This software may be used and distributed according to the terms of the
5 // This software may be used and distributed according to the terms of the
6 // GNU General Public License version 2 or any later version.
6 // GNU General Public License version 2 or any later version.
7
7
8 use crate::error::CommandError;
8 use crate::error::CommandError;
9 use crate::ui::Ui;
9 use crate::ui::Ui;
10 use clap::{Arg, SubCommand};
10 use clap::{Arg, SubCommand};
11 use hg;
11 use hg;
12 use hg::dirstate_tree::dirstate_map::DirstateMap;
12 use hg::dirstate_tree::dirstate_map::DirstateMap;
13 use hg::dirstate_tree::on_disk;
13 use hg::errors::HgResultExt;
14 use hg::errors::HgResultExt;
14 use hg::errors::IoResultExt;
15 use hg::errors::IoResultExt;
15 use hg::matchers::AlwaysMatcher;
16 use hg::matchers::AlwaysMatcher;
16 use hg::operations::cat;
17 use hg::operations::cat;
17 use hg::repo::Repo;
18 use hg::repo::Repo;
18 use hg::revlog::node::Node;
19 use hg::revlog::node::Node;
19 use hg::utils::hg_path::{hg_path_to_os_string, HgPath};
20 use hg::utils::hg_path::{hg_path_to_os_string, HgPath};
20 use hg::StatusError;
21 use hg::StatusError;
21 use hg::{HgPathCow, StatusOptions};
22 use hg::{HgPathCow, StatusOptions};
22 use log::{info, warn};
23 use log::{info, warn};
23 use std::convert::TryInto;
24 use std::convert::TryInto;
24 use std::fs;
25 use std::fs;
25 use std::io::BufReader;
26 use std::io::BufReader;
26 use std::io::Read;
27 use std::io::Read;
27
28
28 pub const HELP_TEXT: &str = "
29 pub const HELP_TEXT: &str = "
29 Show changed files in the working directory
30 Show changed files in the working directory
30
31
31 This is a pure Rust version of `hg status`.
32 This is a pure Rust version of `hg status`.
32
33
33 Some options might be missing; check the list below.
34 Some options might be missing; check the list below.
34 ";
35 ";
35
36
36 pub fn args() -> clap::App<'static, 'static> {
37 pub fn args() -> clap::App<'static, 'static> {
37 SubCommand::with_name("status")
38 SubCommand::with_name("status")
38 .alias("st")
39 .alias("st")
39 .about(HELP_TEXT)
40 .about(HELP_TEXT)
40 .arg(
41 .arg(
41 Arg::with_name("all")
42 Arg::with_name("all")
42 .help("show status of all files")
43 .help("show status of all files")
43 .short("-A")
44 .short("-A")
44 .long("--all"),
45 .long("--all"),
45 )
46 )
46 .arg(
47 .arg(
47 Arg::with_name("modified")
48 Arg::with_name("modified")
48 .help("show only modified files")
49 .help("show only modified files")
49 .short("-m")
50 .short("-m")
50 .long("--modified"),
51 .long("--modified"),
51 )
52 )
52 .arg(
53 .arg(
53 Arg::with_name("added")
54 Arg::with_name("added")
54 .help("show only added files")
55 .help("show only added files")
55 .short("-a")
56 .short("-a")
56 .long("--added"),
57 .long("--added"),
57 )
58 )
58 .arg(
59 .arg(
59 Arg::with_name("removed")
60 Arg::with_name("removed")
60 .help("show only removed files")
61 .help("show only removed files")
61 .short("-r")
62 .short("-r")
62 .long("--removed"),
63 .long("--removed"),
63 )
64 )
64 .arg(
65 .arg(
65 Arg::with_name("clean")
66 Arg::with_name("clean")
66 .help("show only clean files")
67 .help("show only clean files")
67 .short("-c")
68 .short("-c")
68 .long("--clean"),
69 .long("--clean"),
69 )
70 )
70 .arg(
71 .arg(
71 Arg::with_name("deleted")
72 Arg::with_name("deleted")
72 .help("show only deleted files")
73 .help("show only deleted files")
73 .short("-d")
74 .short("-d")
74 .long("--deleted"),
75 .long("--deleted"),
75 )
76 )
76 .arg(
77 .arg(
77 Arg::with_name("unknown")
78 Arg::with_name("unknown")
78 .help("show only unknown (not tracked) files")
79 .help("show only unknown (not tracked) files")
79 .short("-u")
80 .short("-u")
80 .long("--unknown"),
81 .long("--unknown"),
81 )
82 )
82 .arg(
83 .arg(
83 Arg::with_name("ignored")
84 Arg::with_name("ignored")
84 .help("show only ignored files")
85 .help("show only ignored files")
85 .short("-i")
86 .short("-i")
86 .long("--ignored"),
87 .long("--ignored"),
87 )
88 )
88 }
89 }
89
90
90 /// Pure data type allowing the caller to specify file states to display
91 /// Pure data type allowing the caller to specify file states to display
91 #[derive(Copy, Clone, Debug)]
92 #[derive(Copy, Clone, Debug)]
92 pub struct DisplayStates {
93 pub struct DisplayStates {
93 pub modified: bool,
94 pub modified: bool,
94 pub added: bool,
95 pub added: bool,
95 pub removed: bool,
96 pub removed: bool,
96 pub clean: bool,
97 pub clean: bool,
97 pub deleted: bool,
98 pub deleted: bool,
98 pub unknown: bool,
99 pub unknown: bool,
99 pub ignored: bool,
100 pub ignored: bool,
100 }
101 }
101
102
102 pub const DEFAULT_DISPLAY_STATES: DisplayStates = DisplayStates {
103 pub const DEFAULT_DISPLAY_STATES: DisplayStates = DisplayStates {
103 modified: true,
104 modified: true,
104 added: true,
105 added: true,
105 removed: true,
106 removed: true,
106 clean: false,
107 clean: false,
107 deleted: true,
108 deleted: true,
108 unknown: true,
109 unknown: true,
109 ignored: false,
110 ignored: false,
110 };
111 };
111
112
112 pub const ALL_DISPLAY_STATES: DisplayStates = DisplayStates {
113 pub const ALL_DISPLAY_STATES: DisplayStates = DisplayStates {
113 modified: true,
114 modified: true,
114 added: true,
115 added: true,
115 removed: true,
116 removed: true,
116 clean: true,
117 clean: true,
117 deleted: true,
118 deleted: true,
118 unknown: true,
119 unknown: true,
119 ignored: true,
120 ignored: true,
120 };
121 };
121
122
122 impl DisplayStates {
123 impl DisplayStates {
123 pub fn is_empty(&self) -> bool {
124 pub fn is_empty(&self) -> bool {
124 !(self.modified
125 !(self.modified
125 || self.added
126 || self.added
126 || self.removed
127 || self.removed
127 || self.clean
128 || self.clean
128 || self.deleted
129 || self.deleted
129 || self.unknown
130 || self.unknown
130 || self.ignored)
131 || self.ignored)
131 }
132 }
132 }
133 }
133
134
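As a quick check of the fallback applied in `run` below: a request with every flag false is `is_empty()`, so the defaults are used instead. (A fragment, assuming the `DisplayStates` items defined above.)

fn _display_states_fallback_example() {
    let requested = DisplayStates {
        modified: false,
        added: false,
        removed: false,
        clean: false,
        deleted: false,
        unknown: false,
        ignored: false,
    };
    let effective =
        if requested.is_empty() { DEFAULT_DISPLAY_STATES } else { requested };
    assert!(effective.modified && !effective.clean);
}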
134 pub fn run(invocation: &crate::CliInvocation) -> Result<(), CommandError> {
135 pub fn run(invocation: &crate::CliInvocation) -> Result<(), CommandError> {
135 let status_enabled_default = false;
136 let status_enabled_default = false;
136 let status_enabled = invocation.config.get_option(b"rhg", b"status")?;
137 let status_enabled = invocation.config.get_option(b"rhg", b"status")?;
137 if !status_enabled.unwrap_or(status_enabled_default) {
138 if !status_enabled.unwrap_or(status_enabled_default) {
138 return Err(CommandError::unsupported(
139 return Err(CommandError::unsupported(
139 "status is experimental in rhg (enable it with 'rhg.status = true' \
140 "status is experimental in rhg (enable it with 'rhg.status = true' \
140 or enable fallback with 'rhg.on-unsupported = fallback')"
141 or enable fallback with 'rhg.on-unsupported = fallback')"
141 ));
142 ));
142 }
143 }
143
144
144 let ui = invocation.ui;
145 let ui = invocation.ui;
145 let args = invocation.subcommand_args;
146 let args = invocation.subcommand_args;
146 let display_states = if args.is_present("all") {
147 let display_states = if args.is_present("all") {
147 // TODO when implementing `--quiet`: it excludes clean files
148 // TODO when implementing `--quiet`: it excludes clean files
148 // from `--all`
149 // from `--all`
149 ALL_DISPLAY_STATES
150 ALL_DISPLAY_STATES
150 } else {
151 } else {
151 let requested = DisplayStates {
152 let requested = DisplayStates {
152 modified: args.is_present("modified"),
153 modified: args.is_present("modified"),
153 added: args.is_present("added"),
154 added: args.is_present("added"),
154 removed: args.is_present("removed"),
155 removed: args.is_present("removed"),
155 clean: args.is_present("clean"),
156 clean: args.is_present("clean"),
156 deleted: args.is_present("deleted"),
157 deleted: args.is_present("deleted"),
157 unknown: args.is_present("unknown"),
158 unknown: args.is_present("unknown"),
158 ignored: args.is_present("ignored"),
159 ignored: args.is_present("ignored"),
159 };
160 };
160 if requested.is_empty() {
161 if requested.is_empty() {
161 DEFAULT_DISPLAY_STATES
162 DEFAULT_DISPLAY_STATES
162 } else {
163 } else {
163 requested
164 requested
164 }
165 }
165 };
166 };
166
167
167 let repo = invocation.repo?;
168 let repo = invocation.repo?;
168 let dirstate_data =
169 let dirstate_data_mmap;
170 let (mut dmap, parents) = if repo.has_dirstate_v2() {
171 let parents;
172 let dirstate_data;
173 if let Some(docket_data) =
174 repo.hg_vfs().read("dirstate").io_not_found_as_none()?
175 {
176 let docket = on_disk::read_docket(&docket_data)?;
177 parents = Some(docket.parents());
178 dirstate_data_mmap = repo
179 .hg_vfs()
180 .mmap_open(docket.data_filename())
181 .io_not_found_as_none()?;
182 dirstate_data = dirstate_data_mmap.as_deref().unwrap_or(b"");
183 } else {
184 parents = None;
185 dirstate_data = b"";
186 }
187 let dmap = DirstateMap::new_v2(dirstate_data)?;
188 (dmap, parents)
189 } else {
190 dirstate_data_mmap =
169 repo.hg_vfs().mmap_open("dirstate").io_not_found_as_none()?;
191 repo.hg_vfs().mmap_open("dirstate").io_not_found_as_none()?;
170 let dirstate_data = match &dirstate_data {
192 let dirstate_data = dirstate_data_mmap.as_deref().unwrap_or(b"");
171 Some(mmap) => &**mmap,
172 None => b"",
173 };
174 let (mut dmap, parents) = if repo.has_dirstate_v2() {
175 DirstateMap::new_v2(dirstate_data)?
176 } else {
177 DirstateMap::new_v1(dirstate_data)?
193 DirstateMap::new_v1(dirstate_data)?
178 };
194 };
195
179 let options = StatusOptions {
196 let options = StatusOptions {
180 // TODO should be provided by the dirstate parsing and
197 // TODO should be provided by the dirstate parsing and
181 // hence be stored on dmap. Using a value that assumes we aren't
198 // hence be stored on dmap. Using a value that assumes we aren't
182 // below the time resolution granularity of the FS and the
199 // below the time resolution granularity of the FS and the
183 // dirstate.
200 // dirstate.
184 last_normal_time: 0,
201 last_normal_time: 0,
185 // we're currently supporting file systems with exec flags only
202 // we're currently supporting file systems with exec flags only
186 // anyway
203 // anyway
187 check_exec: true,
204 check_exec: true,
188 list_clean: display_states.clean,
205 list_clean: display_states.clean,
189 list_unknown: display_states.unknown,
206 list_unknown: display_states.unknown,
190 list_ignored: display_states.ignored,
207 list_ignored: display_states.ignored,
191 collect_traversed_dirs: false,
208 collect_traversed_dirs: false,
192 };
209 };
193 let ignore_file = repo.working_directory_vfs().join(".hgignore"); // TODO hardcoded
210 let ignore_file = repo.working_directory_vfs().join(".hgignore"); // TODO hardcoded
194 let (mut ds_status, pattern_warnings) = hg::dirstate_tree::status::status(
211 let (mut ds_status, pattern_warnings) = hg::dirstate_tree::status::status(
195 &mut dmap,
212 &mut dmap,
196 &AlwaysMatcher,
213 &AlwaysMatcher,
197 repo.working_directory_path().to_owned(),
214 repo.working_directory_path().to_owned(),
198 vec![ignore_file],
215 vec![ignore_file],
199 options,
216 options,
200 )?;
217 )?;
201 if !pattern_warnings.is_empty() {
218 if !pattern_warnings.is_empty() {
202 warn!("Pattern warnings: {:?}", &pattern_warnings);
219 warn!("Pattern warnings: {:?}", &pattern_warnings);
203 }
220 }
204
221
205 if !ds_status.bad.is_empty() {
222 if !ds_status.bad.is_empty() {
206 warn!("Bad matches {:?}", &(ds_status.bad))
223 warn!("Bad matches {:?}", &(ds_status.bad))
207 }
224 }
208 if !ds_status.unsure.is_empty() {
225 if !ds_status.unsure.is_empty() {
209 info!(
226 info!(
210 "Files to be rechecked by retrieval from filelog: {:?}",
227 "Files to be rechecked by retrieval from filelog: {:?}",
211 &ds_status.unsure
228 &ds_status.unsure
212 );
229 );
213 }
230 }
214 if !ds_status.unsure.is_empty()
231 if !ds_status.unsure.is_empty()
215 && (display_states.modified || display_states.clean)
232 && (display_states.modified || display_states.clean)
216 {
233 {
217 let p1: Node = parents
234 let p1: Node = parents
218 .expect(
235 .expect(
219 "Dirstate with no parents should not list any file to
236 "Dirstate with no parents should not list any file to
220 be rechecked for modifications",
237 be rechecked for modifications",
221 )
238 )
222 .p1
239 .p1
223 .into();
240 .into();
224 let p1_hex = format!("{:x}", p1);
241 let p1_hex = format!("{:x}", p1);
225 for to_check in ds_status.unsure {
242 for to_check in ds_status.unsure {
226 if cat_file_is_modified(repo, &to_check, &p1_hex)? {
243 if cat_file_is_modified(repo, &to_check, &p1_hex)? {
227 if display_states.modified {
244 if display_states.modified {
228 ds_status.modified.push(to_check);
245 ds_status.modified.push(to_check);
229 }
246 }
230 } else {
247 } else {
231 if display_states.clean {
248 if display_states.clean {
232 ds_status.clean.push(to_check);
249 ds_status.clean.push(to_check);
233 }
250 }
234 }
251 }
235 }
252 }
236 }
253 }
237 if display_states.modified {
254 if display_states.modified {
238 display_status_paths(ui, &mut ds_status.modified, b"M")?;
255 display_status_paths(ui, &mut ds_status.modified, b"M")?;
239 }
256 }
240 if display_states.added {
257 if display_states.added {
241 display_status_paths(ui, &mut ds_status.added, b"A")?;
258 display_status_paths(ui, &mut ds_status.added, b"A")?;
242 }
259 }
243 if display_states.removed {
260 if display_states.removed {
244 display_status_paths(ui, &mut ds_status.removed, b"R")?;
261 display_status_paths(ui, &mut ds_status.removed, b"R")?;
245 }
262 }
246 if display_states.deleted {
263 if display_states.deleted {
247 display_status_paths(ui, &mut ds_status.deleted, b"!")?;
264 display_status_paths(ui, &mut ds_status.deleted, b"!")?;
248 }
265 }
249 if display_states.unknown {
266 if display_states.unknown {
250 display_status_paths(ui, &mut ds_status.unknown, b"?")?;
267 display_status_paths(ui, &mut ds_status.unknown, b"?")?;
251 }
268 }
252 if display_states.ignored {
269 if display_states.ignored {
253 display_status_paths(ui, &mut ds_status.ignored, b"I")?;
270 display_status_paths(ui, &mut ds_status.ignored, b"I")?;
254 }
271 }
255 if display_states.clean {
272 if display_states.clean {
256 display_status_paths(ui, &mut ds_status.clean, b"C")?;
273 display_status_paths(ui, &mut ds_status.clean, b"C")?;
257 }
274 }
258 Ok(())
275 Ok(())
259 }
276 }
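
// Sketch (hypothetical helper, not part of the changeset) isolating the
// docket indirection used in `run` above: with dirstate-v2, `.hg/dirstate`
// holds a small docket rather than the map itself, and the docket names the
// data file that is then memory-mapped. `DirstateParents` is assumed to be
// the type returned by `docket.parents()`, and an import such as
// `use hg::DirstateParents;` is assumed to be in scope.
fn read_v2_parents(
    repo: &Repo,
) -> Result<Option<DirstateParents>, CommandError> {
    if let Some(docket_data) =
        repo.hg_vfs().read("dirstate").io_not_found_as_none()?
    {
        let docket = on_disk::read_docket(&docket_data)?;
        // The map data itself lives in `docket.data_filename()`; see `run`.
        Ok(Some(docket.parents()))
    } else {
        // No dirstate file yet: an empty working directory state.
        Ok(None)
    }
}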

// Probably more elegant to use a Deref or Borrow trait rather than
// hardcode HgPathBuf, but probably not really useful at this point
fn display_status_paths(
    ui: &Ui,
    paths: &mut [HgPathCow],
    status_prefix: &[u8],
) -> Result<(), CommandError> {
    paths.sort_unstable();
    for path in paths {
        // Same TODO as in commands::root
        let bytes: &[u8] = path.as_bytes();
        // TODO optim, probably lots of unneeded copies here, especially
        // if out stream is buffered
        ui.write_stdout(&[status_prefix, b" ", bytes, b"\n"].concat())?;
    }
    Ok(())
}
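
// Illustration (hypothetical test, not part of the changeset): the byte
// layout produced above for a single modified file named "foo/bar.rs".
#[cfg(test)]
#[test]
fn status_line_layout() {
    let prefix: &[u8] = b"M";
    let line = [prefix, b" ", b"foo/bar.rs", b"\n"].concat();
    assert_eq!(line, b"M foo/bar.rs\n");
}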

/// Check if a file is modified by comparing actual repo store and file system.
///
/// This is meant to be used for entries that the dirstate cannot resolve,
/// due to time resolution limits.
///
/// TODO: detect permission bits and similar metadata modifications
fn cat_file_is_modified(
    repo: &Repo,
    hg_path: &HgPath,
    rev: &str,
) -> Result<bool, CommandError> {
    // TODO CatRev expects &[HgPathBuf], something like
    // &[impl Deref<HgPath>] would be nicer and should avoid the copy
    let path_bufs = [hg_path.into()];
    // TODO IIUC CatRev returns a simple Vec<u8> for all files;
    // being able to tell them apart as (path, bytes) would be nicer,
    // and OPTIM would allow manifest resolution just once.
    let output = cat(repo, rev, &path_bufs).map_err(|e| (e, rev))?;

    let fs_path = repo
        .working_directory_vfs()
        .join(hg_path_to_os_string(hg_path).expect("HgPath conversion"));
    let hg_data_len: u64 = match output.concatenated.len().try_into() {
        Ok(v) => v,
        Err(_) => {
            // Conversion of the data length to u64 failed;
            // good luck for any file to have this much content.
            return Ok(true);
        }
    };
    let fobj = fs::File::open(&fs_path).when_reading_file(&fs_path)?;
    if fobj.metadata().map_err(|e| StatusError::from(e))?.len() != hg_data_len
    {
        return Ok(true);
    }
    for (fs_byte, hg_byte) in
        BufReader::new(fobj).bytes().zip(output.concatenated)
    {
        if fs_byte.map_err(|e| StatusError::from(e))? != hg_byte {
            return Ok(true);
        }
    }
    Ok(false)
}
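
The check in `cat_file_is_modified` follows a general pattern: compare cheap
metadata (length) before streaming content, and stop at the first differing
byte. A minimal, self-contained sketch of the same idea using only the
standard library (a hypothetical helper, not from the changeset):

use std::fs::File;
use std::io::{self, BufReader, Read};

/// Returns true as soon as `file` is known to differ from `expected`:
/// first on length (cheap metadata), then byte by byte.
fn differs(expected: &[u8], file: File) -> io::Result<bool> {
    if file.metadata()?.len() != expected.len() as u64 {
        return Ok(true);
    }
    for (byte, &want) in BufReader::new(file).bytes().zip(expected) {
        if byte? != want {
            return Ok(true);
        }
    }
    Ok(false)
}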
@@ -1,201 +1,208 @@
use crate::ui::utf8_to_local;
use crate::ui::UiError;
use crate::NoRepoInCwdError;
use format_bytes::format_bytes;
use hg::config::{ConfigError, ConfigParseError, ConfigValueParseError};
+use hg::dirstate_tree::on_disk::DirstateV2ParseError;
use hg::errors::HgError;
use hg::exit_codes;
use hg::repo::RepoError;
use hg::revlog::revlog::RevlogError;
use hg::utils::files::get_bytes_from_path;
use hg::{DirstateError, DirstateMapError, StatusError};
use std::convert::From;

/// The kind of command error
#[derive(Debug)]
pub enum CommandError {
    /// Exit with an error message and "standard" failure exit code.
    Abort {
        message: Vec<u8>,
        detailed_exit_code: exit_codes::ExitCode,
    },

    /// Exit with a failure exit code but no message.
    Unsuccessful,

    /// Encountered something (such as a CLI argument, repository layout, …)
    /// not supported by this version of `rhg`. Depending on configuration
    /// `rhg` may attempt to silently fall back to Python-based `hg`, which
    /// may or may not support this feature.
    UnsupportedFeature { message: Vec<u8> },
}

impl CommandError {
    pub fn abort(message: impl AsRef<str>) -> Self {
        CommandError::abort_with_exit_code(message, exit_codes::ABORT)
    }

    pub fn abort_with_exit_code(
        message: impl AsRef<str>,
        detailed_exit_code: exit_codes::ExitCode,
    ) -> Self {
        CommandError::Abort {
            // TODO: bytes-based (instead of Unicode-based) formatting
            // of error messages to handle non-UTF-8 filenames etc:
            // https://www.mercurial-scm.org/wiki/EncodingStrategy#Mixing_output
            message: utf8_to_local(message.as_ref()).into(),
            detailed_exit_code,
        }
    }

    pub fn unsupported(message: impl AsRef<str>) -> Self {
        CommandError::UnsupportedFeature {
            message: utf8_to_local(message.as_ref()).into(),
        }
    }
}

/// For now we don’t differentiate between CLI args that are invalid and
/// ones that are valid for `hg` but not supported yet by `rhg`.
impl From<clap::Error> for CommandError {
    fn from(error: clap::Error) -> Self {
        CommandError::unsupported(error.to_string())
    }
}

impl From<HgError> for CommandError {
    fn from(error: HgError) -> Self {
        match error {
            HgError::UnsupportedFeature(message) => {
                CommandError::unsupported(message)
            }
            HgError::Abort {
                message,
                detailed_exit_code,
            } => {
                CommandError::abort_with_exit_code(message, detailed_exit_code)
            }
            _ => CommandError::abort(error.to_string()),
        }
    }
}

impl From<ConfigValueParseError> for CommandError {
    fn from(error: ConfigValueParseError) -> Self {
        CommandError::abort_with_exit_code(
            error.to_string(),
            exit_codes::CONFIG_ERROR_ABORT,
        )
    }
}

impl From<UiError> for CommandError {
    fn from(_error: UiError) -> Self {
        // If we already failed writing to stdout or stderr,
        // writing an error message to stderr about it would be likely to fail
        // too.
        CommandError::abort("")
    }
}

impl From<RepoError> for CommandError {
    fn from(error: RepoError) -> Self {
        match error {
            RepoError::NotFound { at } => CommandError::Abort {
                message: format_bytes!(
                    b"abort: repository {} not found",
                    get_bytes_from_path(at)
                ),
                detailed_exit_code: exit_codes::ABORT,
            },
            RepoError::ConfigParseError(error) => error.into(),
            RepoError::Other(error) => error.into(),
        }
    }
}

impl<'a> From<&'a NoRepoInCwdError> for CommandError {
    fn from(error: &'a NoRepoInCwdError) -> Self {
        let NoRepoInCwdError { cwd } = error;
        CommandError::Abort {
            message: format_bytes!(
                b"abort: no repository found in '{}' (.hg not found)!",
                get_bytes_from_path(cwd)
            ),
            detailed_exit_code: exit_codes::ABORT,
        }
    }
}

impl From<ConfigError> for CommandError {
    fn from(error: ConfigError) -> Self {
        match error {
            ConfigError::Parse(error) => error.into(),
            ConfigError::Other(error) => error.into(),
        }
    }
}

impl From<ConfigParseError> for CommandError {
    fn from(error: ConfigParseError) -> Self {
        let ConfigParseError {
            origin,
            line,
            message,
        } = error;
        let line_message = if let Some(line_number) = line {
            format_bytes!(b":{}", line_number.to_string().into_bytes())
        } else {
            Vec::new()
        };
        CommandError::Abort {
            message: format_bytes!(
                b"config error at {}{}: {}",
                origin,
                line_message,
                message
            ),
            detailed_exit_code: exit_codes::CONFIG_ERROR_ABORT,
        }
    }
}

impl From<(RevlogError, &str)> for CommandError {
    fn from((err, rev): (RevlogError, &str)) -> CommandError {
        match err {
            RevlogError::WDirUnsupported => CommandError::abort(
                "abort: working directory revision cannot be specified",
            ),
            RevlogError::InvalidRevision => CommandError::abort(format!(
                "abort: invalid revision identifier: {}",
                rev
            )),
            RevlogError::AmbiguousPrefix => CommandError::abort(format!(
                "abort: ambiguous revision identifier: {}",
                rev
            )),
            RevlogError::Other(error) => error.into(),
        }
    }
}

impl From<StatusError> for CommandError {
    fn from(error: StatusError) -> Self {
        CommandError::abort(format!("{}", error))
    }
}

impl From<DirstateMapError> for CommandError {
    fn from(error: DirstateMapError) -> Self {
        CommandError::abort(format!("{}", error))
    }
}

impl From<DirstateError> for CommandError {
    fn from(error: DirstateError) -> Self {
        match error {
            DirstateError::Common(error) => error.into(),
            DirstateError::Map(error) => error.into(),
        }
    }
}
+
+impl From<DirstateV2ParseError> for CommandError {
+    fn from(error: DirstateV2ParseError) -> Self {
+        HgError::from(error).into()
+    }
+}
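
The new `From<DirstateV2ParseError>` impl closes the conversion chain:
command code can use `?` on a dirstate-v2 parse and the failure is routed
through `HgError` into an abort. A hedged sketch of what that enables (a
hypothetical function; `read_docket`'s signature and the `DirstateParents`
type are inferred from the status code above):

fn parents_from_docket(
    docket_data: &[u8],
) -> Result<DirstateParents, CommandError> {
    // `?` converts DirstateV2ParseError into CommandError through the
    // From impl added above; no explicit match on the error type needed.
    let docket = on_disk::read_docket(docket_data)?;
    Ok(docket.parents())
}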
@@ -1,1856 +1,1857 b''
1 #
1 #
2 # This is the mercurial setup script.
2 # This is the mercurial setup script.
3 #
3 #
4 # 'python setup.py install', or
4 # 'python setup.py install', or
5 # 'python setup.py --help' for more options
5 # 'python setup.py --help' for more options
6 import os
6 import os
7
7
8 # Mercurial will never work on Python 3 before 3.5 due to a lack
8 # Mercurial will never work on Python 3 before 3.5 due to a lack
9 # of % formatting on bytestrings, and can't work on 3.6.0 or 3.6.1
9 # of % formatting on bytestrings, and can't work on 3.6.0 or 3.6.1
10 # due to a bug in % formatting in bytestrings.
10 # due to a bug in % formatting in bytestrings.
11 # We cannot support Python 3.5.0, 3.5.1, 3.5.2 because of bug in
11 # We cannot support Python 3.5.0, 3.5.1, 3.5.2 because of bug in
12 # codecs.escape_encode() where it raises SystemError on empty bytestring
12 # codecs.escape_encode() where it raises SystemError on empty bytestring
13 # bug link: https://bugs.python.org/issue25270
13 # bug link: https://bugs.python.org/issue25270
14 supportedpy = ','.join(
14 supportedpy = ','.join(
15 [
15 [
16 '>=2.7.4',
16 '>=2.7.4',
17 '!=3.0.*',
17 '!=3.0.*',
18 '!=3.1.*',
18 '!=3.1.*',
19 '!=3.2.*',
19 '!=3.2.*',
20 '!=3.3.*',
20 '!=3.3.*',
21 '!=3.4.*',
21 '!=3.4.*',
22 '!=3.5.0',
22 '!=3.5.0',
23 '!=3.5.1',
23 '!=3.5.1',
24 '!=3.5.2',
24 '!=3.5.2',
25 '!=3.6.0',
25 '!=3.6.0',
26 '!=3.6.1',
26 '!=3.6.1',
27 ]
27 ]
28 )
28 )
29
29
30 import sys, platform
30 import sys, platform
31 import sysconfig
31 import sysconfig
32
32
33 if sys.version_info[0] >= 3:
33 if sys.version_info[0] >= 3:
34 printf = eval('print')
34 printf = eval('print')
35 libdir_escape = 'unicode_escape'
35 libdir_escape = 'unicode_escape'
36
36
37 def sysstr(s):
37 def sysstr(s):
38 return s.decode('latin-1')
38 return s.decode('latin-1')
39
39
40
40
41 else:
41 else:
42 libdir_escape = 'string_escape'
42 libdir_escape = 'string_escape'
43
43
44 def printf(*args, **kwargs):
44 def printf(*args, **kwargs):
45 f = kwargs.get('file', sys.stdout)
45 f = kwargs.get('file', sys.stdout)
46 end = kwargs.get('end', '\n')
46 end = kwargs.get('end', '\n')
47 f.write(b' '.join(args) + end)
47 f.write(b' '.join(args) + end)
48
48
49 def sysstr(s):
49 def sysstr(s):
50 return s
50 return s
51
51
52
52
53 # Attempt to guide users to a modern pip - this means that 2.6 users
53 # Attempt to guide users to a modern pip - this means that 2.6 users
54 # should have a chance of getting a 4.2 release, and when we ratchet
54 # should have a chance of getting a 4.2 release, and when we ratchet
55 # the version requirement forward again hopefully everyone will get
55 # the version requirement forward again hopefully everyone will get
56 # something that works for them.
56 # something that works for them.
57 if sys.version_info < (2, 7, 4, 'final'):
57 if sys.version_info < (2, 7, 4, 'final'):
58 pip_message = (
58 pip_message = (
59 'This may be due to an out of date pip. '
59 'This may be due to an out of date pip. '
60 'Make sure you have pip >= 9.0.1.'
60 'Make sure you have pip >= 9.0.1.'
61 )
61 )
62 try:
62 try:
63 import pip
63 import pip
64
64
65 pip_version = tuple([int(x) for x in pip.__version__.split('.')[:3]])
65 pip_version = tuple([int(x) for x in pip.__version__.split('.')[:3]])
66 if pip_version < (9, 0, 1):
66 if pip_version < (9, 0, 1):
67 pip_message = (
67 pip_message = (
68 'Your pip version is out of date, please install '
68 'Your pip version is out of date, please install '
69 'pip >= 9.0.1. pip {} detected.'.format(pip.__version__)
69 'pip >= 9.0.1. pip {} detected.'.format(pip.__version__)
70 )
70 )
71 else:
71 else:
72 # pip is new enough - it must be something else
72 # pip is new enough - it must be something else
73 pip_message = ''
73 pip_message = ''
74 except Exception:
74 except Exception:
75 pass
75 pass
76 error = """
76 error = """
77 Mercurial does not support Python older than 2.7.4.
77 Mercurial does not support Python older than 2.7.4.
78 Python {py} detected.
78 Python {py} detected.
79 {pip}
79 {pip}
80 """.format(
80 """.format(
81 py=sys.version_info, pip=pip_message
81 py=sys.version_info, pip=pip_message
82 )
82 )
83 printf(error, file=sys.stderr)
83 printf(error, file=sys.stderr)
84 sys.exit(1)
84 sys.exit(1)
85
85
86 import ssl
86 import ssl
87
87
88 try:
88 try:
89 ssl.SSLContext
89 ssl.SSLContext
90 except AttributeError:
90 except AttributeError:
91 error = """
91 error = """
92 The `ssl` module does not have the `SSLContext` class. This indicates an old
92 The `ssl` module does not have the `SSLContext` class. This indicates an old
93 Python version which does not support modern security features (which were
93 Python version which does not support modern security features (which were
94 added to Python 2.7 as part of "PEP 466"). Please make sure you have installed
94 added to Python 2.7 as part of "PEP 466"). Please make sure you have installed
95 at least Python 2.7.9 or a Python version with backports of these security
95 at least Python 2.7.9 or a Python version with backports of these security
96 features.
96 features.
97 """
97 """
98 printf(error, file=sys.stderr)
98 printf(error, file=sys.stderr)
99 sys.exit(1)
99 sys.exit(1)
100
100
101 # ssl.HAS_TLSv1* are preferred to check support but they were added in Python
101 # ssl.HAS_TLSv1* are preferred to check support but they were added in Python
102 # 3.7. Prior to CPython commit 6e8cda91d92da72800d891b2fc2073ecbc134d98
102 # 3.7. Prior to CPython commit 6e8cda91d92da72800d891b2fc2073ecbc134d98
103 # (backported to the 3.7 branch), ssl.PROTOCOL_TLSv1_1 / ssl.PROTOCOL_TLSv1_2
103 # (backported to the 3.7 branch), ssl.PROTOCOL_TLSv1_1 / ssl.PROTOCOL_TLSv1_2
104 # were defined only if compiled against a OpenSSL version with TLS 1.1 / 1.2
104 # were defined only if compiled against a OpenSSL version with TLS 1.1 / 1.2
105 # support. At the mentioned commit, they were unconditionally defined.
105 # support. At the mentioned commit, they were unconditionally defined.
106 _notset = object()
106 _notset = object()
107 has_tlsv1_1 = getattr(ssl, 'HAS_TLSv1_1', _notset)
107 has_tlsv1_1 = getattr(ssl, 'HAS_TLSv1_1', _notset)
108 if has_tlsv1_1 is _notset:
108 if has_tlsv1_1 is _notset:
109 has_tlsv1_1 = getattr(ssl, 'PROTOCOL_TLSv1_1', _notset) is not _notset
109 has_tlsv1_1 = getattr(ssl, 'PROTOCOL_TLSv1_1', _notset) is not _notset
110 has_tlsv1_2 = getattr(ssl, 'HAS_TLSv1_2', _notset)
110 has_tlsv1_2 = getattr(ssl, 'HAS_TLSv1_2', _notset)
111 if has_tlsv1_2 is _notset:
111 if has_tlsv1_2 is _notset:
112 has_tlsv1_2 = getattr(ssl, 'PROTOCOL_TLSv1_2', _notset) is not _notset
112 has_tlsv1_2 = getattr(ssl, 'PROTOCOL_TLSv1_2', _notset) is not _notset
113 if not (has_tlsv1_1 or has_tlsv1_2):
113 if not (has_tlsv1_1 or has_tlsv1_2):
114 error = """
114 error = """
115 The `ssl` module does not advertise support for TLS 1.1 or TLS 1.2.
115 The `ssl` module does not advertise support for TLS 1.1 or TLS 1.2.
116 Please make sure that your Python installation was compiled against an OpenSSL
116 Please make sure that your Python installation was compiled against an OpenSSL
117 version enabling these features (likely this requires the OpenSSL version to
117 version enabling these features (likely this requires the OpenSSL version to
118 be at least 1.0.1).
118 be at least 1.0.1).
119 """
119 """
120 printf(error, file=sys.stderr)
120 printf(error, file=sys.stderr)
121 sys.exit(1)
121 sys.exit(1)
122
122
123 if sys.version_info[0] >= 3:
123 if sys.version_info[0] >= 3:
124 DYLIB_SUFFIX = sysconfig.get_config_vars()['EXT_SUFFIX']
124 DYLIB_SUFFIX = sysconfig.get_config_vars()['EXT_SUFFIX']
125 else:
125 else:
126 # deprecated in Python 3
126 # deprecated in Python 3
127 DYLIB_SUFFIX = sysconfig.get_config_vars()['SO']
127 DYLIB_SUFFIX = sysconfig.get_config_vars()['SO']
128
128
129 # Solaris Python packaging brain damage
129 # Solaris Python packaging brain damage
130 try:
130 try:
131 import hashlib
131 import hashlib
132
132
133 sha = hashlib.sha1()
133 sha = hashlib.sha1()
134 except ImportError:
134 except ImportError:
135 try:
135 try:
136 import sha
136 import sha
137
137
138 sha.sha # silence unused import warning
138 sha.sha # silence unused import warning
139 except ImportError:
139 except ImportError:
140 raise SystemExit(
140 raise SystemExit(
141 "Couldn't import standard hashlib (incomplete Python install)."
141 "Couldn't import standard hashlib (incomplete Python install)."
142 )
142 )
143
143
144 try:
144 try:
145 import zlib
145 import zlib
146
146
147 zlib.compressobj # silence unused import warning
147 zlib.compressobj # silence unused import warning
148 except ImportError:
148 except ImportError:
149 raise SystemExit(
149 raise SystemExit(
150 "Couldn't import standard zlib (incomplete Python install)."
150 "Couldn't import standard zlib (incomplete Python install)."
151 )
151 )
152
152
153 # The base IronPython distribution (as of 2.7.1) doesn't support bz2
153 # The base IronPython distribution (as of 2.7.1) doesn't support bz2
154 isironpython = False
154 isironpython = False
155 try:
155 try:
156 isironpython = (
156 isironpython = (
157 platform.python_implementation().lower().find("ironpython") != -1
157 platform.python_implementation().lower().find("ironpython") != -1
158 )
158 )
159 except AttributeError:
159 except AttributeError:
160 pass
160 pass
161
161
162 if isironpython:
162 if isironpython:
163 sys.stderr.write("warning: IronPython detected (no bz2 support)\n")
163 sys.stderr.write("warning: IronPython detected (no bz2 support)\n")
164 else:
164 else:
165 try:
165 try:
166 import bz2
166 import bz2
167
167
168 bz2.BZ2Compressor # silence unused import warning
168 bz2.BZ2Compressor # silence unused import warning
169 except ImportError:
169 except ImportError:
170 raise SystemExit(
170 raise SystemExit(
171 "Couldn't import standard bz2 (incomplete Python install)."
171 "Couldn't import standard bz2 (incomplete Python install)."
172 )
172 )
173
173
174 ispypy = "PyPy" in sys.version
174 ispypy = "PyPy" in sys.version
175
175
176 import ctypes
176 import ctypes
177 import errno
177 import errno
178 import stat, subprocess, time
178 import stat, subprocess, time
179 import re
179 import re
180 import shutil
180 import shutil
181 import tempfile
181 import tempfile
182
182
183 # We have issues with setuptools on some platforms and builders. Until
183 # We have issues with setuptools on some platforms and builders. Until
184 # those are resolved, setuptools is opt-in except for platforms where
184 # those are resolved, setuptools is opt-in except for platforms where
185 # we don't have issues.
185 # we don't have issues.
186 issetuptools = os.name == 'nt' or 'FORCE_SETUPTOOLS' in os.environ
186 issetuptools = os.name == 'nt' or 'FORCE_SETUPTOOLS' in os.environ
187 if issetuptools:
187 if issetuptools:
188 from setuptools import setup
188 from setuptools import setup
189 else:
189 else:
190 from distutils.core import setup
190 from distutils.core import setup
191 from distutils.ccompiler import new_compiler
191 from distutils.ccompiler import new_compiler
192 from distutils.core import Command, Extension
192 from distutils.core import Command, Extension
193 from distutils.dist import Distribution
193 from distutils.dist import Distribution
194 from distutils.command.build import build
194 from distutils.command.build import build
195 from distutils.command.build_ext import build_ext
195 from distutils.command.build_ext import build_ext
196 from distutils.command.build_py import build_py
196 from distutils.command.build_py import build_py
197 from distutils.command.build_scripts import build_scripts
197 from distutils.command.build_scripts import build_scripts
198 from distutils.command.install import install
198 from distutils.command.install import install
199 from distutils.command.install_lib import install_lib
199 from distutils.command.install_lib import install_lib
200 from distutils.command.install_scripts import install_scripts
200 from distutils.command.install_scripts import install_scripts
201 from distutils import log
201 from distutils import log
202 from distutils.spawn import spawn, find_executable
202 from distutils.spawn import spawn, find_executable
203 from distutils import file_util
203 from distutils import file_util
204 from distutils.errors import (
204 from distutils.errors import (
205 CCompilerError,
205 CCompilerError,
206 DistutilsError,
206 DistutilsError,
207 DistutilsExecError,
207 DistutilsExecError,
208 )
208 )
209 from distutils.sysconfig import get_python_inc, get_config_var
209 from distutils.sysconfig import get_python_inc, get_config_var
210 from distutils.version import StrictVersion
210 from distutils.version import StrictVersion
211
211
212 # Explain to distutils.StrictVersion how our release candidates are versionned
212 # Explain to distutils.StrictVersion how our release candidates are versionned
213 StrictVersion.version_re = re.compile(r'^(\d+)\.(\d+)(\.(\d+))?-?(rc(\d+))?$')
213 StrictVersion.version_re = re.compile(r'^(\d+)\.(\d+)(\.(\d+))?-?(rc(\d+))?$')
214
214
215
215
216 def write_if_changed(path, content):
216 def write_if_changed(path, content):
217 """Write content to a file iff the content hasn't changed."""
217 """Write content to a file iff the content hasn't changed."""
218 if os.path.exists(path):
218 if os.path.exists(path):
219 with open(path, 'rb') as fh:
219 with open(path, 'rb') as fh:
220 current = fh.read()
220 current = fh.read()
221 else:
221 else:
222 current = b''
222 current = b''
223
223
224 if current != content:
224 if current != content:
225 with open(path, 'wb') as fh:
225 with open(path, 'wb') as fh:
226 fh.write(content)
226 fh.write(content)
227
227
228
228
229 scripts = ['hg']
229 scripts = ['hg']
230 if os.name == 'nt':
230 if os.name == 'nt':
231 # We remove hg.bat if we are able to build hg.exe.
231 # We remove hg.bat if we are able to build hg.exe.
232 scripts.append('contrib/win32/hg.bat')
232 scripts.append('contrib/win32/hg.bat')
233
233
234
234
235 def cancompile(cc, code):
235 def cancompile(cc, code):
236 tmpdir = tempfile.mkdtemp(prefix='hg-install-')
236 tmpdir = tempfile.mkdtemp(prefix='hg-install-')
237 devnull = oldstderr = None
237 devnull = oldstderr = None
238 try:
238 try:
239 fname = os.path.join(tmpdir, 'testcomp.c')
239 fname = os.path.join(tmpdir, 'testcomp.c')
240 f = open(fname, 'w')
240 f = open(fname, 'w')
241 f.write(code)
241 f.write(code)
242 f.close()
242 f.close()
243 # Redirect stderr to /dev/null to hide any error messages
243 # Redirect stderr to /dev/null to hide any error messages
244 # from the compiler.
244 # from the compiler.
245 # This will have to be changed if we ever have to check
245 # This will have to be changed if we ever have to check
246 # for a function on Windows.
246 # for a function on Windows.
247 devnull = open('/dev/null', 'w')
247 devnull = open('/dev/null', 'w')
248 oldstderr = os.dup(sys.stderr.fileno())
248 oldstderr = os.dup(sys.stderr.fileno())
249 os.dup2(devnull.fileno(), sys.stderr.fileno())
249 os.dup2(devnull.fileno(), sys.stderr.fileno())
250 objects = cc.compile([fname], output_dir=tmpdir)
250 objects = cc.compile([fname], output_dir=tmpdir)
251 cc.link_executable(objects, os.path.join(tmpdir, "a.out"))
251 cc.link_executable(objects, os.path.join(tmpdir, "a.out"))
252 return True
252 return True
253 except Exception:
253 except Exception:
254 return False
254 return False
255 finally:
255 finally:
256 if oldstderr is not None:
256 if oldstderr is not None:
257 os.dup2(oldstderr, sys.stderr.fileno())
257 os.dup2(oldstderr, sys.stderr.fileno())
258 if devnull is not None:
258 if devnull is not None:
259 devnull.close()
259 devnull.close()
260 shutil.rmtree(tmpdir)
260 shutil.rmtree(tmpdir)
261
261
262
262
263 # simplified version of distutils.ccompiler.CCompiler.has_function
263 # simplified version of distutils.ccompiler.CCompiler.has_function
264 # that actually removes its temporary files.
264 # that actually removes its temporary files.
265 def hasfunction(cc, funcname):
265 def hasfunction(cc, funcname):
266 code = 'int main(void) { %s(); }\n' % funcname
266 code = 'int main(void) { %s(); }\n' % funcname
267 return cancompile(cc, code)
267 return cancompile(cc, code)
268
268
269
269
270 def hasheader(cc, headername):
270 def hasheader(cc, headername):
271 code = '#include <%s>\nint main(void) { return 0; }\n' % headername
271 code = '#include <%s>\nint main(void) { return 0; }\n' % headername
272 return cancompile(cc, code)
272 return cancompile(cc, code)
273
273
274
274
275 # py2exe needs to be installed to work
275 # py2exe needs to be installed to work
276 try:
276 try:
277 import py2exe
277 import py2exe
278
278
279 py2exe.Distribution # silence unused import warning
279 py2exe.Distribution # silence unused import warning
280 py2exeloaded = True
280 py2exeloaded = True
281 # import py2exe's patched Distribution class
281 # import py2exe's patched Distribution class
282 from distutils.core import Distribution
282 from distutils.core import Distribution
283 except ImportError:
283 except ImportError:
284 py2exeloaded = False
284 py2exeloaded = False
285
285
286
286
287 def runcmd(cmd, env, cwd=None):
287 def runcmd(cmd, env, cwd=None):
288 p = subprocess.Popen(
288 p = subprocess.Popen(
289 cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env, cwd=cwd
289 cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env, cwd=cwd
290 )
290 )
291 out, err = p.communicate()
291 out, err = p.communicate()
292 return p.returncode, out, err
292 return p.returncode, out, err
293
293
294
294
295 class hgcommand(object):
295 class hgcommand(object):
296 def __init__(self, cmd, env):
296 def __init__(self, cmd, env):
297 self.cmd = cmd
297 self.cmd = cmd
298 self.env = env
298 self.env = env
299
299
300 def run(self, args):
300 def run(self, args):
301 cmd = self.cmd + args
301 cmd = self.cmd + args
302 returncode, out, err = runcmd(cmd, self.env)
302 returncode, out, err = runcmd(cmd, self.env)
303 err = filterhgerr(err)
303 err = filterhgerr(err)
304 if err or returncode != 0:
304 if err or returncode != 0:
305 printf("stderr from '%s':" % (' '.join(cmd)), file=sys.stderr)
305 printf("stderr from '%s':" % (' '.join(cmd)), file=sys.stderr)
306 printf(err, file=sys.stderr)
306 printf(err, file=sys.stderr)
307 return b''
307 return b''
308 return out
308 return out
309
309
310
310
311 def filterhgerr(err):
311 def filterhgerr(err):
312 # If root is executing setup.py, but the repository is owned by
312 # If root is executing setup.py, but the repository is owned by
313 # another user (as in "sudo python setup.py install") we will get
313 # another user (as in "sudo python setup.py install") we will get
314 # trust warnings since the .hg/hgrc file is untrusted. That is
314 # trust warnings since the .hg/hgrc file is untrusted. That is
315 # fine, we don't want to load it anyway. Python may warn about
315 # fine, we don't want to load it anyway. Python may warn about
316 # a missing __init__.py in mercurial/locale, we also ignore that.
316 # a missing __init__.py in mercurial/locale, we also ignore that.
317 err = [
317 err = [
318 e
318 e
319 for e in err.splitlines()
319 for e in err.splitlines()
320 if (
320 if (
321 not e.startswith(b'not trusting file')
321 not e.startswith(b'not trusting file')
322 and not e.startswith(b'warning: Not importing')
322 and not e.startswith(b'warning: Not importing')
323 and not e.startswith(b'obsolete feature not enabled')
323 and not e.startswith(b'obsolete feature not enabled')
324 and not e.startswith(b'*** failed to import extension')
324 and not e.startswith(b'*** failed to import extension')
325 and not e.startswith(b'devel-warn:')
325 and not e.startswith(b'devel-warn:')
326 and not (
326 and not (
327 e.startswith(b'(third party extension')
327 e.startswith(b'(third party extension')
328 and e.endswith(b'or newer of Mercurial; disabling)')
328 and e.endswith(b'or newer of Mercurial; disabling)')
329 )
329 )
330 )
330 )
331 ]
331 ]
332 return b'\n'.join(b' ' + e for e in err)
332 return b'\n'.join(b' ' + e for e in err)
333
333
334
334
335 def findhg():
335 def findhg():
336 """Try to figure out how we should invoke hg for examining the local
336 """Try to figure out how we should invoke hg for examining the local
337 repository contents.
337 repository contents.
338
338
339 Returns an hgcommand object."""
339 Returns an hgcommand object."""
340 # By default, prefer the "hg" command in the user's path. This was
340 # By default, prefer the "hg" command in the user's path. This was
341 # presumably the hg command that the user used to create this repository.
341 # presumably the hg command that the user used to create this repository.
342 #
342 #
343 # This repository may require extensions or other settings that would not
343 # This repository may require extensions or other settings that would not
344 # be enabled by running the hg script directly from this local repository.
344 # be enabled by running the hg script directly from this local repository.
345 hgenv = os.environ.copy()
345 hgenv = os.environ.copy()
346 # Use HGPLAIN to disable hgrc settings that would change output formatting,
346 # Use HGPLAIN to disable hgrc settings that would change output formatting,
347 # and disable localization for the same reasons.
347 # and disable localization for the same reasons.
348 hgenv['HGPLAIN'] = '1'
348 hgenv['HGPLAIN'] = '1'
349 hgenv['LANGUAGE'] = 'C'
349 hgenv['LANGUAGE'] = 'C'
350 hgcmd = ['hg']
350 hgcmd = ['hg']
351 # Run a simple "hg log" command just to see if using hg from the user's
351 # Run a simple "hg log" command just to see if using hg from the user's
352 # path works and can successfully interact with this repository. Windows
352 # path works and can successfully interact with this repository. Windows
353 # gives precedence to hg.exe in the current directory, so fall back to the
353 # gives precedence to hg.exe in the current directory, so fall back to the
354 # python invocation of local hg, where pythonXY.dll can always be found.
354 # python invocation of local hg, where pythonXY.dll can always be found.
355 check_cmd = ['log', '-r.', '-Ttest']
355 check_cmd = ['log', '-r.', '-Ttest']
356 if os.name != 'nt' or not os.path.exists("hg.exe"):
356 if os.name != 'nt' or not os.path.exists("hg.exe"):
357 try:
357 try:
358 retcode, out, err = runcmd(hgcmd + check_cmd, hgenv)
358 retcode, out, err = runcmd(hgcmd + check_cmd, hgenv)
359 except EnvironmentError:
359 except EnvironmentError:
360 retcode = -1
360 retcode = -1
361 if retcode == 0 and not filterhgerr(err):
361 if retcode == 0 and not filterhgerr(err):
362 return hgcommand(hgcmd, hgenv)
362 return hgcommand(hgcmd, hgenv)
363
363
364 # Fall back to trying the local hg installation.
364 # Fall back to trying the local hg installation.
365 hgenv = localhgenv()
365 hgenv = localhgenv()
366 hgcmd = [sys.executable, 'hg']
366 hgcmd = [sys.executable, 'hg']
367 try:
367 try:
368 retcode, out, err = runcmd(hgcmd + check_cmd, hgenv)
368 retcode, out, err = runcmd(hgcmd + check_cmd, hgenv)
369 except EnvironmentError:
369 except EnvironmentError:
370 retcode = -1
370 retcode = -1
371 if retcode == 0 and not filterhgerr(err):
371 if retcode == 0 and not filterhgerr(err):
372 return hgcommand(hgcmd, hgenv)
372 return hgcommand(hgcmd, hgenv)
373
373
374 raise SystemExit(
374 raise SystemExit(
375 'Unable to find a working hg binary to extract the '
375 'Unable to find a working hg binary to extract the '
376 'version from the repository tags'
376 'version from the repository tags'
377 )
377 )
378
378
379
379
380 def localhgenv():
380 def localhgenv():
381 """Get an environment dictionary to use for invoking or importing
381 """Get an environment dictionary to use for invoking or importing
382 mercurial from the local repository."""
382 mercurial from the local repository."""
383 # Execute hg out of this directory with a custom environment which takes
383 # Execute hg out of this directory with a custom environment which takes
384 # care to not use any hgrc files and do no localization.
384 # care to not use any hgrc files and do no localization.
385 env = {
385 env = {
386 'HGMODULEPOLICY': 'py',
386 'HGMODULEPOLICY': 'py',
387 'HGRCPATH': '',
387 'HGRCPATH': '',
388 'LANGUAGE': 'C',
388 'LANGUAGE': 'C',
389 'PATH': '',
389 'PATH': '',
390 } # make pypi modules that use os.environ['PATH'] happy
390 } # make pypi modules that use os.environ['PATH'] happy
391 if 'LD_LIBRARY_PATH' in os.environ:
391 if 'LD_LIBRARY_PATH' in os.environ:
392 env['LD_LIBRARY_PATH'] = os.environ['LD_LIBRARY_PATH']
392 env['LD_LIBRARY_PATH'] = os.environ['LD_LIBRARY_PATH']
393 if 'SystemRoot' in os.environ:
393 if 'SystemRoot' in os.environ:
394 # SystemRoot is required by Windows to load various DLLs. See:
394 # SystemRoot is required by Windows to load various DLLs. See:
395 # https://bugs.python.org/issue13524#msg148850
395 # https://bugs.python.org/issue13524#msg148850
396 env['SystemRoot'] = os.environ['SystemRoot']
396 env['SystemRoot'] = os.environ['SystemRoot']
397 return env
397 return env
398
398
399
399
400 version = ''
400 version = ''
401
401
402 if os.path.isdir('.hg'):
402 if os.path.isdir('.hg'):
403 hg = findhg()
403 hg = findhg()
404 cmd = ['log', '-r', '.', '--template', '{tags}\n']
404 cmd = ['log', '-r', '.', '--template', '{tags}\n']
405 numerictags = [t for t in sysstr(hg.run(cmd)).split() if t[0:1].isdigit()]
405 numerictags = [t for t in sysstr(hg.run(cmd)).split() if t[0:1].isdigit()]
406 hgid = sysstr(hg.run(['id', '-i'])).strip()
406 hgid = sysstr(hg.run(['id', '-i'])).strip()
407 if not hgid:
407 if not hgid:
408 # Bail out if hg is having problems interacting with this repository,
408 # Bail out if hg is having problems interacting with this repository,
409 # rather than falling through and producing a bogus version number.
409 # rather than falling through and producing a bogus version number.
410 # Continuing with an invalid version number will break extensions
410 # Continuing with an invalid version number will break extensions
411 # that define minimumhgversion.
411 # that define minimumhgversion.
412 raise SystemExit('Unable to determine hg version from local repository')
412 raise SystemExit('Unable to determine hg version from local repository')
413 if numerictags: # tag(s) found
413 if numerictags: # tag(s) found
414 version = numerictags[-1]
414 version = numerictags[-1]
415 if hgid.endswith('+'): # propagate the dirty status to the tag
415 if hgid.endswith('+'): # propagate the dirty status to the tag
416 version += '+'
416 version += '+'
417 else: # no tag found
417 else: # no tag found
418 ltagcmd = ['parents', '--template', '{latesttag}']
418 ltagcmd = ['parents', '--template', '{latesttag}']
419 ltag = sysstr(hg.run(ltagcmd))
419 ltag = sysstr(hg.run(ltagcmd))
420 changessincecmd = ['log', '-T', 'x\n', '-r', "only(.,'%s')" % ltag]
420 changessincecmd = ['log', '-T', 'x\n', '-r', "only(.,'%s')" % ltag]
421 changessince = len(hg.run(changessincecmd).splitlines())
421 changessince = len(hg.run(changessincecmd).splitlines())
422 version = '%s+hg%s.%s' % (ltag, changessince, hgid)
422 version = '%s+hg%s.%s' % (ltag, changessince, hgid)
423 if version.endswith('+'):
423 if version.endswith('+'):
424 version = version[:-1] + 'local' + time.strftime('%Y%m%d')
424 version = version[:-1] + 'local' + time.strftime('%Y%m%d')
425 elif os.path.exists('.hg_archival.txt'):
425 elif os.path.exists('.hg_archival.txt'):
426 kw = dict(
426 kw = dict(
427 [[t.strip() for t in l.split(':', 1)] for l in open('.hg_archival.txt')]
427 [[t.strip() for t in l.split(':', 1)] for l in open('.hg_archival.txt')]
428 )
428 )
429 if 'tag' in kw:
429 if 'tag' in kw:
430 version = kw['tag']
430 version = kw['tag']
431 elif 'latesttag' in kw:
431 elif 'latesttag' in kw:
432 if 'changessincelatesttag' in kw:
432 if 'changessincelatesttag' in kw:
433 version = (
433 version = (
434 '%(latesttag)s+hg%(changessincelatesttag)s.%(node).12s' % kw
434 '%(latesttag)s+hg%(changessincelatesttag)s.%(node).12s' % kw
435 )
435 )
436 else:
436 else:
437 version = '%(latesttag)s+hg%(latesttagdistance)s.%(node).12s' % kw
437 version = '%(latesttag)s+hg%(latesttagdistance)s.%(node).12s' % kw
438 else:
438 else:
439 version = '0+hg' + kw.get('node', '')[:12]
439 version = '0+hg' + kw.get('node', '')[:12]
440 elif os.path.exists('mercurial/__version__.py'):
440 elif os.path.exists('mercurial/__version__.py'):
441 with open('mercurial/__version__.py') as f:
441 with open('mercurial/__version__.py') as f:
442 data = f.read()
442 data = f.read()
443 version = re.search('version = b"(.*)"', data).group(1)
443 version = re.search('version = b"(.*)"', data).group(1)
444
444
445 if version:
445 if version:
446 versionb = version
446 versionb = version
447 if not isinstance(versionb, bytes):
447 if not isinstance(versionb, bytes):
448 versionb = versionb.encode('ascii')
448 versionb = versionb.encode('ascii')
449
449
450 write_if_changed(
450 write_if_changed(
451 'mercurial/__version__.py',
451 'mercurial/__version__.py',
452 b''.join(
452 b''.join(
453 [
453 [
454 b'# this file is autogenerated by setup.py\n'
454 b'# this file is autogenerated by setup.py\n'
455 b'version = b"%s"\n' % versionb,
455 b'version = b"%s"\n' % versionb,
456 ]
456 ]
457 ),
457 ),
458 )
458 )
459
459
460
460
461 class hgbuild(build):
461 class hgbuild(build):
462 # Insert hgbuildmo first so that files in mercurial/locale/ are found
462 # Insert hgbuildmo first so that files in mercurial/locale/ are found
463 # when build_py is run next.
463 # when build_py is run next.
464 sub_commands = [('build_mo', None)] + build.sub_commands
464 sub_commands = [('build_mo', None)] + build.sub_commands
465
465
466
466
467 class hgbuildmo(build):
467 class hgbuildmo(build):
468
468
469 description = "build translations (.mo files)"
469 description = "build translations (.mo files)"
470
470
471 def run(self):
471 def run(self):
472 if not find_executable('msgfmt'):
472 if not find_executable('msgfmt'):
473 self.warn(
473 self.warn(
474 "could not find msgfmt executable, no translations "
474 "could not find msgfmt executable, no translations "
475 "will be built"
475 "will be built"
476 )
476 )
477 return
477 return
478
478
479 podir = 'i18n'
479 podir = 'i18n'
480 if not os.path.isdir(podir):
480 if not os.path.isdir(podir):
481 self.warn("could not find %s/ directory" % podir)
481 self.warn("could not find %s/ directory" % podir)
482 return
482 return
483
483
484 join = os.path.join
484 join = os.path.join
485 for po in os.listdir(podir):
485 for po in os.listdir(podir):
486 if not po.endswith('.po'):
486 if not po.endswith('.po'):
487 continue
487 continue
488 pofile = join(podir, po)
488 pofile = join(podir, po)
489 modir = join('locale', po[:-3], 'LC_MESSAGES')
489 modir = join('locale', po[:-3], 'LC_MESSAGES')
490 mofile = join(modir, 'hg.mo')
490 mofile = join(modir, 'hg.mo')
491 mobuildfile = join('mercurial', mofile)
491 mobuildfile = join('mercurial', mofile)
492 cmd = ['msgfmt', '-v', '-o', mobuildfile, pofile]
492 cmd = ['msgfmt', '-v', '-o', mobuildfile, pofile]
493 if sys.platform != 'sunos5':
493 if sys.platform != 'sunos5':
494 # msgfmt on Solaris does not know about -c
494 # msgfmt on Solaris does not know about -c
495 cmd.append('-c')
495 cmd.append('-c')
496 self.mkpath(join('mercurial', modir))
496 self.mkpath(join('mercurial', modir))
497 self.make_file([pofile], mobuildfile, spawn, (cmd,))
497 self.make_file([pofile], mobuildfile, spawn, (cmd,))
498
498
499
499
500 class hgdist(Distribution):
500 class hgdist(Distribution):
501 pure = False
501 pure = False
502 rust = False
502 rust = False
503 no_rust = False
503 no_rust = False
504 cffi = ispypy
504 cffi = ispypy
505
505
506 global_options = Distribution.global_options + [
506 global_options = Distribution.global_options + [
507 ('pure', None, "use pure (slow) Python code instead of C extensions"),
507 ('pure', None, "use pure (slow) Python code instead of C extensions"),
508 ('rust', None, "use Rust extensions additionally to C extensions"),
508 ('rust', None, "use Rust extensions additionally to C extensions"),
509 (
509 (
510 'no-rust',
510 'no-rust',
511 None,
511 None,
512 "do not use Rust extensions additionally to C extensions",
512 "do not use Rust extensions additionally to C extensions",
513 ),
513 ),
514 ]
514 ]
515
515
516 negative_opt = Distribution.negative_opt.copy()
516 negative_opt = Distribution.negative_opt.copy()
517 boolean_options = ['pure', 'rust', 'no-rust']
517 boolean_options = ['pure', 'rust', 'no-rust']
518 negative_opt['no-rust'] = 'rust'
518 negative_opt['no-rust'] = 'rust'

    def _set_command_options(self, command_obj, option_dict=None):
        # Not all distutils versions in the wild have boolean_options.
        # This should be cleaned up when we're Python 3 only.
        command_obj.boolean_options = (
            getattr(command_obj, 'boolean_options', []) + self.boolean_options
        )
        return Distribution._set_command_options(
            self, command_obj, option_dict=option_dict
        )

    def parse_command_line(self):
        ret = Distribution.parse_command_line(self)
        if not (self.rust or self.no_rust):
            hgrustext = os.environ.get('HGWITHRUSTEXT')
            # TODO record it for proper rebuild upon changes
            # (see mercurial/__modulepolicy__.py)
            if hgrustext != 'cpython' and hgrustext is not None:
                if hgrustext:
                    msg = 'unknown HGWITHRUSTEXT value: %s' % hgrustext
                    printf(msg, file=sys.stderr)
                hgrustext = None
            self.rust = hgrustext is not None
            self.no_rust = not self.rust
        return ret

    def has_ext_modules(self):
        # self.ext_modules is emptied in hgbuildpy.finalize_options which is
        # too late for some cases
        return not self.pure and Distribution.has_ext_modules(self)
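
# Example invocations of the options above (illustrative, not from the
# original file):
#   python setup.py build                        # C extensions (the default)
#   python setup.py --pure build                 # pure-Python modules only
#   HGWITHRUSTEXT=cpython python setup.py build  # opt into Rust via env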


# This is ugly as a one-liner. So use a variable.
buildextnegops = dict(getattr(build_ext, 'negative_options', {}))
buildextnegops['no-zstd'] = 'zstd'
buildextnegops['no-rust'] = 'rust'


class hgbuildext(build_ext):
    user_options = build_ext.user_options + [
        ('zstd', None, 'compile zstd bindings [default]'),
        ('no-zstd', None, 'do not compile zstd bindings'),
        (
            'rust',
            None,
            'compile Rust extensions if they are in use '
            '(requires Cargo) [default]',
        ),
        ('no-rust', None, 'do not compile Rust extensions'),
    ]

    boolean_options = build_ext.boolean_options + ['zstd', 'rust']
    negative_opt = buildextnegops

    def initialize_options(self):
        self.zstd = True
        self.rust = True

        return build_ext.initialize_options(self)

    def finalize_options(self):
        # Unless overridden by the end user, build extensions in parallel.
        # Only influences behavior on Python 3.5+.
        if getattr(self, 'parallel', None) is None:
            self.parallel = True

        return build_ext.finalize_options(self)

    def build_extensions(self):
        ruststandalones = [
            e for e in self.extensions if isinstance(e, RustStandaloneExtension)
        ]
        self.extensions = [
            e for e in self.extensions if e not in ruststandalones
        ]
        # Filter out zstd if disabled via argument.
        if not self.zstd:
            self.extensions = [
                e for e in self.extensions if e.name != 'mercurial.zstd'
            ]

        # Build the Rust standalone extensions if they are going to be used,
        # and their build is not explicitly disabled (as it would be for an
        # external build, e.g. by Linux distributions).
        if self.distribution.rust and self.rust:
            if not sys.platform.startswith('linux'):
                self.warn(
                    "rust extensions have only been tested on Linux "
                    "and may not behave correctly on other platforms"
                )

            for rustext in ruststandalones:
                rustext.build('' if self.inplace else self.build_lib)

        return build_ext.build_extensions(self)

    def build_extension(self, ext):
        if (
            self.distribution.rust
            and self.rust
            and isinstance(ext, RustExtension)
        ):
            ext.rustbuild()
        try:
            build_ext.build_extension(self, ext)
        except CCompilerError:
            if not getattr(ext, 'optional', False):
                raise
            log.warn(
                "Failed to build optional extension '%s' (skipping)", ext.name
            )


class hgbuildscripts(build_scripts):
    def run(self):
        if os.name != 'nt' or self.distribution.pure:
            return build_scripts.run(self)

        exebuilt = False
        try:
            self.run_command('build_hgexe')
            exebuilt = True
        except (DistutilsError, CCompilerError):
            log.warn('failed to build optional hg.exe')

        if exebuilt:
            # Copying hg.exe to the scripts build directory ensures it is
            # installed by the install_scripts command.
            hgexecommand = self.get_finalized_command('build_hgexe')
            dest = os.path.join(self.build_dir, 'hg.exe')
            self.mkpath(self.build_dir)
            self.copy_file(hgexecommand.hgexepath, dest)

            # Remove hg.bat because it is redundant with hg.exe.
            self.scripts.remove('contrib/win32/hg.bat')

        return build_scripts.run(self)


class hgbuildpy(build_py):
    def finalize_options(self):
        build_py.finalize_options(self)

        if self.distribution.pure:
            self.distribution.ext_modules = []
        elif self.distribution.cffi:
            from mercurial.cffi import (
                bdiffbuild,
                mpatchbuild,
            )

            exts = [
                mpatchbuild.ffi.distutils_extension(),
                bdiffbuild.ffi.distutils_extension(),
            ]
            # cffi modules go here
            if sys.platform == 'darwin':
                from mercurial.cffi import osutilbuild

                exts.append(osutilbuild.ffi.distutils_extension())
            self.distribution.ext_modules = exts
        else:
            h = os.path.join(get_python_inc(), 'Python.h')
            if not os.path.exists(h):
                raise SystemExit(
                    'Python headers are required to build '
                    'Mercurial but weren\'t found in %s' % h
                )

    def run(self):
        basepath = os.path.join(self.build_lib, 'mercurial')
        self.mkpath(basepath)

        rust = self.distribution.rust
        if self.distribution.pure:
            modulepolicy = 'py'
        elif self.build_lib == '.':
            # an in-place build should run without rebuilding the C and
            # Rust extensions
            modulepolicy = 'rust+c-allow' if rust else 'allow'
        else:
            modulepolicy = 'rust+c' if rust else 'c'

        content = b''.join(
            [
                b'# this file is autogenerated by setup.py\n',
                b'modulepolicy = b"%s"\n' % modulepolicy.encode('ascii'),
            ]
        )
        write_if_changed(os.path.join(basepath, '__modulepolicy__.py'), content)
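        # For instance, a default (non-pure) build with Rust enabled writes
        # a __modulepolicy__.py containing roughly:
        #   modulepolicy = b"rust+c"
        # (illustrative note, not in the original file)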

        build_py.run(self)


class buildhgextindex(Command):
    description = 'generate prebuilt index of hgext (for frozen package)'
    user_options = []
    _indexfilename = 'hgext/__index__.py'

    def initialize_options(self):
        pass

    def finalize_options(self):
        pass

    def run(self):
        if os.path.exists(self._indexfilename):
            with open(self._indexfilename, 'w') as f:
                f.write('# empty\n')

        # With no extension enabled here, disabled() lists everything.
        code = (
            'import pprint; from mercurial import extensions; '
            'ext = extensions.disabled();'
            'ext.pop("__index__", None);'
            'pprint.pprint(ext)'
        )
        returncode, out, err = runcmd(
            [sys.executable, '-c', code], localhgenv()
        )
        if err or returncode != 0:
            raise DistutilsExecError(err)

        with open(self._indexfilename, 'wb') as f:
            f.write(b'# this file is autogenerated by setup.py\n')
            f.write(b'docs = ')
            f.write(out)
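            # The resulting hgext/__index__.py then looks roughly like
            #   docs = {'churn': '<help summary>', ...}
            # i.e. a pprint'ed dict mapping extension name to help summary
            # (illustrative note, not in the original file).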


class buildhgexe(build_ext):
    description = 'compile hg.exe from mercurial/exewrapper.c'
    user_options = build_ext.user_options + [
        (
            'long-paths-support',
            None,
            'enable support for long paths on '
            'Windows (off by default and '
            'experimental)',
        ),
    ]

    LONG_PATHS_MANIFEST = """
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<assembly xmlns="urn:schemas-microsoft-com:asm.v1" manifestVersion="1.0">
  <application>
    <windowsSettings
      xmlns:ws2="http://schemas.microsoft.com/SMI/2016/WindowsSettings">
      <ws2:longPathAware>true</ws2:longPathAware>
    </windowsSettings>
  </application>
</assembly>"""

    def initialize_options(self):
        build_ext.initialize_options(self)
        self.long_paths_support = False

    def build_extensions(self):
        if os.name != 'nt':
            return
        if isinstance(self.compiler, HackedMingw32CCompiler):
            self.compiler.compiler_so = self.compiler.compiler  # no -mdll
            self.compiler.dll_libraries = []  # no -lmsrvc90

        pythonlib = None

        dir = os.path.dirname(self.get_ext_fullpath('dummy'))
        self.hgtarget = os.path.join(dir, 'hg')

        if getattr(sys, 'dllhandle', None):
            # Different Python installs can have different Python library
            # names. e.g. the official CPython distribution uses pythonXY.dll
            # and MinGW uses libpythonX.Y.dll.
            _kernel32 = ctypes.windll.kernel32
            _kernel32.GetModuleFileNameA.argtypes = [
                ctypes.c_void_p,
                ctypes.c_void_p,
                ctypes.c_ulong,
            ]
            _kernel32.GetModuleFileNameA.restype = ctypes.c_ulong
            size = 1000
            buf = ctypes.create_string_buffer(size + 1)
            filelen = _kernel32.GetModuleFileNameA(
                sys.dllhandle, ctypes.byref(buf), size
            )

            if filelen > 0 and filelen != size:
                dllbasename = os.path.basename(buf.value)
                if not dllbasename.lower().endswith(b'.dll'):
                    raise SystemExit(
                        'Python DLL does not end with .dll: %s' % dllbasename
                    )
                pythonlib = dllbasename[:-4]

                # Copy the pythonXY.dll next to the binary so that it runs
                # without tampering with PATH.
                fsdecode = lambda x: x
                if sys.version_info[0] >= 3:
                    fsdecode = os.fsdecode
                dest = os.path.join(
                    os.path.dirname(self.hgtarget),
                    fsdecode(dllbasename),
                )

                if not os.path.exists(dest):
                    shutil.copy(buf.value, dest)

                # Also overwrite python3.dll so that hgext.git is usable.
                # TODO: also handle the MSYS flavor
                if sys.version_info[0] >= 3:
                    python_x = os.path.join(
                        os.path.dirname(fsdecode(buf.value)),
                        "python3.dll",
                    )

                    if os.path.exists(python_x):
                        dest = os.path.join(
                            os.path.dirname(self.hgtarget),
                            os.path.basename(python_x),
                        )

                        shutil.copy(python_x, dest)

        if not pythonlib:
            log.warn(
                'could not determine Python DLL filename; assuming pythonXY'
            )

            hv = sys.hexversion
            pythonlib = b'python%d%d' % (hv >> 24, (hv >> 16) & 0xFF)
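            # e.g. sys.hexversion 0x30905f0 (CPython 3.9.5) yields
            # b'python39' (illustrative note, not in the original file).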

        log.info('using %s as Python library name' % pythonlib)
        with open('mercurial/hgpythonlib.h', 'wb') as f:
            f.write(b'/* this file is autogenerated by setup.py */\n')
            f.write(b'#define HGPYTHONLIB "%s"\n' % pythonlib)

        macros = None
        if sys.version_info[0] >= 3:
            macros = [('_UNICODE', None), ('UNICODE', None)]

        objects = self.compiler.compile(
            ['mercurial/exewrapper.c'],
            output_dir=self.build_temp,
            macros=macros,
        )
        self.compiler.link_executable(
            objects, self.hgtarget, libraries=[], output_dir=self.build_temp
        )
        if self.long_paths_support:
            self.addlongpathsmanifest()

    def addlongpathsmanifest(self):
        r"""Add manifest pieces so that hg.exe understands long paths

        This is an EXPERIMENTAL feature, use with care.
        To enable long paths support, one needs to do two things:
        - build Mercurial with the --long-paths-support option
        - change HKLM\SYSTEM\CurrentControlSet\Control\FileSystem\
          LongPathsEnabled to have value 1.

        Please ignore 'warning 81010002: Unrecognized Element "longPathAware"';
        it happens because Mercurial uses mt.exe circa 2008, which is not
        yet aware of long paths support in the manifest (I think so at least).
        This does not stop mt.exe from embedding/merging the XML properly.

        Why should resource #1 be used for .exe manifests? I don't know and
        wasn't able to find an explanation for mortals. But it seems to work.
        """
        exefname = self.compiler.executable_filename(self.hgtarget)
        fdauto, manfname = tempfile.mkstemp(suffix='.hg.exe.manifest')
        os.close(fdauto)
        with open(manfname, 'w') as f:
            f.write(self.LONG_PATHS_MANIFEST)
        log.info("long paths manifest is written to '%s'" % manfname)
        inputresource = '-inputresource:%s;#1' % exefname
        outputresource = '-outputresource:%s;#1' % exefname
        log.info("running mt.exe to update hg.exe's manifest in-place")
        # supplying both -manifest and -inputresource to mt.exe makes
        # it merge the embedded and supplied manifests in the -outputresource
        self.spawn(
            [
                'mt.exe',
                '-nologo',
                '-manifest',
                manfname,
                inputresource,
                outputresource,
            ]
        )
        log.info("done updating hg.exe's manifest")
        os.remove(manfname)
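        # Typical use (a usage note, not in the original file):
        #   python setup.py build_hgexe --long-paths-support
        # then set the LongPathsEnabled registry value described above.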

    @property
    def hgexepath(self):
        dir = os.path.dirname(self.get_ext_fullpath('dummy'))
        return os.path.join(self.build_temp, dir, 'hg.exe')


class hgbuilddoc(Command):
    description = 'build documentation'
    user_options = [
        ('man', None, 'generate man pages'),
        ('html', None, 'generate html pages'),
    ]

    def initialize_options(self):
        self.man = None
        self.html = None

    def finalize_options(self):
        # If --man or --html are set, only generate what we're told to.
        # Otherwise generate everything.
        have_subset = self.man is not None or self.html is not None

        if have_subset:
            self.man = True if self.man else False
            self.html = True if self.html else False
        else:
            self.man = True
            self.html = True

    def run(self):
        def normalizecrlf(p):
            with open(p, 'rb') as fh:
                orig = fh.read()

            if b'\r\n' not in orig:
                return

            log.info('normalizing %s to LF line endings' % p)
            with open(p, 'wb') as fh:
                fh.write(orig.replace(b'\r\n', b'\n'))

        def gentxt(root):
            txt = 'doc/%s.txt' % root
            log.info('generating %s' % txt)
            res, out, err = runcmd(
                [sys.executable, 'gendoc.py', root], os.environ, cwd='doc'
            )
            if res:
                raise SystemExit(
                    'error running gendoc.py: %s'
                    % '\n'.join([sysstr(out), sysstr(err)])
                )

            with open(txt, 'wb') as fh:
                fh.write(out)

        def gengendoc(root):
            gendoc = 'doc/%s.gendoc.txt' % root

            log.info('generating %s' % gendoc)
            res, out, err = runcmd(
                [sys.executable, 'gendoc.py', '%s.gendoc' % root],
                os.environ,
                cwd='doc',
            )
            if res:
                raise SystemExit(
                    'error running gendoc: %s'
                    % '\n'.join([sysstr(out), sysstr(err)])
                )

            with open(gendoc, 'wb') as fh:
                fh.write(out)

        def genman(root):
            log.info('generating doc/%s' % root)
            res, out, err = runcmd(
                [
                    sys.executable,
                    'runrst',
                    'hgmanpage',
                    '--halt',
                    'warning',
                    '--strip-elements-with-class',
                    'htmlonly',
                    '%s.txt' % root,
                    root,
                ],
                os.environ,
                cwd='doc',
            )
            if res:
                raise SystemExit(
                    'error running runrst: %s'
                    % '\n'.join([sysstr(out), sysstr(err)])
                )

            normalizecrlf('doc/%s' % root)

        def genhtml(root):
            log.info('generating doc/%s.html' % root)
            res, out, err = runcmd(
                [
                    sys.executable,
                    'runrst',
                    'html',
                    '--halt',
                    'warning',
                    '--link-stylesheet',
                    '--stylesheet-path',
                    'style.css',
                    '%s.txt' % root,
                    '%s.html' % root,
                ],
                os.environ,
                cwd='doc',
            )
            if res:
                raise SystemExit(
                    'error running runrst: %s'
                    % '\n'.join([sysstr(out), sysstr(err)])
                )

            normalizecrlf('doc/%s.html' % root)

        # This logic is duplicated in doc/Makefile.
        sources = {
            f
            for f in os.listdir('mercurial/helptext')
            if re.search(r'[0-9]\.txt$', f)
        }

        # common.txt is a one-off.
        gentxt('common')

        for source in sorted(sources):
            assert source[-4:] == '.txt'
            root = source[:-4]

            gentxt(root)
            gengendoc(root)

            if self.man:
                genman(root)
            if self.html:
                genhtml(root)
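        # e.g. `python setup.py build_doc --man` regenerates only the man
        # pages (a usage note, not in the original file).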


class hginstall(install):

    user_options = install.user_options + [
        (
            'old-and-unmanageable',
            None,
            'noop, present for eggless setuptools compat',
        ),
        (
            'single-version-externally-managed',
            None,
            'noop, present for eggless setuptools compat',
        ),
    ]

    # Also helps setuptools not be sad while we refuse to create eggs.
    single_version_externally_managed = True

    def get_sub_commands(self):
        # Screen out egg related commands to prevent egg generation. But allow
        # mercurial.egg-info generation, since that is part of modern
        # packaging.
        excl = {'bdist_egg'}
        return filter(lambda x: x not in excl, install.get_sub_commands(self))


class hginstalllib(install_lib):
    """
    This is a specialization of install_lib that replaces the copy_file used
    there so that it supports setting the mode of files after copying them,
    instead of just preserving the mode that the files originally had. If your
    system has a umask of something like 027, preserving the permissions when
    copying will lead to a broken install.

    Note that just passing keep_permissions=False to copy_file would be
    insufficient, as it might still be applying a umask.
    """

    def run(self):
        realcopyfile = file_util.copy_file

        def copyfileandsetmode(*args, **kwargs):
            src, dst = args[0], args[1]
            dst, copied = realcopyfile(*args, **kwargs)
            if copied:
                st = os.stat(src)
                # Persist executable bit (apply it to group and other if user
                # has it)
                if st[stat.ST_MODE] & stat.S_IXUSR:
                    setmode = int('0755', 8)
                else:
                    setmode = int('0644', 8)
                m = stat.S_IMODE(st[stat.ST_MODE])
                m = (m & ~int('0777', 8)) | setmode
                os.chmod(dst, m)
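                # e.g. a source file installed as 0700 under umask 027 still
                # ends up 0755 here, since the permission bits are replaced
                # wholesale (illustrative note, not in the original file).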

        file_util.copy_file = copyfileandsetmode
        try:
            install_lib.run(self)
        finally:
            file_util.copy_file = realcopyfile


class hginstallscripts(install_scripts):
    """
    This is a specialization of install_scripts that replaces the @LIBDIR@ with
    the configured directory for modules. If possible, the path is made relative
    to the directory for scripts.
    """

    def initialize_options(self):
        install_scripts.initialize_options(self)

        self.install_lib = None

    def finalize_options(self):
        install_scripts.finalize_options(self)
        self.set_undefined_options('install', ('install_lib', 'install_lib'))

    def run(self):
        install_scripts.run(self)

        # It only makes sense to replace @LIBDIR@ with the install path if
        # the install path is known. For wheels, the logic below calculates
        # the libdir to be "../..". This is because the internal layout of a
        # wheel archive looks like:
        #
        #   mercurial-3.6.1.data/scripts/hg
        #   mercurial/__init__.py
        #
        # When installing wheels, the subdirectories of the "<pkg>.data"
        # directory are translated to system local paths and files therein
        # are copied in place. The mercurial/* files are installed into the
        # site-packages directory. However, the site-packages directory
        # isn't known until wheel install time. This means we have no clue
        # at wheel generation time what the installed site-packages directory
        # will be. And, wheels don't appear to provide the ability to register
        # custom code to run during wheel installation. This all means that
        # we can't reliably set the libdir in wheels: the default behavior
        # of looking in sys.path must do.

        if (
            os.path.splitdrive(self.install_dir)[0]
            != os.path.splitdrive(self.install_lib)[0]
        ):
            # can't make relative paths from one drive to another, so use an
            # absolute path instead
            libdir = self.install_lib
        else:
            libdir = os.path.relpath(self.install_lib, self.install_dir)
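            # e.g. scripts in /usr/local/bin with modules in
            # /usr/local/lib/python3.9/site-packages yields a libdir of
            # '../lib/python3.9/site-packages' (illustrative note, not in
            # the original file).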

        for outfile in self.outfiles:
            with open(outfile, 'rb') as fp:
                data = fp.read()

            # skip binary files
            if b'\0' in data:
                continue

            # During local installs, the shebang will be rewritten to the final
            # install path. During wheel packaging, the shebang has a special
            # value.
            if data.startswith(b'#!python'):
                log.info(
                    'not rewriting @LIBDIR@ in %s because install path '
                    'not known' % outfile
                )
                continue

            data = data.replace(b'@LIBDIR@', libdir.encode(libdir_escape))
            with open(outfile, 'wb') as fp:
                fp.write(data)


# virtualenv installs custom distutils/__init__.py and
# distutils/distutils.cfg files which essentially proxy back to the
# "real" distutils in the main Python install. The presence of this
# directory causes py2exe to pick up the "hacked" distutils package
# from the virtualenv and "import distutils" will fail from the py2exe
# build because the "real" distutils files can't be located.
#
# We work around this by monkeypatching the py2exe code finding Python
# modules to replace the found virtualenv distutils modules with the
# original versions via filesystem scanning. This is a bit hacky. But
# it allows us to use virtualenvs for py2exe packaging, which is more
# deterministic and reproducible.
#
# It's worth noting that the common StackOverflow suggestions for this
# problem involve copying the original distutils files into the
# virtualenv or into the staging directory after setup() is invoked.
# The former is very brittle and can easily break setup(). Our hacking
# of the found modules routine has a result similar to copying the files
# manually. But it makes fewer assumptions about how py2exe works and
# is less brittle.

# This only catches virtualenvs made with virtualenv (as opposed to
# venv, which is likely what Python 3 uses).
py2exehacked = py2exeloaded and getattr(sys, 'real_prefix', None) is not None

if py2exehacked:
    from distutils.command.py2exe import py2exe as buildpy2exe
    from py2exe.mf import Module as py2exemodule

    class hgbuildpy2exe(buildpy2exe):
        def find_needed_modules(self, mf, files, modules):
            res = buildpy2exe.find_needed_modules(self, mf, files, modules)

            # Replace virtualenv's distutils modules with the real ones.
            modules = {}
            for k, v in res.modules.items():
                if k != 'distutils' and not k.startswith('distutils.'):
                    modules[k] = v

            res.modules = modules

            import opcode

            distutilsreal = os.path.join(
                os.path.dirname(opcode.__file__), 'distutils'
            )

            for root, dirs, files in os.walk(distutilsreal):
                for f in sorted(files):
                    if not f.endswith('.py'):
                        continue

                    full = os.path.join(root, f)

                    parents = ['distutils']

                    if root != distutilsreal:
                        rel = os.path.relpath(root, distutilsreal)
                        parents.extend(p for p in rel.split(os.sep))

                    modname = '%s.%s' % ('.'.join(parents), f[:-3])

                    if modname.startswith('distutils.tests.'):
                        continue

                    if modname.endswith('.__init__'):
                        modname = modname[: -len('.__init__')]
                        path = os.path.dirname(full)
                    else:
                        path = None

                    res.modules[modname] = py2exemodule(
                        modname, full, path=path
                    )

            if 'distutils' not in res.modules:
                raise SystemExit('could not find distutils modules')

            return res


cmdclass = {
    'build': hgbuild,
    'build_doc': hgbuilddoc,
    'build_mo': hgbuildmo,
    'build_ext': hgbuildext,
    'build_py': hgbuildpy,
    'build_scripts': hgbuildscripts,
    'build_hgextindex': buildhgextindex,
    'install': hginstall,
    'install_lib': hginstalllib,
    'install_scripts': hginstallscripts,
    'build_hgexe': buildhgexe,
}

if py2exehacked:
    cmdclass['py2exe'] = hgbuildpy2exe

packages = [
    'mercurial',
    'mercurial.cext',
    'mercurial.cffi',
    'mercurial.defaultrc',
    'mercurial.dirstateutils',
    'mercurial.helptext',
    'mercurial.helptext.internals',
    'mercurial.hgweb',
    'mercurial.interfaces',
    'mercurial.pure',
    'mercurial.templates',
    'mercurial.thirdparty',
    'mercurial.thirdparty.attr',
    'mercurial.thirdparty.zope',
    'mercurial.thirdparty.zope.interface',
    'mercurial.upgrade_utils',
    'mercurial.utils',
    'mercurial.revlogutils',
    'mercurial.testing',
    'hgext',
    'hgext.convert',
    'hgext.fsmonitor',
    'hgext.fastannotate',
    'hgext.fsmonitor.pywatchman',
    'hgext.git',
    'hgext.highlight',
    'hgext.hooklib',
    'hgext.infinitepush',
    'hgext.largefiles',
    'hgext.lfs',
    'hgext.narrow',
    'hgext.remotefilelog',
    'hgext.zeroconf',
    'hgext3rd',
    'hgdemandimport',
]

# The pygit2 dependency dropped py2 support with the 1.0 release in Dec 2019.
# Prior releases do not build at all on Windows, because Visual Studio 2008
# doesn't understand C11. Older Linux releases are buggy.
if sys.version_info[0] == 2:
    packages.remove('hgext.git')


for name in os.listdir(os.path.join('mercurial', 'templates')):
    if name != '__pycache__' and os.path.isdir(
        os.path.join('mercurial', 'templates', name)
    ):
        packages.append('mercurial.templates.%s' % name)

if sys.version_info[0] == 2:
    packages.extend(
        [
            'mercurial.thirdparty.concurrent',
            'mercurial.thirdparty.concurrent.futures',
        ]
    )

if 'HG_PY2EXE_EXTRA_INSTALL_PACKAGES' in os.environ:
    # py2exe can't cope with namespace packages very well, so we have to
    # install any hgext3rd.* extensions that we want in the final py2exe
    # image here. This is gross, but you gotta do what you gotta do.
    packages.extend(os.environ['HG_PY2EXE_EXTRA_INSTALL_PACKAGES'].split(' '))

common_depends = [
    'mercurial/bitmanipulation.h',
    'mercurial/compat.h',
    'mercurial/cext/util.h',
]
common_include_dirs = ['mercurial']

common_cflags = []

# MSVC 2008 still needs declarations at the top of the scope, but Python 3.9
# makes declarations not at the top of a scope in the headers.
if os.name != 'nt' and sys.version_info[1] < 9:
    common_cflags = ['-Werror=declaration-after-statement']

osutil_cflags = []
osutil_ldflags = []

# platform specific macros
for plat, func in [('bsd', 'setproctitle')]:
    if re.search(plat, sys.platform) and hasfunction(new_compiler(), func):
        osutil_cflags.append('-DHAVE_%s' % func.upper())

for plat, macro, code in [
    (
        'bsd|darwin',
        'BSD_STATFS',
        '''
     #include <sys/param.h>
     #include <sys/mount.h>
     int main() { struct statfs s; return sizeof(s.f_fstypename); }
     ''',
    ),
    (
        'linux',
        'LINUX_STATFS',
        '''
     #include <linux/magic.h>
     #include <sys/vfs.h>
     int main() { struct statfs s; return sizeof(s.f_type); }
     ''',
    ),
]:
    if re.search(plat, sys.platform) and cancompile(new_compiler(), code):
        osutil_cflags.append('-DHAVE_%s' % macro)
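        # On a typical Linux host this probe compiles successfully and adds
        # '-DHAVE_LINUX_STATFS' to osutil_cflags (illustrative note, not in
        # the original file).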

if sys.platform == 'darwin':
    osutil_ldflags += ['-framework', 'ApplicationServices']

if sys.platform == 'sunos5':
    osutil_ldflags += ['-lsocket']

xdiff_srcs = [
    'mercurial/thirdparty/xdiff/xdiffi.c',
    'mercurial/thirdparty/xdiff/xprepare.c',
    'mercurial/thirdparty/xdiff/xutils.c',
]

xdiff_headers = [
    'mercurial/thirdparty/xdiff/xdiff.h',
    'mercurial/thirdparty/xdiff/xdiffi.h',
    'mercurial/thirdparty/xdiff/xinclude.h',
    'mercurial/thirdparty/xdiff/xmacros.h',
    'mercurial/thirdparty/xdiff/xprepare.h',
    'mercurial/thirdparty/xdiff/xtypes.h',
    'mercurial/thirdparty/xdiff/xutils.h',
]


class RustCompilationError(CCompilerError):
    """Exception class for Rust compilation errors."""


class RustExtension(Extension):
    """Base class for concrete Rust Extension classes."""

    rusttargetdir = os.path.join('rust', 'target', 'release')

    def __init__(
        self, mpath, sources, rustlibname, subcrate, py3_features=None, **kw
    ):
        Extension.__init__(self, mpath, sources, **kw)
        srcdir = self.rustsrcdir = os.path.join('rust', subcrate)
        self.py3_features = py3_features

        # adding Rust source and control files to depends so that the extension
        # gets rebuilt if they've changed
        self.depends.append(os.path.join(srcdir, 'Cargo.toml'))
        cargo_lock = os.path.join(srcdir, 'Cargo.lock')
        if os.path.exists(cargo_lock):
            self.depends.append(cargo_lock)
        for dirpath, subdir, fnames in os.walk(os.path.join(srcdir, 'src')):
            self.depends.extend(
                os.path.join(dirpath, fname)
                for fname in fnames
                if os.path.splitext(fname)[1] == '.rs'
            )

    @staticmethod
    def rustdylibsuffix():
        """Return the suffix for shared libraries produced by rustc.

        See also: https://doc.rust-lang.org/reference/linkage.html
        """
        if sys.platform == 'darwin':
            return '.dylib'
        elif os.name == 'nt':
            return '.dll'
        else:
            return '.so'
1462
1463
1463 def rustbuild(self):
1464 def rustbuild(self):
1464 env = os.environ.copy()
1465 env = os.environ.copy()
1465 if 'HGTEST_RESTOREENV' in env:
1466 if 'HGTEST_RESTOREENV' in env:
1466 # Mercurial tests change HOME to a temporary directory,
1467 # Mercurial tests change HOME to a temporary directory,
1467 # but, if installed with rustup, the Rust toolchain needs
1468 # but, if installed with rustup, the Rust toolchain needs
1468 # HOME to be correct (otherwise the 'no default toolchain'
1469 # HOME to be correct (otherwise the 'no default toolchain'
1469 # error message is issued and the build fails).
1470 # error message is issued and the build fails).
1470 # This happens currently with test-hghave.t, which does
1471 # This happens currently with test-hghave.t, which does
1471 # invoke this build.
1472 # invoke this build.
1472
1473
1473 # Unix-only fix (os.path.expanduser is not really reliable if
1474 # Unix-only fix (os.path.expanduser is not really reliable if
1474 # HOME is shadowed like this)
1475 # HOME is shadowed like this)
1475 import pwd
1476 import pwd
1476
1477
1477 env['HOME'] = pwd.getpwuid(os.getuid()).pw_dir
1478 env['HOME'] = pwd.getpwuid(os.getuid()).pw_dir
1478
1479
1479 cargocmd = ['cargo', 'rustc', '--release']
1480 cargocmd = ['cargo', 'rustc', '--release']
1480
1481
1481 feature_flags = []
1482 feature_flags = []
1482
1483
1483 if sys.version_info[0] == 3 and self.py3_features is not None:
1484 if sys.version_info[0] == 3 and self.py3_features is not None:
1484 feature_flags.append(self.py3_features)
1485 feature_flags.append(self.py3_features)
1485 cargocmd.append('--no-default-features')
1486 cargocmd.append('--no-default-features')
1486
1487
1487 rust_features = env.get("HG_RUST_FEATURES")
1488 rust_features = env.get("HG_RUST_FEATURES")
1488 if rust_features:
1489 if rust_features:
1489 feature_flags.append(rust_features)
1490 feature_flags.append(rust_features)
1490
1491
1491 cargocmd.extend(('--features', " ".join(feature_flags)))
1492 cargocmd.extend(('--features', " ".join(feature_flags)))
1492
1493
1493 cargocmd.append('--')
1494 cargocmd.append('--')
1494 if sys.platform == 'darwin':
1495 if sys.platform == 'darwin':
1495 cargocmd.extend(
1496 cargocmd.extend(
1496 ("-C", "link-arg=-undefined", "-C", "link-arg=dynamic_lookup")
1497 ("-C", "link-arg=-undefined", "-C", "link-arg=dynamic_lookup")
1497 )
1498 )
1498 try:
1499 try:
1499 subprocess.check_call(cargocmd, env=env, cwd=self.rustsrcdir)
1500 subprocess.check_call(cargocmd, env=env, cwd=self.rustsrcdir)
1500 except OSError as exc:
1501 except OSError as exc:
1501 if exc.errno == errno.ENOENT:
1502 if exc.errno == errno.ENOENT:
1502 raise RustCompilationError("Cargo not found")
1503 raise RustCompilationError("Cargo not found")
1503 elif exc.errno == errno.EACCES:
1504 elif exc.errno == errno.EACCES:
1504 raise RustCompilationError(
1505 raise RustCompilationError(
1505 "Cargo found, but permisssion to execute it is denied"
1506 "Cargo found, but permisssion to execute it is denied"
1506 )
1507 )
1507 else:
1508 else:
1508 raise
1509 raise
1509 except subprocess.CalledProcessError:
1510 except subprocess.CalledProcessError:
1510 raise RustCompilationError(
1511 raise RustCompilationError(
1511 "Cargo failed. Working directory: %r, "
1512 "Cargo failed. Working directory: %r, "
1512 "command: %r, environment: %r"
1513 "command: %r, environment: %r"
1513 % (self.rustsrcdir, cargocmd, env)
1514 % (self.rustsrcdir, cargocmd, env)
1514 )
1515 )
1515
1516
1516
1517
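(Aside, not part of setup.py: a sketch of the cargo command that rustbuild() above ends up composing on Python 3 and macOS; the 'python3' feature name is the one passed in by the RustStandaloneExtension entry further below.)

# Sketch only: mirrors the command assembly in rustbuild() above.
cargocmd = ['cargo', 'rustc', '--release']
feature_flags = ['python3']  # self.py3_features on Python 3
cargocmd.append('--no-default-features')
cargocmd.extend(('--features', ' '.join(feature_flags)))
cargocmd.append('--')
# macOS only: let the extension resolve CPython symbols at load time
cargocmd.extend(('-C', 'link-arg=-undefined', '-C', 'link-arg=dynamic_lookup'))
print(' '.join(cargocmd))
# cargo rustc --release --no-default-features --features python3 --
#     -C link-arg=-undefined -C link-arg=dynamic_lookup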
1517 class RustStandaloneExtension(RustExtension):
1518 class RustStandaloneExtension(RustExtension):
1518 def __init__(self, pydottedname, rustcrate, dylibname, **kw):
1519 def __init__(self, pydottedname, rustcrate, dylibname, **kw):
1519 RustExtension.__init__(
1520 RustExtension.__init__(
1520 self, pydottedname, [], dylibname, rustcrate, **kw
1521 self, pydottedname, [], dylibname, rustcrate, **kw
1521 )
1522 )
1522 self.dylibname = dylibname
1523 self.dylibname = dylibname
1523
1524
1524 def build(self, target_dir):
1525 def build(self, target_dir):
1525 self.rustbuild()
1526 self.rustbuild()
1526 target = [target_dir]
1527 target = [target_dir]
1527 target.extend(self.name.split('.'))
1528 target.extend(self.name.split('.'))
1528 target[-1] += DYLIB_SUFFIX
1529 target[-1] += DYLIB_SUFFIX
1529 shutil.copy2(
1530 shutil.copy2(
1530 os.path.join(
1531 os.path.join(
1531 self.rusttargetdir, self.dylibname + self.rustdylibsuffix()
1532 self.rusttargetdir, self.dylibname + self.rustdylibsuffix()
1532 ),
1533 ),
1533 os.path.join(*target),
1534 os.path.join(*target),
1534 )
1535 )
1535
1536
1536
1537
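(Aside: a sketch, with hypothetical values, of how build() above maps the dotted module name to the copy destination for the compiled dylib; DYLIB_SUFFIX is defined elsewhere in setup.py and is platform-dependent.)

import os

target_dir = 'build/lib'           # hypothetical build output directory
name = 'mercurial.rustext'         # pydottedname from the entry below
DYLIB_SUFFIX = '.so'               # assumed value for illustration
target = [target_dir] + name.split('.')
target[-1] += DYLIB_SUFFIX
print(os.path.join(*target))       # build/lib/mercurial/rustext.so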
1537 extmodules = [
1538 extmodules = [
1538 Extension(
1539 Extension(
1539 'mercurial.cext.base85',
1540 'mercurial.cext.base85',
1540 ['mercurial/cext/base85.c'],
1541 ['mercurial/cext/base85.c'],
1541 include_dirs=common_include_dirs,
1542 include_dirs=common_include_dirs,
1542 extra_compile_args=common_cflags,
1543 extra_compile_args=common_cflags,
1543 depends=common_depends,
1544 depends=common_depends,
1544 ),
1545 ),
1545 Extension(
1546 Extension(
1546 'mercurial.cext.bdiff',
1547 'mercurial.cext.bdiff',
1547 ['mercurial/bdiff.c', 'mercurial/cext/bdiff.c'] + xdiff_srcs,
1548 ['mercurial/bdiff.c', 'mercurial/cext/bdiff.c'] + xdiff_srcs,
1548 include_dirs=common_include_dirs,
1549 include_dirs=common_include_dirs,
1549 extra_compile_args=common_cflags,
1550 extra_compile_args=common_cflags,
1550 depends=common_depends + ['mercurial/bdiff.h'] + xdiff_headers,
1551 depends=common_depends + ['mercurial/bdiff.h'] + xdiff_headers,
1551 ),
1552 ),
1552 Extension(
1553 Extension(
1553 'mercurial.cext.mpatch',
1554 'mercurial.cext.mpatch',
1554 ['mercurial/mpatch.c', 'mercurial/cext/mpatch.c'],
1555 ['mercurial/mpatch.c', 'mercurial/cext/mpatch.c'],
1555 include_dirs=common_include_dirs,
1556 include_dirs=common_include_dirs,
1556 extra_compile_args=common_cflags,
1557 extra_compile_args=common_cflags,
1557 depends=common_depends,
1558 depends=common_depends,
1558 ),
1559 ),
1559 Extension(
1560 Extension(
1560 'mercurial.cext.parsers',
1561 'mercurial.cext.parsers',
1561 [
1562 [
1562 'mercurial/cext/charencode.c',
1563 'mercurial/cext/charencode.c',
1563 'mercurial/cext/dirs.c',
1564 'mercurial/cext/dirs.c',
1564 'mercurial/cext/manifest.c',
1565 'mercurial/cext/manifest.c',
1565 'mercurial/cext/parsers.c',
1566 'mercurial/cext/parsers.c',
1566 'mercurial/cext/pathencode.c',
1567 'mercurial/cext/pathencode.c',
1567 'mercurial/cext/revlog.c',
1568 'mercurial/cext/revlog.c',
1568 ],
1569 ],
1569 include_dirs=common_include_dirs,
1570 include_dirs=common_include_dirs,
1570 extra_compile_args=common_cflags,
1571 extra_compile_args=common_cflags,
1571 depends=common_depends
1572 depends=common_depends
1572 + [
1573 + [
1573 'mercurial/cext/charencode.h',
1574 'mercurial/cext/charencode.h',
1574 'mercurial/cext/revlog.h',
1575 'mercurial/cext/revlog.h',
1575 ],
1576 ],
1576 ),
1577 ),
1577 Extension(
1578 Extension(
1578 'mercurial.cext.osutil',
1579 'mercurial.cext.osutil',
1579 ['mercurial/cext/osutil.c'],
1580 ['mercurial/cext/osutil.c'],
1580 include_dirs=common_include_dirs,
1581 include_dirs=common_include_dirs,
1581 extra_compile_args=common_cflags + osutil_cflags,
1582 extra_compile_args=common_cflags + osutil_cflags,
1582 extra_link_args=osutil_ldflags,
1583 extra_link_args=osutil_ldflags,
1583 depends=common_depends,
1584 depends=common_depends,
1584 ),
1585 ),
1585 Extension(
1586 Extension(
1586 'mercurial.thirdparty.zope.interface._zope_interface_coptimizations',
1587 'mercurial.thirdparty.zope.interface._zope_interface_coptimizations',
1587 [
1588 [
1588 'mercurial/thirdparty/zope/interface/_zope_interface_coptimizations.c',
1589 'mercurial/thirdparty/zope/interface/_zope_interface_coptimizations.c',
1589 ],
1590 ],
1590 extra_compile_args=common_cflags,
1591 extra_compile_args=common_cflags,
1591 ),
1592 ),
1592 Extension(
1593 Extension(
1593 'mercurial.thirdparty.sha1dc',
1594 'mercurial.thirdparty.sha1dc',
1594 [
1595 [
1595 'mercurial/thirdparty/sha1dc/cext.c',
1596 'mercurial/thirdparty/sha1dc/cext.c',
1596 'mercurial/thirdparty/sha1dc/lib/sha1.c',
1597 'mercurial/thirdparty/sha1dc/lib/sha1.c',
1597 'mercurial/thirdparty/sha1dc/lib/ubc_check.c',
1598 'mercurial/thirdparty/sha1dc/lib/ubc_check.c',
1598 ],
1599 ],
1599 extra_compile_args=common_cflags,
1600 extra_compile_args=common_cflags,
1600 ),
1601 ),
1601 Extension(
1602 Extension(
1602 'hgext.fsmonitor.pywatchman.bser',
1603 'hgext.fsmonitor.pywatchman.bser',
1603 ['hgext/fsmonitor/pywatchman/bser.c'],
1604 ['hgext/fsmonitor/pywatchman/bser.c'],
1604 extra_compile_args=common_cflags,
1605 extra_compile_args=common_cflags,
1605 ),
1606 ),
1606 RustStandaloneExtension(
1607 RustStandaloneExtension(
1607 'mercurial.rustext', 'hg-cpython', 'librusthg', py3_features='python3'
1608 'mercurial.rustext', 'hg-cpython', 'librusthg', py3_features='python3'
1608 ),
1609 ),
1609 ]
1610 ]
1610
1611
1611
1612
1612 sys.path.insert(0, 'contrib/python-zstandard')
1613 sys.path.insert(0, 'contrib/python-zstandard')
1613 import setup_zstd
1614 import setup_zstd
1614
1615
1615 zstd = setup_zstd.get_c_extension(
1616 zstd = setup_zstd.get_c_extension(
1616 name='mercurial.zstd', root=os.path.abspath(os.path.dirname(__file__))
1617 name='mercurial.zstd', root=os.path.abspath(os.path.dirname(__file__))
1617 )
1618 )
1618 zstd.extra_compile_args += common_cflags
1619 zstd.extra_compile_args += common_cflags
1619 extmodules.append(zstd)
1620 extmodules.append(zstd)
1620
1621
1621 try:
1622 try:
1622 from distutils import cygwinccompiler
1623 from distutils import cygwinccompiler
1623
1624
1624 # the -mno-cygwin option has been deprecated for years
1625 # the -mno-cygwin option has been deprecated for years
1625 mingw32compilerclass = cygwinccompiler.Mingw32CCompiler
1626 mingw32compilerclass = cygwinccompiler.Mingw32CCompiler
1626
1627
1627 class HackedMingw32CCompiler(cygwinccompiler.Mingw32CCompiler):
1628 class HackedMingw32CCompiler(cygwinccompiler.Mingw32CCompiler):
1628 def __init__(self, *args, **kwargs):
1629 def __init__(self, *args, **kwargs):
1629 mingw32compilerclass.__init__(self, *args, **kwargs)
1630 mingw32compilerclass.__init__(self, *args, **kwargs)
1630 for i in 'compiler compiler_so linker_exe linker_so'.split():
1631 for i in 'compiler compiler_so linker_exe linker_so'.split():
1631 try:
1632 try:
1632 getattr(self, i).remove('-mno-cygwin')
1633 getattr(self, i).remove('-mno-cygwin')
1633 except ValueError:
1634 except ValueError:
1634 pass
1635 pass
1635
1636
1636 cygwinccompiler.Mingw32CCompiler = HackedMingw32CCompiler
1637 cygwinccompiler.Mingw32CCompiler = HackedMingw32CCompiler
1637 except ImportError:
1638 except ImportError:
1638 # the cygwinccompiler package is not available on some Python
1639 # the cygwinccompiler package is not available on some Python
1639 # distributions like the ones from the optware project for Synology
1640 # distributions like the ones from the optware project for Synology
1640 # DiskStation boxes
1641 # DiskStation boxes
1641 class HackedMingw32CCompiler(object):
1642 class HackedMingw32CCompiler(object):
1642 pass
1643 pass
1643
1644
1644
1645
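(Aside: the '-mno-cygwin' stripping in HackedMingw32CCompiler above amounts to the following sketch; the flag lists are hypothetical.)

flags = {
    'compiler': ['gcc', '-mno-cygwin', '-O2'],   # hypothetical
    'linker_so': ['gcc', '-shared'],             # flag absent here
}
for name in 'compiler linker_so'.split():
    try:
        flags[name].remove('-mno-cygwin')
    except ValueError:
        pass  # the flag was not present for this tool
assert '-mno-cygwin' not in flags['compiler']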
1645 if os.name == 'nt':
1646 if os.name == 'nt':
1646 # Allow compiler/linker flags to be added to Visual Studio builds. Passing
1647 # Allow compiler/linker flags to be added to Visual Studio builds. Passing
1647 # extra_link_args to distutils.extensions.Extension() doesn't have any
1648 # extra_link_args to distutils.extensions.Extension() doesn't have any
1648 # effect.
1649 # effect.
1649 from distutils import msvccompiler
1650 from distutils import msvccompiler
1650
1651
1651 msvccompilerclass = msvccompiler.MSVCCompiler
1652 msvccompilerclass = msvccompiler.MSVCCompiler
1652
1653
1653 class HackedMSVCCompiler(msvccompiler.MSVCCompiler):
1654 class HackedMSVCCompiler(msvccompiler.MSVCCompiler):
1654 def initialize(self):
1655 def initialize(self):
1655 msvccompilerclass.initialize(self)
1656 msvccompilerclass.initialize(self)
1656 # "warning LNK4197: export 'func' specified multiple times"
1657 # "warning LNK4197: export 'func' specified multiple times"
1657 self.ldflags_shared.append('/ignore:4197')
1658 self.ldflags_shared.append('/ignore:4197')
1658 self.ldflags_shared_debug.append('/ignore:4197')
1659 self.ldflags_shared_debug.append('/ignore:4197')
1659
1660
1660 msvccompiler.MSVCCompiler = HackedMSVCCompiler
1661 msvccompiler.MSVCCompiler = HackedMSVCCompiler
1661
1662
1662 packagedata = {
1663 packagedata = {
1663 'mercurial': [
1664 'mercurial': [
1664 'locale/*/LC_MESSAGES/hg.mo',
1665 'locale/*/LC_MESSAGES/hg.mo',
1665 'dummycert.pem',
1666 'dummycert.pem',
1666 ],
1667 ],
1667 'mercurial.defaultrc': [
1668 'mercurial.defaultrc': [
1668 '*.rc',
1669 '*.rc',
1669 ],
1670 ],
1670 'mercurial.helptext': [
1671 'mercurial.helptext': [
1671 '*.txt',
1672 '*.txt',
1672 ],
1673 ],
1673 'mercurial.helptext.internals': [
1674 'mercurial.helptext.internals': [
1674 '*.txt',
1675 '*.txt',
1675 ],
1676 ],
1676 }
1677 }
1677
1678
1678
1679
1679 def ordinarypath(p):
1680 def ordinarypath(p):
1680 return p and p[0] != '.' and p[-1] != '~'
1681 return p and p[0] != '.' and p[-1] != '~'
1681
1682
1682
1683
1683 for root in ('templates',):
1684 for root in ('templates',):
1684 for curdir, dirs, files in os.walk(os.path.join('mercurial', root)):
1685 for curdir, dirs, files in os.walk(os.path.join('mercurial', root)):
1685 packagename = curdir.replace(os.sep, '.')
1686 packagename = curdir.replace(os.sep, '.')
1686 packagedata[packagename] = list(filter(ordinarypath, files))
1687 packagedata[packagename] = list(filter(ordinarypath, files))
1687
1688
1688 datafiles = []
1689 datafiles = []
1689
1690
1690 # distutils expects version to be str/unicode. Converting it to
1691 # distutils expects version to be str/unicode. Converting it to
1691 # unicode on Python 2 still works because it won't contain any
1692 # unicode on Python 2 still works because it won't contain any
1692 # non-ascii bytes and will be implicitly converted back to bytes
1693 # non-ascii bytes and will be implicitly converted back to bytes
1693 # when operated on.
1694 # when operated on.
1694 assert isinstance(version, str)
1695 assert isinstance(version, str)
1695 setupversion = version
1696 setupversion = version
1696
1697
1697 extra = {}
1698 extra = {}
1698
1699
1699 py2exepackages = [
1700 py2exepackages = [
1700 'hgdemandimport',
1701 'hgdemandimport',
1701 'hgext3rd',
1702 'hgext3rd',
1702 'hgext',
1703 'hgext',
1703 'email',
1704 'email',
1704 # implicitly imported per module policy
1705 # implicitly imported per module policy
1705 # (cffi wouldn't be used as a frozen exe)
1706 # (cffi wouldn't be used as a frozen exe)
1706 'mercurial.cext',
1707 'mercurial.cext',
1707 #'mercurial.cffi',
1708 #'mercurial.cffi',
1708 'mercurial.pure',
1709 'mercurial.pure',
1709 ]
1710 ]
1710
1711
1711 py2exe_includes = []
1712 py2exe_includes = []
1712
1713
1713 py2exeexcludes = []
1714 py2exeexcludes = []
1714 py2exedllexcludes = ['crypt32.dll']
1715 py2exedllexcludes = ['crypt32.dll']
1715
1716
1716 if issetuptools:
1717 if issetuptools:
1717 extra['python_requires'] = supportedpy
1718 extra['python_requires'] = supportedpy
1718
1719
1719 if py2exeloaded:
1720 if py2exeloaded:
1720 extra['console'] = [
1721 extra['console'] = [
1721 {
1722 {
1722 'script': 'hg',
1723 'script': 'hg',
1723 'copyright': 'Copyright (C) 2005-2021 Olivia Mackall and others',
1724 'copyright': 'Copyright (C) 2005-2021 Olivia Mackall and others',
1724 'product_version': version,
1725 'product_version': version,
1725 }
1726 }
1726 ]
1727 ]
1727 # Sub command of 'build' because 'py2exe' does not handle sub_commands.
1728 # Sub command of 'build' because 'py2exe' does not handle sub_commands.
1728 # Need to override hgbuild because it has a private copy of
1729 # Need to override hgbuild because it has a private copy of
1729 # build.sub_commands.
1730 # build.sub_commands.
1730 hgbuild.sub_commands.insert(0, ('build_hgextindex', None))
1731 hgbuild.sub_commands.insert(0, ('build_hgextindex', None))
1731 # put dlls in sub directory so that they won't pollute PATH
1732 # put dlls in sub directory so that they won't pollute PATH
1732 extra['zipfile'] = 'lib/library.zip'
1733 extra['zipfile'] = 'lib/library.zip'
1733
1734
1734 # We allow some configuration to be supplemented via environment
1735 # We allow some configuration to be supplemented via environment
1735 # variables. This is better than setup.cfg files because it allows
1736 # variables. This is better than setup.cfg files because it allows
1736 # supplementing configs instead of replacing them.
1737 # supplementing configs instead of replacing them.
1737 extrapackages = os.environ.get('HG_PY2EXE_EXTRA_PACKAGES')
1738 extrapackages = os.environ.get('HG_PY2EXE_EXTRA_PACKAGES')
1738 if extrapackages:
1739 if extrapackages:
1739 py2exepackages.extend(extrapackages.split(' '))
1740 py2exepackages.extend(extrapackages.split(' '))
1740
1741
1741 extra_includes = os.environ.get('HG_PY2EXE_EXTRA_INCLUDES')
1742 extra_includes = os.environ.get('HG_PY2EXE_EXTRA_INCLUDES')
1742 if extra_includes:
1743 if extra_includes:
1743 py2exe_includes.extend(extra_includes.split(' '))
1744 py2exe_includes.extend(extra_includes.split(' '))
1744
1745
1745 excludes = os.environ.get('HG_PY2EXE_EXTRA_EXCLUDES')
1746 excludes = os.environ.get('HG_PY2EXE_EXTRA_EXCLUDES')
1746 if excludes:
1747 if excludes:
1747 py2exeexcludes.extend(excludes.split(' '))
1748 py2exeexcludes.extend(excludes.split(' '))
1748
1749
1749 dllexcludes = os.environ.get('HG_PY2EXE_EXTRA_DLL_EXCLUDES')
1750 dllexcludes = os.environ.get('HG_PY2EXE_EXTRA_DLL_EXCLUDES')
1750 if dllexcludes:
1751 if dllexcludes:
1751 py2exedllexcludes.extend(dllexcludes.split(' '))
1752 py2exedllexcludes.extend(dllexcludes.split(' '))
1752
1753
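(Aside: a usage sketch for the HG_PY2EXE_* hooks handled above; the package and DLL names here are examples only, not recommendations.)

import os
import subprocess

env = dict(os.environ)
env['HG_PY2EXE_EXTRA_PACKAGES'] = 'keyring win32ctypes'  # space-separated
env['HG_PY2EXE_EXTRA_DLL_EXCLUDES'] = 'msvcr90.dll'
subprocess.check_call(['python', 'setup.py', 'py2exe'], env=env)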
1753 if os.environ.get('PYOXIDIZER'):
1754 if os.environ.get('PYOXIDIZER'):
1754 hgbuild.sub_commands.insert(0, ('build_hgextindex', None))
1755 hgbuild.sub_commands.insert(0, ('build_hgextindex', None))
1755
1756
1756 if os.name == 'nt':
1757 if os.name == 'nt':
1757 # Windows binary file versions for exe/dll files must have the
1758 # Windows binary file versions for exe/dll files must have the
1758 # form W.X.Y.Z, where W,X,Y,Z are numbers in the range 0..65535
1759 # form W.X.Y.Z, where W,X,Y,Z are numbers in the range 0..65535
1759 setupversion = setupversion.split(r'+', 1)[0]
1760 setupversion = setupversion.split(r'+', 1)[0]
1760
1761
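(Aside: a one-line illustration, with a hypothetical version string, of the '+' split above that keeps only the W.X.Y.Z-compatible part.)

setupversion = '5.9.1+20-abcdef'.split('+', 1)[0]  # hypothetical input
assert setupversion == '5.9.1'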
1761 if sys.platform == 'darwin' and os.path.exists('/usr/bin/xcodebuild'):
1762 if sys.platform == 'darwin' and os.path.exists('/usr/bin/xcodebuild'):
1762 version = runcmd(['/usr/bin/xcodebuild', '-version'], {})[1].splitlines()
1763 version = runcmd(['/usr/bin/xcodebuild', '-version'], {})[1].splitlines()
1763 if version:
1764 if version:
1764 version = version[0]
1765 version = version[0]
1765 if sys.version_info[0] == 3:
1766 if sys.version_info[0] == 3:
1766 version = version.decode('utf-8')
1767 version = version.decode('utf-8')
1767 xcode4 = version.startswith('Xcode') and StrictVersion(
1768 xcode4 = version.startswith('Xcode') and StrictVersion(
1768 version.split()[1]
1769 version.split()[1]
1769 ) >= StrictVersion('4.0')
1770 ) >= StrictVersion('4.0')
1770 xcode51 = re.match(r'^Xcode\s+5\.1', version) is not None
1771 xcode51 = re.match(r'^Xcode\s+5\.1', version) is not None
1771 else:
1772 else:
1772 # xcodebuild returns empty on OS X Lion when XCode 4.3 is not
1773 # xcodebuild returns empty on OS X Lion when XCode 4.3 is not
1773 # installed and only the command-line tools are present. Assume
1774 # installed and only the command-line tools are present. Assume
1774 # that only happens on >= Lion, and thus there is no PPC support.
1775 # that only happens on >= Lion, and thus there is no PPC support.
1775 xcode4 = True
1776 xcode4 = True
1776 xcode51 = False
1777 xcode51 = False
1777
1778
1778 # XCode 4.0 dropped support for ppc architecture, which is hardcoded in
1779 # XCode 4.0 dropped support for ppc architecture, which is hardcoded in
1779 # distutils.sysconfig
1780 # distutils.sysconfig
1780 if xcode4:
1781 if xcode4:
1781 os.environ['ARCHFLAGS'] = ''
1782 os.environ['ARCHFLAGS'] = ''
1782
1783
1783 # XCode 5.1 changes clang such that it now fails to compile if the
1784 # XCode 5.1 changes clang such that it now fails to compile if the
1784 # -mno-fused-madd flag is passed, but the version of Python shipped with
1785 # -mno-fused-madd flag is passed, but the version of Python shipped with
1785 # OS X 10.9 Mavericks includes this flag. This causes problems in all
1786 # OS X 10.9 Mavericks includes this flag. This causes problems in all
1786 # C extension modules, and a bug has been filed upstream at
1787 # C extension modules, and a bug has been filed upstream at
1787 # http://bugs.python.org/issue21244. We also need to patch this here
1788 # http://bugs.python.org/issue21244. We also need to patch this here
1788 # so Mercurial can continue to compile in the meantime.
1789 # so Mercurial can continue to compile in the meantime.
1789 if xcode51:
1790 if xcode51:
1790 cflags = get_config_var('CFLAGS')
1791 cflags = get_config_var('CFLAGS')
1791 if cflags and re.search(r'-mno-fused-madd\b', cflags) is not None:
1792 if cflags and re.search(r'-mno-fused-madd\b', cflags) is not None:
1792 os.environ['CFLAGS'] = (
1793 os.environ['CFLAGS'] = (
1793 os.environ.get('CFLAGS', '') + ' -Qunused-arguments'
1794 os.environ.get('CFLAGS', '') + ' -Qunused-arguments'
1794 )
1795 )
1795
1796
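(Aside: the XCode 5.1 CFLAGS workaround above in isolation, with a hypothetical flag string.)

import re

cflags = '-mno-fused-madd -O2'  # hypothetical CFLAGS from sysconfig
if cflags and re.search(r'-mno-fused-madd\b', cflags) is not None:
    cflags += ' -Qunused-arguments'  # clang then ignores the stale flag
assert cflags.endswith('-Qunused-arguments')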
1796 setup(
1797 setup(
1797 name='mercurial',
1798 name='mercurial',
1798 version=setupversion,
1799 version=setupversion,
1799 author='Olivia Mackall and many others',
1800 author='Olivia Mackall and many others',
1800 author_email='mercurial@mercurial-scm.org',
1801 author_email='mercurial@mercurial-scm.org',
1801 url='https://mercurial-scm.org/',
1802 url='https://mercurial-scm.org/',
1802 download_url='https://mercurial-scm.org/release/',
1803 download_url='https://mercurial-scm.org/release/',
1803 description=(
1804 description=(
1804 'Fast scalable distributed SCM (revision control, version '
1805 'Fast scalable distributed SCM (revision control, version '
1805 'control) system'
1806 'control) system'
1806 ),
1807 ),
1807 long_description=(
1808 long_description=(
1808 'Mercurial is a distributed SCM tool written in Python.'
1809 'Mercurial is a distributed SCM tool written in Python.'
1809 ' It is used by a number of large projects that require'
1810 ' It is used by a number of large projects that require'
1810 ' fast, reliable distributed revision control, such as '
1811 ' fast, reliable distributed revision control, such as '
1811 'Mozilla.'
1812 'Mozilla.'
1812 ),
1813 ),
1813 license='GNU GPLv2 or any later version',
1814 license='GNU GPLv2 or any later version',
1814 classifiers=[
1815 classifiers=[
1815 'Development Status :: 6 - Mature',
1816 'Development Status :: 6 - Mature',
1816 'Environment :: Console',
1817 'Environment :: Console',
1817 'Intended Audience :: Developers',
1818 'Intended Audience :: Developers',
1818 'Intended Audience :: System Administrators',
1819 'Intended Audience :: System Administrators',
1819 'License :: OSI Approved :: GNU General Public License (GPL)',
1820 'License :: OSI Approved :: GNU General Public License (GPL)',
1820 'Natural Language :: Danish',
1821 'Natural Language :: Danish',
1821 'Natural Language :: English',
1822 'Natural Language :: English',
1822 'Natural Language :: German',
1823 'Natural Language :: German',
1823 'Natural Language :: Italian',
1824 'Natural Language :: Italian',
1824 'Natural Language :: Japanese',
1825 'Natural Language :: Japanese',
1825 'Natural Language :: Portuguese (Brazilian)',
1826 'Natural Language :: Portuguese (Brazilian)',
1826 'Operating System :: Microsoft :: Windows',
1827 'Operating System :: Microsoft :: Windows',
1827 'Operating System :: OS Independent',
1828 'Operating System :: OS Independent',
1828 'Operating System :: POSIX',
1829 'Operating System :: POSIX',
1829 'Programming Language :: C',
1830 'Programming Language :: C',
1830 'Programming Language :: Python',
1831 'Programming Language :: Python',
1831 'Topic :: Software Development :: Version Control',
1832 'Topic :: Software Development :: Version Control',
1832 ],
1833 ],
1833 scripts=scripts,
1834 scripts=scripts,
1834 packages=packages,
1835 packages=packages,
1835 ext_modules=extmodules,
1836 ext_modules=extmodules,
1836 data_files=datafiles,
1837 data_files=datafiles,
1837 package_data=packagedata,
1838 package_data=packagedata,
1838 cmdclass=cmdclass,
1839 cmdclass=cmdclass,
1839 distclass=hgdist,
1840 distclass=hgdist,
1840 options={
1841 options={
1841 'py2exe': {
1842 'py2exe': {
1842 'bundle_files': 3,
1843 'bundle_files': 3,
1843 'dll_excludes': py2exedllexcludes,
1844 'dll_excludes': py2exedllexcludes,
1844 'includes': py2exe_includes,
1845 'includes': py2exe_includes,
1845 'excludes': py2exeexcludes,
1846 'excludes': py2exeexcludes,
1846 'packages': py2exepackages,
1847 'packages': py2exepackages,
1847 },
1848 },
1848 'bdist_mpkg': {
1849 'bdist_mpkg': {
1849 'zipdist': False,
1850 'zipdist': False,
1850 'license': 'COPYING',
1851 'license': 'COPYING',
1851 'readme': 'contrib/packaging/macosx/Readme.html',
1852 'readme': 'contrib/packaging/macosx/Readme.html',
1852 'welcome': 'contrib/packaging/macosx/Welcome.html',
1853 'welcome': 'contrib/packaging/macosx/Welcome.html',
1853 },
1854 },
1854 },
1855 },
1855 **extra
1856 **extra
1856 )
1857 )
@@ -1,106 +1,106 b''
1 # extension to emulate invoking 'dirstate.write()' at the time
1 # extension to emulate invoking 'dirstate.write()' at the time
2 # specified by '[fakedirstatewritetime] fakenow', only when
2 # specified by '[fakedirstatewritetime] fakenow', only when
3 # 'dirstate.write()' is invoked via the functions below:
3 # 'dirstate.write()' is invoked via the functions below:
4 #
4 #
5 # - 'workingctx._poststatusfixup()' (= 'repo.status()')
5 # - 'workingctx._poststatusfixup()' (= 'repo.status()')
6 # - 'committablectx.markcommitted()'
6 # - 'committablectx.markcommitted()'
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 from mercurial import (
10 from mercurial import (
11 context,
11 context,
12 dirstate,
12 dirstate,
13 dirstatemap as dirstatemapmod,
13 dirstatemap as dirstatemapmod,
14 extensions,
14 extensions,
15 policy,
15 policy,
16 registrar,
16 registrar,
17 )
17 )
18 from mercurial.utils import dateutil
18 from mercurial.utils import dateutil
19
19
20 try:
20 try:
21 from mercurial import rustext
21 from mercurial import rustext
22
22
23 rustext.__name__ # force actual import (see hgdemandimport)
23 rustext.__name__ # force actual import (see hgdemandimport)
24 except ImportError:
24 except ImportError:
25 rustext = None
25 rustext = None
26
26
27 configtable = {}
27 configtable = {}
28 configitem = registrar.configitem(configtable)
28 configitem = registrar.configitem(configtable)
29
29
30 configitem(
30 configitem(
31 b'fakedirstatewritetime',
31 b'fakedirstatewritetime',
32 b'fakenow',
32 b'fakenow',
33 default=None,
33 default=None,
34 )
34 )
35
35
36 parsers = policy.importmod('parsers')
36 parsers = policy.importmod('parsers')
37 rustmod = policy.importrust('parsers')
37 rustmod = policy.importrust('parsers')
38
38
39
39
40 def pack_dirstate(fakenow, orig, dmap, copymap, pl, now):
40 def pack_dirstate(fakenow, orig, dmap, copymap, pl, now):
41 # execute what the original parsers.pack_dirstate would actually do,
41 # execute what the original parsers.pack_dirstate would actually do,
42 # for consistency
42 # for consistency
43 actualnow = int(now)
43 actualnow = int(now)
44 for f, e in dmap.items():
44 for f, e in dmap.items():
45 if e.need_delay(actualnow):
45 if e.need_delay(actualnow):
46 e.set_possibly_dirty()
46 e.set_possibly_dirty()
47
47
48 return orig(dmap, copymap, pl, fakenow)
48 return orig(dmap, copymap, pl, fakenow)
49
49
50
50
51 def fakewrite(ui, func):
51 def fakewrite(ui, func):
52 # fake "now" of 'pack_dirstate' only if it is invoked while 'func'
52 # fake "now" of 'pack_dirstate' only if it is invoked while 'func'
53
53
54 fakenow = ui.config(b'fakedirstatewritetime', b'fakenow')
54 fakenow = ui.config(b'fakedirstatewritetime', b'fakenow')
55 if not fakenow:
55 if not fakenow:
56 # Execute the original one if fakenow isn't configured. This is
56 # Execute the original one if fakenow isn't configured. This is
57 # useful to prevent subrepos from executing the replaced one,
57 # useful to prevent subrepos from executing the replaced one,
58 # because replacing 'parsers.pack_dirstate' is also effective
58 # because replacing 'parsers.pack_dirstate' is also effective
59 # in subrepos.
59 # in subrepos.
60 return func()
60 return func()
61
61
62 # parsing 'fakenow' in YYYYmmddHHMM format makes comparison between
62 # parsing 'fakenow' in YYYYmmddHHMM format makes comparison between
63 # 'fakenow' value and 'touch -t YYYYmmddHHMM' argument easy
63 # 'fakenow' value and 'touch -t YYYYmmddHHMM' argument easy
64 fakenow = dateutil.parsedate(fakenow, [b'%Y%m%d%H%M'])[0]
64 fakenow = dateutil.parsedate(fakenow, [b'%Y%m%d%H%M'])[0]
65
65
66 if rustmod is not None:
66 if rustmod is not None:
67 # The Rust implementation does not use the public parse/pack dirstate
67 # The Rust implementation does not use the public parse/pack dirstate
68 # functions, to prevent conversion round-trips
68 # functions, to prevent conversion round-trips
69 orig_dirstatemap_write = dirstatemapmod.dirstatemap.write
69 orig_dirstatemap_write = dirstatemapmod.dirstatemap.write
70 wrapper = lambda self, st, now: orig_dirstatemap_write(
70 wrapper = lambda self, tr, st, now: orig_dirstatemap_write(
71 self, st, fakenow
71 self, tr, st, fakenow
72 )
72 )
73 dirstatemapmod.dirstatemap.write = wrapper
73 dirstatemapmod.dirstatemap.write = wrapper
74
74
75 orig_dirstate_getfsnow = dirstate._getfsnow
75 orig_dirstate_getfsnow = dirstate._getfsnow
76 wrapper = lambda *args: pack_dirstate(fakenow, orig_pack_dirstate, *args)
76 wrapper = lambda *args: pack_dirstate(fakenow, orig_pack_dirstate, *args)
77
77
78 orig_module = parsers
78 orig_module = parsers
79 orig_pack_dirstate = parsers.pack_dirstate
79 orig_pack_dirstate = parsers.pack_dirstate
80
80
81 orig_module.pack_dirstate = wrapper
81 orig_module.pack_dirstate = wrapper
82 dirstate._getfsnow = lambda *args: fakenow
82 dirstate._getfsnow = lambda *args: fakenow
83 try:
83 try:
84 return func()
84 return func()
85 finally:
85 finally:
86 orig_module.pack_dirstate = orig_pack_dirstate
86 orig_module.pack_dirstate = orig_pack_dirstate
87 dirstate._getfsnow = orig_dirstate_getfsnow
87 dirstate._getfsnow = orig_dirstate_getfsnow
88 if rustmod is not None:
88 if rustmod is not None:
89 dirstatemapmod.dirstatemap.write = orig_dirstatemap_write
89 dirstatemapmod.dirstatemap.write = orig_dirstatemap_write
90
90
91
91
92 def _poststatusfixup(orig, workingctx, status, fixup):
92 def _poststatusfixup(orig, workingctx, status, fixup):
93 ui = workingctx.repo().ui
93 ui = workingctx.repo().ui
94 return fakewrite(ui, lambda: orig(workingctx, status, fixup))
94 return fakewrite(ui, lambda: orig(workingctx, status, fixup))
95
95
96
96
97 def markcommitted(orig, committablectx, node):
97 def markcommitted(orig, committablectx, node):
98 ui = committablectx.repo().ui
98 ui = committablectx.repo().ui
99 return fakewrite(ui, lambda: orig(committablectx, node))
99 return fakewrite(ui, lambda: orig(committablectx, node))
100
100
101
101
102 def extsetup(ui):
102 def extsetup(ui):
103 extensions.wrapfunction(
103 extensions.wrapfunction(
104 context.workingctx, '_poststatusfixup', _poststatusfixup
104 context.workingctx, '_poststatusfixup', _poststatusfixup
105 )
105 )
106 extensions.wrapfunction(context.workingctx, 'markcommitted', markcommitted)
106 extensions.wrapfunction(context.workingctx, 'markcommitted', markcommitted)
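
(Aside: a minimal sketch, with a hypothetical timestamp, of the 'fakenow' round-trip this extension relies on — the YYYYmmddHHMM config string is parsed with the same format that 'touch -t' accepts, so the two stay directly comparable.)

from mercurial.utils import dateutil

fakenow = b'200001010000'  # hypothetical [fakedirstatewritetime] fakenow
timestamp = dateutil.parsedate(fakenow, [b'%Y%m%d%H%M'])[0]
# 'timestamp' is the epoch-seconds value used in place of the real "now"
# when pack_dirstate() writes out mtimes.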
@@ -1,443 +1,445 b''
1 Show all commands except debug commands
1 Show all commands except debug commands
2 $ hg debugcomplete
2 $ hg debugcomplete
3 abort
3 abort
4 add
4 add
5 addremove
5 addremove
6 annotate
6 annotate
7 archive
7 archive
8 backout
8 backout
9 bisect
9 bisect
10 bookmarks
10 bookmarks
11 branch
11 branch
12 branches
12 branches
13 bundle
13 bundle
14 cat
14 cat
15 clone
15 clone
16 commit
16 commit
17 config
17 config
18 continue
18 continue
19 copy
19 copy
20 diff
20 diff
21 export
21 export
22 files
22 files
23 forget
23 forget
24 graft
24 graft
25 grep
25 grep
26 heads
26 heads
27 help
27 help
28 identify
28 identify
29 import
29 import
30 incoming
30 incoming
31 init
31 init
32 locate
32 locate
33 log
33 log
34 manifest
34 manifest
35 merge
35 merge
36 outgoing
36 outgoing
37 parents
37 parents
38 paths
38 paths
39 phase
39 phase
40 pull
40 pull
41 purge
41 purge
42 push
42 push
43 recover
43 recover
44 remove
44 remove
45 rename
45 rename
46 resolve
46 resolve
47 revert
47 revert
48 rollback
48 rollback
49 root
49 root
50 serve
50 serve
51 shelve
51 shelve
52 status
52 status
53 summary
53 summary
54 tag
54 tag
55 tags
55 tags
56 tip
56 tip
57 unbundle
57 unbundle
58 unshelve
58 unshelve
59 update
59 update
60 verify
60 verify
61 version
61 version
62
62
63 Show all commands that start with "a"
63 Show all commands that start with "a"
64 $ hg debugcomplete a
64 $ hg debugcomplete a
65 abort
65 abort
66 add
66 add
67 addremove
67 addremove
68 annotate
68 annotate
69 archive
69 archive
70
70
71 Do not show debug commands if there are other candidates
71 Do not show debug commands if there are other candidates
72 $ hg debugcomplete d
72 $ hg debugcomplete d
73 diff
73 diff
74
74
75 Show debug commands if there are no other candidates
75 Show debug commands if there are no other candidates
76 $ hg debugcomplete debug
76 $ hg debugcomplete debug
77 debugancestor
77 debugancestor
78 debugantivirusrunning
78 debugantivirusrunning
79 debugapplystreamclonebundle
79 debugapplystreamclonebundle
80 debugbackupbundle
80 debugbackupbundle
81 debugbuilddag
81 debugbuilddag
82 debugbundle
82 debugbundle
83 debugcapabilities
83 debugcapabilities
84 debugchangedfiles
84 debugchangedfiles
85 debugcheckstate
85 debugcheckstate
86 debugcolor
86 debugcolor
87 debugcommands
87 debugcommands
88 debugcomplete
88 debugcomplete
89 debugconfig
89 debugconfig
90 debugcreatestreamclonebundle
90 debugcreatestreamclonebundle
91 debugdag
91 debugdag
92 debugdata
92 debugdata
93 debugdate
93 debugdate
94 debugdeltachain
94 debugdeltachain
95 debugdirstate
95 debugdirstate
96 debugdirstateignorepatternshash
96 debugdiscovery
97 debugdiscovery
97 debugdownload
98 debugdownload
98 debugextensions
99 debugextensions
99 debugfileset
100 debugfileset
100 debugformat
101 debugformat
101 debugfsinfo
102 debugfsinfo
102 debuggetbundle
103 debuggetbundle
103 debugignore
104 debugignore
104 debugindex
105 debugindex
105 debugindexdot
106 debugindexdot
106 debugindexstats
107 debugindexstats
107 debuginstall
108 debuginstall
108 debugknown
109 debugknown
109 debuglabelcomplete
110 debuglabelcomplete
110 debuglocks
111 debuglocks
111 debugmanifestfulltextcache
112 debugmanifestfulltextcache
112 debugmergestate
113 debugmergestate
113 debugnamecomplete
114 debugnamecomplete
114 debugnodemap
115 debugnodemap
115 debugobsolete
116 debugobsolete
116 debugp1copies
117 debugp1copies
117 debugp2copies
118 debugp2copies
118 debugpathcomplete
119 debugpathcomplete
119 debugpathcopies
120 debugpathcopies
120 debugpeer
121 debugpeer
121 debugpickmergetool
122 debugpickmergetool
122 debugpushkey
123 debugpushkey
123 debugpvec
124 debugpvec
124 debugrebuilddirstate
125 debugrebuilddirstate
125 debugrebuildfncache
126 debugrebuildfncache
126 debugrename
127 debugrename
127 debugrequires
128 debugrequires
128 debugrevlog
129 debugrevlog
129 debugrevlogindex
130 debugrevlogindex
130 debugrevspec
131 debugrevspec
131 debugserve
132 debugserve
132 debugsetparents
133 debugsetparents
133 debugshell
134 debugshell
134 debugsidedata
135 debugsidedata
135 debugssl
136 debugssl
136 debugstrip
137 debugstrip
137 debugsub
138 debugsub
138 debugsuccessorssets
139 debugsuccessorssets
139 debugtagscache
140 debugtagscache
140 debugtemplate
141 debugtemplate
141 debuguigetpass
142 debuguigetpass
142 debuguiprompt
143 debuguiprompt
143 debugupdatecaches
144 debugupdatecaches
144 debugupgraderepo
145 debugupgraderepo
145 debugwalk
146 debugwalk
146 debugwhyunstable
147 debugwhyunstable
147 debugwireargs
148 debugwireargs
148 debugwireproto
149 debugwireproto
149
150
150 Do not show the alias of a debug command if there are other candidates
151 Do not show the alias of a debug command if there are other candidates
151 (this should hide rawcommit)
152 (this should hide rawcommit)
152 $ hg debugcomplete r
153 $ hg debugcomplete r
153 recover
154 recover
154 remove
155 remove
155 rename
156 rename
156 resolve
157 resolve
157 revert
158 revert
158 rollback
159 rollback
159 root
160 root
160 Show the alias of a debug command if there are no other candidates
161 Show the alias of a debug command if there are no other candidates
161 $ hg debugcomplete rawc
162 $ hg debugcomplete rawc
162
163
163
164
164 Show the global options
165 Show the global options
165 $ hg debugcomplete --options | sort
166 $ hg debugcomplete --options | sort
166 --color
167 --color
167 --config
168 --config
168 --cwd
169 --cwd
169 --debug
170 --debug
170 --debugger
171 --debugger
171 --encoding
172 --encoding
172 --encodingmode
173 --encodingmode
173 --help
174 --help
174 --hidden
175 --hidden
175 --noninteractive
176 --noninteractive
176 --pager
177 --pager
177 --profile
178 --profile
178 --quiet
179 --quiet
179 --repository
180 --repository
180 --time
181 --time
181 --traceback
182 --traceback
182 --verbose
183 --verbose
183 --version
184 --version
184 -R
185 -R
185 -h
186 -h
186 -q
187 -q
187 -v
188 -v
188 -y
189 -y
189
190
190 Show the options for the "serve" command
191 Show the options for the "serve" command
191 $ hg debugcomplete --options serve | sort
192 $ hg debugcomplete --options serve | sort
192 --accesslog
193 --accesslog
193 --address
194 --address
194 --certificate
195 --certificate
195 --cmdserver
196 --cmdserver
196 --color
197 --color
197 --config
198 --config
198 --cwd
199 --cwd
199 --daemon
200 --daemon
200 --daemon-postexec
201 --daemon-postexec
201 --debug
202 --debug
202 --debugger
203 --debugger
203 --encoding
204 --encoding
204 --encodingmode
205 --encodingmode
205 --errorlog
206 --errorlog
206 --help
207 --help
207 --hidden
208 --hidden
208 --ipv6
209 --ipv6
209 --name
210 --name
210 --noninteractive
211 --noninteractive
211 --pager
212 --pager
212 --pid-file
213 --pid-file
213 --port
214 --port
214 --prefix
215 --prefix
215 --print-url
216 --print-url
216 --profile
217 --profile
217 --quiet
218 --quiet
218 --repository
219 --repository
219 --stdio
220 --stdio
220 --style
221 --style
221 --subrepos
222 --subrepos
222 --templates
223 --templates
223 --time
224 --time
224 --traceback
225 --traceback
225 --verbose
226 --verbose
226 --version
227 --version
227 --web-conf
228 --web-conf
228 -6
229 -6
229 -A
230 -A
230 -E
231 -E
231 -R
232 -R
232 -S
233 -S
233 -a
234 -a
234 -d
235 -d
235 -h
236 -h
236 -n
237 -n
237 -p
238 -p
238 -q
239 -q
239 -t
240 -t
240 -v
241 -v
241 -y
242 -y
242
243
243 Show an error if we use --options with an ambiguous abbreviation
244 Show an error if we use --options with an ambiguous abbreviation
244 $ hg debugcomplete --options s
245 $ hg debugcomplete --options s
245 hg: command 's' is ambiguous:
246 hg: command 's' is ambiguous:
246 serve shelve showconfig status summary
247 serve shelve showconfig status summary
247 [10]
248 [10]
248
249
249 Show all commands + options
250 Show all commands + options
250 $ hg debugcommands
251 $ hg debugcommands
251 abort: dry-run
252 abort: dry-run
252 add: include, exclude, subrepos, dry-run
253 add: include, exclude, subrepos, dry-run
253 addremove: similarity, subrepos, include, exclude, dry-run
254 addremove: similarity, subrepos, include, exclude, dry-run
254 annotate: rev, follow, no-follow, text, user, file, date, number, changeset, line-number, skip, ignore-all-space, ignore-space-change, ignore-blank-lines, ignore-space-at-eol, include, exclude, template
255 annotate: rev, follow, no-follow, text, user, file, date, number, changeset, line-number, skip, ignore-all-space, ignore-space-change, ignore-blank-lines, ignore-space-at-eol, include, exclude, template
255 archive: no-decode, prefix, rev, type, subrepos, include, exclude
256 archive: no-decode, prefix, rev, type, subrepos, include, exclude
256 backout: merge, commit, no-commit, parent, rev, edit, tool, include, exclude, message, logfile, date, user
257 backout: merge, commit, no-commit, parent, rev, edit, tool, include, exclude, message, logfile, date, user
257 bisect: reset, good, bad, skip, extend, command, noupdate
258 bisect: reset, good, bad, skip, extend, command, noupdate
258 bookmarks: force, rev, delete, rename, inactive, list, template
259 bookmarks: force, rev, delete, rename, inactive, list, template
259 branch: force, clean, rev
260 branch: force, clean, rev
260 branches: active, closed, rev, template
261 branches: active, closed, rev, template
261 bundle: force, rev, branch, base, all, type, ssh, remotecmd, insecure
262 bundle: force, rev, branch, base, all, type, ssh, remotecmd, insecure
262 cat: output, rev, decode, include, exclude, template
263 cat: output, rev, decode, include, exclude, template
263 clone: noupdate, updaterev, rev, branch, pull, uncompressed, stream, ssh, remotecmd, insecure
264 clone: noupdate, updaterev, rev, branch, pull, uncompressed, stream, ssh, remotecmd, insecure
264 commit: addremove, close-branch, amend, secret, edit, force-close-branch, interactive, include, exclude, message, logfile, date, user, subrepos
265 commit: addremove, close-branch, amend, secret, edit, force-close-branch, interactive, include, exclude, message, logfile, date, user, subrepos
265 config: untrusted, exp-all-known, edit, local, source, shared, non-shared, global, template
266 config: untrusted, exp-all-known, edit, local, source, shared, non-shared, global, template
266 continue: dry-run
267 continue: dry-run
267 copy: forget, after, at-rev, force, include, exclude, dry-run
268 copy: forget, after, at-rev, force, include, exclude, dry-run
268 debugancestor:
269 debugancestor:
269 debugantivirusrunning:
270 debugantivirusrunning:
270 debugapplystreamclonebundle:
271 debugapplystreamclonebundle:
271 debugbackupbundle: recover, patch, git, limit, no-merges, stat, graph, style, template
272 debugbackupbundle: recover, patch, git, limit, no-merges, stat, graph, style, template
272 debugbuilddag: mergeable-file, overwritten-file, new-file
273 debugbuilddag: mergeable-file, overwritten-file, new-file
273 debugbundle: all, part-type, spec
274 debugbundle: all, part-type, spec
274 debugcapabilities:
275 debugcapabilities:
275 debugchangedfiles: compute
276 debugchangedfiles: compute
276 debugcheckstate:
277 debugcheckstate:
277 debugcolor: style
278 debugcolor: style
278 debugcommands:
279 debugcommands:
279 debugcomplete: options
280 debugcomplete: options
280 debugcreatestreamclonebundle:
281 debugcreatestreamclonebundle:
281 debugdag: tags, branches, dots, spaces
282 debugdag: tags, branches, dots, spaces
282 debugdata: changelog, manifest, dir
283 debugdata: changelog, manifest, dir
283 debugdate: extended
284 debugdate: extended
284 debugdeltachain: changelog, manifest, dir, template
285 debugdeltachain: changelog, manifest, dir, template
286 debugdirstateignorepatternshash:
285 debugdirstate: nodates, dates, datesort, dirs
287 debugdirstate: nodates, dates, datesort, dirs
286 debugdiscovery: old, nonheads, rev, seed, local-as-revs, remote-as-revs, ssh, remotecmd, insecure, template
288 debugdiscovery: old, nonheads, rev, seed, local-as-revs, remote-as-revs, ssh, remotecmd, insecure, template
287 debugdownload: output
289 debugdownload: output
288 debugextensions: template
290 debugextensions: template
289 debugfileset: rev, all-files, show-matcher, show-stage
291 debugfileset: rev, all-files, show-matcher, show-stage
290 debugformat: template
292 debugformat: template
291 debugfsinfo:
293 debugfsinfo:
292 debuggetbundle: head, common, type
294 debuggetbundle: head, common, type
293 debugignore:
295 debugignore:
294 debugindex: changelog, manifest, dir, template
296 debugindex: changelog, manifest, dir, template
295 debugindexdot: changelog, manifest, dir
297 debugindexdot: changelog, manifest, dir
296 debugindexstats:
298 debugindexstats:
297 debuginstall: template
299 debuginstall: template
298 debugknown:
300 debugknown:
299 debuglabelcomplete:
301 debuglabelcomplete:
300 debuglocks: force-free-lock, force-free-wlock, set-lock, set-wlock
302 debuglocks: force-free-lock, force-free-wlock, set-lock, set-wlock
301 debugmanifestfulltextcache: clear, add
303 debugmanifestfulltextcache: clear, add
302 debugmergestate: style, template
304 debugmergestate: style, template
303 debugnamecomplete:
305 debugnamecomplete:
304 debugnodemap: dump-new, dump-disk, check, metadata
306 debugnodemap: dump-new, dump-disk, check, metadata
305 debugobsolete: flags, record-parents, rev, exclusive, index, delete, date, user, template
307 debugobsolete: flags, record-parents, rev, exclusive, index, delete, date, user, template
306 debugp1copies: rev
308 debugp1copies: rev
307 debugp2copies: rev
309 debugp2copies: rev
308 debugpathcomplete: full, normal, added, removed
310 debugpathcomplete: full, normal, added, removed
309 debugpathcopies: include, exclude
311 debugpathcopies: include, exclude
310 debugpeer:
312 debugpeer:
311 debugpickmergetool: rev, changedelete, include, exclude, tool
313 debugpickmergetool: rev, changedelete, include, exclude, tool
312 debugpushkey:
314 debugpushkey:
313 debugpvec:
315 debugpvec:
314 debugrebuilddirstate: rev, minimal
316 debugrebuilddirstate: rev, minimal
315 debugrebuildfncache:
317 debugrebuildfncache:
316 debugrename: rev
318 debugrename: rev
317 debugrequires:
319 debugrequires:
318 debugrevlog: changelog, manifest, dir, dump
320 debugrevlog: changelog, manifest, dir, dump
319 debugrevlogindex: changelog, manifest, dir, format
321 debugrevlogindex: changelog, manifest, dir, format
320 debugrevspec: optimize, show-revs, show-set, show-stage, no-optimized, verify-optimized
322 debugrevspec: optimize, show-revs, show-set, show-stage, no-optimized, verify-optimized
321 debugserve: sshstdio, logiofd, logiofile
323 debugserve: sshstdio, logiofd, logiofile
322 debugsetparents:
324 debugsetparents:
323 debugshell:
325 debugshell:
324 debugsidedata: changelog, manifest, dir
326 debugsidedata: changelog, manifest, dir
325 debugssl:
327 debugssl:
326 debugstrip: rev, force, no-backup, nobackup, , keep, bookmark, soft
328 debugstrip: rev, force, no-backup, nobackup, , keep, bookmark, soft
327 debugsub: rev
329 debugsub: rev
328 debugsuccessorssets: closest
330 debugsuccessorssets: closest
329 debugtagscache:
331 debugtagscache:
330 debugtemplate: rev, define
332 debugtemplate: rev, define
331 debuguigetpass: prompt
333 debuguigetpass: prompt
332 debuguiprompt: prompt
334 debuguiprompt: prompt
333 debugupdatecaches:
335 debugupdatecaches:
334 debugupgraderepo: optimize, run, backup, changelog, manifest, filelogs
336 debugupgraderepo: optimize, run, backup, changelog, manifest, filelogs
335 debugwalk: include, exclude
337 debugwalk: include, exclude
336 debugwhyunstable:
338 debugwhyunstable:
337 debugwireargs: three, four, five, ssh, remotecmd, insecure
339 debugwireargs: three, four, five, ssh, remotecmd, insecure
338 debugwireproto: localssh, peer, noreadstderr, nologhandshake, ssh, remotecmd, insecure
340 debugwireproto: localssh, peer, noreadstderr, nologhandshake, ssh, remotecmd, insecure
339 diff: rev, from, to, change, text, git, binary, nodates, noprefix, show-function, reverse, ignore-all-space, ignore-space-change, ignore-blank-lines, ignore-space-at-eol, unified, stat, root, include, exclude, subrepos
341 diff: rev, from, to, change, text, git, binary, nodates, noprefix, show-function, reverse, ignore-all-space, ignore-space-change, ignore-blank-lines, ignore-space-at-eol, unified, stat, root, include, exclude, subrepos
340 export: bookmark, output, switch-parent, rev, text, git, binary, nodates, template
342 export: bookmark, output, switch-parent, rev, text, git, binary, nodates, template
341 files: rev, print0, include, exclude, template, subrepos
343 files: rev, print0, include, exclude, template, subrepos
342 forget: interactive, include, exclude, dry-run
344 forget: interactive, include, exclude, dry-run
343 graft: rev, base, continue, stop, abort, edit, log, no-commit, force, currentdate, currentuser, date, user, tool, dry-run
345 graft: rev, base, continue, stop, abort, edit, log, no-commit, force, currentdate, currentuser, date, user, tool, dry-run
344 grep: print0, all, diff, text, follow, ignore-case, files-with-matches, line-number, rev, all-files, user, date, template, include, exclude
346 grep: print0, all, diff, text, follow, ignore-case, files-with-matches, line-number, rev, all-files, user, date, template, include, exclude
345 heads: rev, topo, active, closed, style, template
347 heads: rev, topo, active, closed, style, template
346 help: extension, command, keyword, system
348 help: extension, command, keyword, system
347 identify: rev, num, id, branch, tags, bookmarks, ssh, remotecmd, insecure, template
349 identify: rev, num, id, branch, tags, bookmarks, ssh, remotecmd, insecure, template
348 import: strip, base, secret, edit, force, no-commit, bypass, partial, exact, prefix, import-branch, message, logfile, date, user, similarity
350 import: strip, base, secret, edit, force, no-commit, bypass, partial, exact, prefix, import-branch, message, logfile, date, user, similarity
349 incoming: force, newest-first, bundle, rev, bookmarks, branch, patch, git, limit, no-merges, stat, graph, style, template, ssh, remotecmd, insecure, subrepos
351 incoming: force, newest-first, bundle, rev, bookmarks, branch, patch, git, limit, no-merges, stat, graph, style, template, ssh, remotecmd, insecure, subrepos
350 init: ssh, remotecmd, insecure
352 init: ssh, remotecmd, insecure
351 locate: rev, print0, fullpath, include, exclude
353 locate: rev, print0, fullpath, include, exclude
352 log: follow, follow-first, date, copies, keyword, rev, line-range, removed, only-merges, user, only-branch, branch, bookmark, prune, patch, git, limit, no-merges, stat, graph, style, template, include, exclude
354 log: follow, follow-first, date, copies, keyword, rev, line-range, removed, only-merges, user, only-branch, branch, bookmark, prune, patch, git, limit, no-merges, stat, graph, style, template, include, exclude
353 manifest: rev, all, template
355 manifest: rev, all, template
354 merge: force, rev, preview, abort, tool
356 merge: force, rev, preview, abort, tool
355 outgoing: force, rev, newest-first, bookmarks, branch, patch, git, limit, no-merges, stat, graph, style, template, ssh, remotecmd, insecure, subrepos
357 outgoing: force, rev, newest-first, bookmarks, branch, patch, git, limit, no-merges, stat, graph, style, template, ssh, remotecmd, insecure, subrepos
356 parents: rev, style, template
358 parents: rev, style, template
357 paths: template
359 paths: template
358 phase: public, draft, secret, force, rev
360 phase: public, draft, secret, force, rev
359 pull: update, force, confirm, rev, bookmark, branch, ssh, remotecmd, insecure
361 pull: update, force, confirm, rev, bookmark, branch, ssh, remotecmd, insecure
360 purge: abort-on-err, all, ignored, dirs, files, print, print0, confirm, include, exclude
362 purge: abort-on-err, all, ignored, dirs, files, print, print0, confirm, include, exclude
361 push: force, rev, bookmark, all-bookmarks, branch, new-branch, pushvars, publish, ssh, remotecmd, insecure
363 push: force, rev, bookmark, all-bookmarks, branch, new-branch, pushvars, publish, ssh, remotecmd, insecure
362 recover: verify
364 recover: verify
363 remove: after, force, subrepos, include, exclude, dry-run
365 remove: after, force, subrepos, include, exclude, dry-run
364 rename: forget, after, at-rev, force, include, exclude, dry-run
366 rename: forget, after, at-rev, force, include, exclude, dry-run
365 resolve: all, list, mark, unmark, no-status, re-merge, tool, include, exclude, template
367 resolve: all, list, mark, unmark, no-status, re-merge, tool, include, exclude, template
366 revert: all, date, rev, no-backup, interactive, include, exclude, dry-run
368 revert: all, date, rev, no-backup, interactive, include, exclude, dry-run
367 rollback: dry-run, force
369 rollback: dry-run, force
368 root: template
370 root: template
369 serve: accesslog, daemon, daemon-postexec, errorlog, port, address, prefix, name, web-conf, webdir-conf, pid-file, stdio, cmdserver, templates, style, ipv6, certificate, print-url, subrepos
371 serve: accesslog, daemon, daemon-postexec, errorlog, port, address, prefix, name, web-conf, webdir-conf, pid-file, stdio, cmdserver, templates, style, ipv6, certificate, print-url, subrepos
370 shelve: addremove, unknown, cleanup, date, delete, edit, keep, list, message, name, patch, interactive, stat, include, exclude
372 shelve: addremove, unknown, cleanup, date, delete, edit, keep, list, message, name, patch, interactive, stat, include, exclude
371 status: all, modified, added, removed, deleted, clean, unknown, ignored, no-status, terse, copies, print0, rev, change, include, exclude, subrepos, template
373 status: all, modified, added, removed, deleted, clean, unknown, ignored, no-status, terse, copies, print0, rev, change, include, exclude, subrepos, template
372 summary: remote
374 summary: remote
373 tag: force, local, rev, remove, edit, message, date, user
375 tag: force, local, rev, remove, edit, message, date, user
374 tags: template
376 tags: template
375 tip: patch, git, style, template
377 tip: patch, git, style, template
376 unbundle: update
378 unbundle: update
377 unshelve: abort, continue, interactive, keep, name, tool, date
379 unshelve: abort, continue, interactive, keep, name, tool, date
378 update: clean, check, merge, date, rev, tool
380 update: clean, check, merge, date, rev, tool
379 verify: full
381 verify: full
380 version: template
382 version: template
381
383
382 $ hg init a
384 $ hg init a
383 $ cd a
385 $ cd a
384 $ echo fee > fee
386 $ echo fee > fee
385 $ hg ci -q -Amfee
387 $ hg ci -q -Amfee
386 $ hg tag fee
388 $ hg tag fee
387 $ mkdir fie
389 $ mkdir fie
388 $ echo dead > fie/dead
390 $ echo dead > fie/dead
389 $ echo live > fie/live
391 $ echo live > fie/live
390 $ hg bookmark fo
392 $ hg bookmark fo
391 $ hg branch -q fie
393 $ hg branch -q fie
392 $ hg ci -q -Amfie
394 $ hg ci -q -Amfie
393 $ echo fo > fo
395 $ echo fo > fo
394 $ hg branch -qf default
396 $ hg branch -qf default
395 $ hg ci -q -Amfo
397 $ hg ci -q -Amfo
396 $ echo Fum > Fum
398 $ echo Fum > Fum
397 $ hg ci -q -AmFum
399 $ hg ci -q -AmFum
398 $ hg bookmark Fum
400 $ hg bookmark Fum
399
401
400 Test debugpathcomplete
402 Test debugpathcomplete
401
403
402 $ hg debugpathcomplete f
404 $ hg debugpathcomplete f
403 fee
405 fee
404 fie
406 fie
405 fo
407 fo
406 $ hg debugpathcomplete -f f
408 $ hg debugpathcomplete -f f
407 fee
409 fee
408 fie/dead
410 fie/dead
409 fie/live
411 fie/live
410 fo
412 fo
411
413
412 $ hg rm Fum
414 $ hg rm Fum
413 $ hg debugpathcomplete -r F
415 $ hg debugpathcomplete -r F
414 Fum
416 Fum
415
417
416 Test debugnamecomplete
418 Test debugnamecomplete
417
419
418 $ hg debugnamecomplete
420 $ hg debugnamecomplete
419 Fum
421 Fum
420 default
422 default
421 fee
423 fee
422 fie
424 fie
423 fo
425 fo
424 tip
426 tip
425 $ hg debugnamecomplete f
427 $ hg debugnamecomplete f
426 fee
428 fee
427 fie
429 fie
428 fo
430 fo
429
431
430 Test debuglabelcomplete, a deprecated name for debugnamecomplete that is still
432 Test debuglabelcomplete, a deprecated name for debugnamecomplete that is still
431 used for completions in some shells.
433 used for completions in some shells.
432
434
433 $ hg debuglabelcomplete
435 $ hg debuglabelcomplete
434 Fum
436 Fum
435 default
437 default
436 fee
438 fee
437 fie
439 fie
438 fo
440 fo
439 tip
441 tip
440 $ hg debuglabelcomplete f
442 $ hg debuglabelcomplete f
441 fee
443 fee
442 fie
444 fie
443 fo
445 fo
NO CONTENT: modified file
The requested commit or file is too big and content was truncated.
NO CONTENT: modified file
The requested commit or file is too big and content was truncated.