dirstate-v2: Write .hg/dirstate back to disk on directory cache changes...
Simon Sapin
r48139:04d1f17f default
@@ -1,3124 +1,3124 @@
# context.py - changeset and file context objects for mercurial
#
# Copyright 2006, 2007 Olivia Mackall <olivia@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno
import filecmp
import os
import stat

from .i18n import _
from .node import (
    hex,
    nullrev,
    short,
)
from .pycompat import (
    getattr,
    open,
)
from . import (
    dagop,
    encoding,
    error,
    fileset,
    match as matchmod,
    mergestate as mergestatemod,
    metadata,
    obsolete as obsmod,
    patch,
    pathutil,
    phases,
    pycompat,
    repoview,
    scmutil,
    sparse,
    subrepo,
    subrepoutil,
    util,
)
from .utils import (
    dateutil,
    stringutil,
)

propertycache = util.propertycache
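# propertycache (from mercurial.util) memoizes the decorated method's result
# as an instance attribute on first access; the context classes below rely on
# it for fields that should be computed lazily, once per instance.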


class basectx(object):
    """A basectx object represents the common logic for its children:
    changectx: read-only context that is already present in the repo,
    workingctx: a context that represents the working directory and can
    be committed,
    memctx: a context that represents changes in-memory and can also
    be committed."""
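    # Illustrative sketch (assumes a loaded `repo`, e.g. from
    # mercurial.hg.repository(ui, path)):
    #
    #     ctx = repo[b'tip']   # changectx: a committed, read-only revision
    #     wctx = repo[None]    # workingctx: the working directory
    #     # memctx instances are constructed explicitly by in-memory
    #     # committing code rather than looked up on the repo.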

    def __init__(self, repo):
        self._repo = repo

    def __bytes__(self):
        return short(self.node())

    __str__ = encoding.strmethod(__bytes__)

    def __repr__(self):
        return "<%s %s>" % (type(self).__name__, str(self))

    def __eq__(self, other):
        try:
            return type(self) == type(other) and self._rev == other._rev
        except AttributeError:
            return False

    def __ne__(self, other):
        return not (self == other)

    def __contains__(self, key):
        return key in self._manifest

    def __getitem__(self, key):
        return self.filectx(key)

    def __iter__(self):
        return iter(self._manifest)

    def _buildstatusmanifest(self, status):
        """Builds a manifest that includes the given status results, if this is
        a working copy context. For non-working copy contexts, it just returns
        the normal manifest."""
        return self.manifest()

    def _matchstatus(self, other, match):
        """This internal method provides a way for child objects to override the
        match operator.
        """
        return match

    def _buildstatus(
        self, other, s, match, listignored, listclean, listunknown
    ):
        """build a status with respect to another context"""
        # Load earliest manifest first for caching reasons. More specifically,
        # if you have revisions 1000 and 1001, 1001 is probably stored as a
        # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
        # 1000 and cache it so that when you read 1001, we just need to apply a
        # delta to what's in the cache. So that's one full reconstruction + one
        # delta application.
        mf2 = None
        if self.rev() is not None and self.rev() < other.rev():
            mf2 = self._buildstatusmanifest(s)
        mf1 = other._buildstatusmanifest(s)
        if mf2 is None:
            mf2 = self._buildstatusmanifest(s)

        modified, added = [], []
        removed = []
        clean = []
        deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
        deletedset = set(deleted)
        d = mf1.diff(mf2, match=match, clean=listclean)
        for fn, value in pycompat.iteritems(d):
            if fn in deletedset:
                continue
            if value is None:
                clean.append(fn)
                continue
            (node1, flag1), (node2, flag2) = value
            if node1 is None:
                added.append(fn)
            elif node2 is None:
                removed.append(fn)
            elif flag1 != flag2:
                modified.append(fn)
            elif node2 not in self._repo.nodeconstants.wdirfilenodeids:
                # When comparing files between two commits, we save time by
                # not comparing the file contents when the nodeids differ.
                # Note that this means we incorrectly report a reverted change
                # to a file as a modification.
                modified.append(fn)
            elif self[fn].cmp(other[fn]):
                modified.append(fn)
            else:
                clean.append(fn)

        if removed:
            # need to filter files if they are already reported as removed
            unknown = [
                fn
                for fn in unknown
                if fn not in mf1 and (not match or match(fn))
            ]
            ignored = [
                fn
                for fn in ignored
                if fn not in mf1 and (not match or match(fn))
            ]
            # if they're deleted, don't report them as removed
            removed = [fn for fn in removed if fn not in deletedset]

        return scmutil.status(
            modified, added, removed, deleted, unknown, ignored, clean
        )

    @propertycache
    def substate(self):
        return subrepoutil.state(self, self._repo.ui)

    def subrev(self, subpath):
        return self.substate[subpath][1]

    def rev(self):
        return self._rev

    def node(self):
        return self._node

    def hex(self):
        return hex(self.node())

    def manifest(self):
        return self._manifest

    def manifestctx(self):
        return self._manifestctx

    def repo(self):
        return self._repo

    def phasestr(self):
        return phases.phasenames[self.phase()]

    def mutable(self):
        return self.phase() > phases.public

    def matchfileset(self, cwd, expr, badfn=None):
        return fileset.match(self, cwd, expr, badfn=badfn)

    def obsolete(self):
        """True if the changeset is obsolete"""
        return self.rev() in obsmod.getrevs(self._repo, b'obsolete')

    def extinct(self):
        """True if the changeset is extinct"""
        return self.rev() in obsmod.getrevs(self._repo, b'extinct')

    def orphan(self):
        """True if the changeset is not obsolete, but its ancestor is"""
        return self.rev() in obsmod.getrevs(self._repo, b'orphan')

    def phasedivergent(self):
        """True if the changeset tries to be a successor of a public changeset

        Only non-public and non-obsolete changesets may be phase-divergent.
        """
        return self.rev() in obsmod.getrevs(self._repo, b'phasedivergent')

    def contentdivergent(self):
        """Is a successor of a changeset with multiple possible successor sets

        Only non-public and non-obsolete changesets may be content-divergent.
        """
        return self.rev() in obsmod.getrevs(self._repo, b'contentdivergent')

    def isunstable(self):
        """True if the changeset is either orphan, phase-divergent or
        content-divergent"""
        return self.orphan() or self.phasedivergent() or self.contentdivergent()

    def instabilities(self):
        """return the list of instabilities affecting this changeset.

        Instabilities are returned as strings. Possible values are:
        - orphan,
        - phase-divergent,
        - content-divergent.
        """
        instabilities = []
        if self.orphan():
            instabilities.append(b'orphan')
        if self.phasedivergent():
            instabilities.append(b'phase-divergent')
        if self.contentdivergent():
            instabilities.append(b'content-divergent')
        return instabilities
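
    # Illustrative usage of the instability queries above (assumes
    # obsolescence markers are enabled on the repository):
    #
    #     for inst in ctx.instabilities():
    #         repo.ui.warn(b'unstable: %s\n' % inst)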

    def parents(self):
        """return contexts for each parent changeset"""
        return self._parents

    def p1(self):
        return self._parents[0]

    def p2(self):
        parents = self._parents
        if len(parents) == 2:
            return parents[1]
        return self._repo[nullrev]

    def _fileinfo(self, path):
        if '_manifest' in self.__dict__:
            try:
                return self._manifest.find(path)
            except KeyError:
                raise error.ManifestLookupError(
                    self._node or b'None', path, _(b'not found in manifest')
                )
        if '_manifestdelta' in self.__dict__ or path in self.files():
            if path in self._manifestdelta:
                return (
                    self._manifestdelta[path],
                    self._manifestdelta.flags(path),
                )
        mfl = self._repo.manifestlog
        try:
            node, flag = mfl[self._changeset.manifest].find(path)
        except KeyError:
            raise error.ManifestLookupError(
                self._node or b'None', path, _(b'not found in manifest')
            )

        return node, flag

    def filenode(self, path):
        return self._fileinfo(path)[0]

    def flags(self, path):
        try:
            return self._fileinfo(path)[1]
        except error.LookupError:
            return b''

    @propertycache
    def _copies(self):
        return metadata.computechangesetcopies(self)

    def p1copies(self):
        return self._copies[0]

    def p2copies(self):
        return self._copies[1]

    def sub(self, path, allowcreate=True):
        '''return a subrepo for the stored revision of path, never wdir()'''
        return subrepo.subrepo(self, path, allowcreate=allowcreate)

    def nullsub(self, path, pctx):
        return subrepo.nullsubrepo(self, path, pctx)

    def workingsub(self, path):
        """return a subrepo for the stored revision, or wdir if this is a wdir
        context.
        """
        return subrepo.subrepo(self, path, allowwdir=True)

    def match(
        self,
        pats=None,
        include=None,
        exclude=None,
        default=b'glob',
        listsubrepos=False,
        badfn=None,
        cwd=None,
    ):
        r = self._repo
        if not cwd:
            cwd = r.getcwd()
        return matchmod.match(
            r.root,
            cwd,
            pats,
            include,
            exclude,
            default,
            auditor=r.nofsauditor,
            ctx=self,
            listsubrepos=listsubrepos,
            badfn=badfn,
        )

    def diff(
        self,
        ctx2=None,
        match=None,
        changes=None,
        opts=None,
        losedatafn=None,
        pathfn=None,
        copy=None,
        copysourcematch=None,
        hunksfilterfn=None,
    ):
        """Returns a diff generator for the given contexts and matcher"""
        if ctx2 is None:
            ctx2 = self.p1()
        if ctx2 is not None:
            ctx2 = self._repo[ctx2]
        return patch.diff(
            self._repo,
            ctx2,
            self,
            match=match,
            changes=changes,
            opts=opts,
            losedatafn=losedatafn,
            pathfn=pathfn,
            copy=copy,
            copysourcematch=copysourcematch,
            hunksfilterfn=hunksfilterfn,
        )

    def dirs(self):
        return self._manifest.dirs()

    def hasdir(self, dir):
        return self._manifest.hasdir(dir)

    def status(
        self,
        other=None,
        match=None,
        listignored=False,
        listclean=False,
        listunknown=False,
        listsubrepos=False,
    ):
        """return status of files between two nodes or node and working
        directory.

        If other is None, compare this node with the working directory.

        ctx1.status(ctx2) returns the status of change from ctx1 to ctx2

        Returns a mercurial.scmutil.status object.

        Data can be accessed using either tuple notation:

        (modified, added, removed, deleted, unknown, ignored, clean)

        or direct attribute access:

        s.modified, s.added, ...
        """

        ctx1 = self
        ctx2 = self._repo[other]

        # This next code block is, admittedly, fragile logic that tests for
        # reversing the contexts and wouldn't need to exist if it weren't for
        # the fast (and common) code path of comparing the working directory
        # with its first parent.
        #
        # What we're aiming for here is the ability to call:
        #
        # workingctx.status(parentctx)
        #
        # If we always built the manifest for each context and compared those,
        # then we'd be done. But the special case of the above call means we
        # just copy the manifest of the parent.
        reversed = False
        if not isinstance(ctx1, changectx) and isinstance(ctx2, changectx):
            reversed = True
            ctx1, ctx2 = ctx2, ctx1

        match = self._repo.narrowmatch(match)
        match = ctx2._matchstatus(ctx1, match)
        r = scmutil.status([], [], [], [], [], [], [])
        r = ctx2._buildstatus(
            ctx1, r, match, listignored, listclean, listunknown
        )

        if reversed:
            # Reverse added and removed. Clear deleted, unknown and ignored as
            # these make no sense to reverse.
            r = scmutil.status(
                r.modified, r.removed, r.added, [], [], [], r.clean
            )

        if listsubrepos:
            for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
                try:
                    rev2 = ctx2.subrev(subpath)
                except KeyError:
                    # A subrepo that existed in node1 was deleted between
                    # node1 and node2 (inclusive). Thus, ctx2's substate
                    # won't contain that subpath. The best we can do is
                    # ignore it.
                    rev2 = None
                submatch = matchmod.subdirmatcher(subpath, match)
                s = sub.status(
                    rev2,
                    match=submatch,
                    ignored=listignored,
                    clean=listclean,
                    unknown=listunknown,
                    listsubrepos=True,
                )
                for k in (
                    'modified',
                    'added',
                    'removed',
                    'deleted',
                    'unknown',
                    'ignored',
                    'clean',
                ):
                    rfiles, sfiles = getattr(r, k), getattr(s, k)
                    rfiles.extend(b"%s/%s" % (subpath, f) for f in sfiles)

        r.modified.sort()
        r.added.sort()
        r.removed.sort()
        r.deleted.sort()
        r.unknown.sort()
        r.ignored.sort()
        r.clean.sort()

        return r

    def mergestate(self, clean=False):
        """Get a mergestate object for this context."""
        raise NotImplementedError(
            '%s does not implement mergestate()' % self.__class__
        )

    def isempty(self):
        return not (
            len(self.parents()) > 1
            or self.branch() != self.p1().branch()
            or self.closesbranch()
            or self.files()
        )


class changectx(basectx):
    """A changecontext object makes access to data related to a particular
    changeset convenient. It represents a read-only context already present in
    the repo."""

    def __init__(self, repo, rev, node, maybe_filtered=True):
        super(changectx, self).__init__(repo)
        self._rev = rev
        self._node = node
        # When maybe_filtered is True, the revision might be affected by
        # changelog filtering and operations through the filtered changelog
        # must be used.
        #
        # When maybe_filtered is False, the revision has already been checked
        # against filtering and is not filtered. Operations through the
        # unfiltered changelog might be used in some cases.
        self._maybe_filtered = maybe_filtered

    def __hash__(self):
        try:
            return hash(self._rev)
        except AttributeError:
            return id(self)

    def __nonzero__(self):
        return self._rev != nullrev

    __bool__ = __nonzero__

    @propertycache
    def _changeset(self):
        if self._maybe_filtered:
            repo = self._repo
        else:
            repo = self._repo.unfiltered()
        return repo.changelog.changelogrevision(self.rev())

    @propertycache
    def _manifest(self):
        return self._manifestctx.read()

    @property
    def _manifestctx(self):
        return self._repo.manifestlog[self._changeset.manifest]

    @propertycache
    def _manifestdelta(self):
        return self._manifestctx.readdelta()

    @propertycache
    def _parents(self):
        repo = self._repo
        if self._maybe_filtered:
            cl = repo.changelog
        else:
            cl = repo.unfiltered().changelog

        p1, p2 = cl.parentrevs(self._rev)
        if p2 == nullrev:
            return [changectx(repo, p1, cl.node(p1), maybe_filtered=False)]
        return [
            changectx(repo, p1, cl.node(p1), maybe_filtered=False),
            changectx(repo, p2, cl.node(p2), maybe_filtered=False),
        ]

    def changeset(self):
        c = self._changeset
        return (
            c.manifest,
            c.user,
            c.date,
            c.files,
            c.description,
            c.extra,
        )

    def manifestnode(self):
        return self._changeset.manifest

    def user(self):
        return self._changeset.user

    def date(self):
        return self._changeset.date

    def files(self):
        return self._changeset.files

    def filesmodified(self):
        modified = set(self.files())
        modified.difference_update(self.filesadded())
        modified.difference_update(self.filesremoved())
        return sorted(modified)

    def filesadded(self):
        filesadded = self._changeset.filesadded
        compute_on_none = True
        if self._repo.filecopiesmode == b'changeset-sidedata':
            compute_on_none = False
        else:
            source = self._repo.ui.config(b'experimental', b'copies.read-from')
            if source == b'changeset-only':
                compute_on_none = False
            elif source != b'compatibility':
                # filelog mode, ignore any changelog content
                filesadded = None
        if filesadded is None:
            if compute_on_none:
                filesadded = metadata.computechangesetfilesadded(self)
            else:
                filesadded = []
        return filesadded

    def filesremoved(self):
        filesremoved = self._changeset.filesremoved
        compute_on_none = True
        if self._repo.filecopiesmode == b'changeset-sidedata':
            compute_on_none = False
        else:
            source = self._repo.ui.config(b'experimental', b'copies.read-from')
            if source == b'changeset-only':
                compute_on_none = False
            elif source != b'compatibility':
                # filelog mode, ignore any changelog content
                filesremoved = None
        if filesremoved is None:
            if compute_on_none:
                filesremoved = metadata.computechangesetfilesremoved(self)
            else:
                filesremoved = []
        return filesremoved

    @propertycache
    def _copies(self):
        p1copies = self._changeset.p1copies
        p2copies = self._changeset.p2copies
        compute_on_none = True
        if self._repo.filecopiesmode == b'changeset-sidedata':
            compute_on_none = False
        else:
            source = self._repo.ui.config(b'experimental', b'copies.read-from')
            # If config says to get copy metadata only from changeset, then
            # return that, defaulting to {} if there was no copy metadata. In
            # compatibility mode, we return copy data from the changeset if it
            # was recorded there, and otherwise we fall back to getting it from
            # the filelogs (below).
            #
            # If we are in compatibility mode and there is no data in the
            # changeset, we get the copy metadata from the filelogs.
            #
            # Otherwise, when config said to read only from the filelog, we
            # get the copy metadata from the filelogs.
            if source == b'changeset-only':
                compute_on_none = False
            elif source != b'compatibility':
                # filelog mode, ignore any changelog content
                p1copies = p2copies = None
        if p1copies is None:
            if compute_on_none:
                p1copies, p2copies = super(changectx, self)._copies
            else:
                if p1copies is None:
                    p1copies = {}
                if p2copies is None:
                    p2copies = {}
        return p1copies, p2copies
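
    # Summary of the branches above (illustrative): with changeset-sidedata
    # storage the changeset data is authoritative; otherwise the value of
    # experimental.copies.read-from selects the source:
    #
    #     changeset-only -> changeset data only, defaulting to {}
    #     compatibility  -> changeset data, falling back to the filelogs
    #     anything else  -> filelog mode, changelog content is ignored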

    def description(self):
        return self._changeset.description

    def branch(self):
        return encoding.tolocal(self._changeset.extra.get(b"branch"))

    def closesbranch(self):
        return b'close' in self._changeset.extra

    def extra(self):
        """Return a dict of extra information."""
        return self._changeset.extra

    def tags(self):
        """Return a list of byte tag names"""
        return self._repo.nodetags(self._node)

    def bookmarks(self):
        """Return a list of byte bookmark names."""
        return self._repo.nodebookmarks(self._node)

    def phase(self):
        return self._repo._phasecache.phase(self._repo, self._rev)

    def hidden(self):
        return self._rev in repoview.filterrevs(self._repo, b'visible')

    def isinmemory(self):
        return False

    def children(self):
        """return list of changectx contexts for each child changeset.

        This returns only the immediate child changesets. Use descendants() to
        recursively walk children.
        """
        c = self._repo.changelog.children(self._node)
        return [self._repo[x] for x in c]

    def ancestors(self):
        for a in self._repo.changelog.ancestors([self._rev]):
            yield self._repo[a]

    def descendants(self):
        """Recursively yield all children of the changeset.

        For just the immediate children, use children()
        """
        for d in self._repo.changelog.descendants([self._rev]):
            yield self._repo[d]

    def filectx(self, path, fileid=None, filelog=None):
        """get a file context from this changeset"""
        if fileid is None:
            fileid = self.filenode(path)
        return filectx(
            self._repo, path, fileid=fileid, changectx=self, filelog=filelog
        )

    def ancestor(self, c2, warn=False):
        """return the "best" ancestor context of self and c2

        If there are multiple candidates, it will show a message and check
        merge.preferancestor configuration before falling back to the
        revlog ancestor."""
        # deal with workingctxs
        n2 = c2._node
        if n2 is None:
            n2 = c2._parents[0]._node
        cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
        if not cahs:
            anc = self._repo.nodeconstants.nullid
        elif len(cahs) == 1:
            anc = cahs[0]
        else:
            # experimental config: merge.preferancestor
            for r in self._repo.ui.configlist(b'merge', b'preferancestor'):
                try:
                    ctx = scmutil.revsymbol(self._repo, r)
                except error.RepoLookupError:
                    continue
                anc = ctx.node()
                if anc in cahs:
                    break
            else:
                anc = self._repo.changelog.ancestor(self._node, n2)
            if warn:
                self._repo.ui.status(
                    (
                        _(b"note: using %s as ancestor of %s and %s\n")
                        % (short(anc), short(self._node), short(n2))
                    )
                    + b''.join(
                        _(
                            b" alternatively, use --config "
                            b"merge.preferancestor=%s\n"
                        )
                        % short(n)
                        for n in sorted(cahs)
                        if n != anc
                    )
                )
        return self._repo[anc]
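
    # Illustrative use (hypothetical revision names):
    #
    #     anc = repo[b'feature'].ancestor(repo[b'default'], warn=True)
    #
    # With multiple common-ancestor heads, the merge.preferancestor
    # configuration picks the winner before falling back to the revlog
    # ancestor, as implemented above.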

    def isancestorof(self, other):
        """True if this changeset is an ancestor of other"""
        return self._repo.changelog.isancestorrev(self._rev, other._rev)

    def walk(self, match):
        '''Generates matching file names.'''

        # Wrap match.bad method to have message with nodeid
        def bad(fn, msg):
            # The manifest doesn't know about subrepos, so don't complain about
            # paths into valid subrepos.
            if any(fn == s or fn.startswith(s + b'/') for s in self.substate):
                return
            match.bad(fn, _(b'no such file in rev %s') % self)

        m = matchmod.badmatch(self._repo.narrowmatch(match), bad)
        return self._manifest.walk(m)

    def matches(self, match):
        return self.walk(match)


class basefilectx(object):
    """A filecontext object represents the common logic for its children:
    filectx: read-only access to a filerevision that is already present
    in the repo,
    workingfilectx: a filecontext that represents files from the working
    directory,
    memfilectx: a filecontext that represents files in-memory,
    """

    @propertycache
    def _filelog(self):
        return self._repo.file(self._path)

    @propertycache
    def _changeid(self):
        if '_changectx' in self.__dict__:
            return self._changectx.rev()
        elif '_descendantrev' in self.__dict__:
            # this file context was created from a revision with a known
            # descendant; we can (lazily) correct for linkrev aliases
            return self._adjustlinkrev(self._descendantrev)
        else:
            return self._filelog.linkrev(self._filerev)

    @propertycache
    def _filenode(self):
        if '_fileid' in self.__dict__:
            return self._filelog.lookup(self._fileid)
        else:
            return self._changectx.filenode(self._path)

    @propertycache
    def _filerev(self):
        return self._filelog.rev(self._filenode)

    @propertycache
    def _repopath(self):
        return self._path

    def __nonzero__(self):
        try:
            self._filenode
            return True
        except error.LookupError:
            # file is missing
            return False

    __bool__ = __nonzero__

    def __bytes__(self):
        try:
            return b"%s@%s" % (self.path(), self._changectx)
        except error.LookupError:
            return b"%s@???" % self.path()

    __str__ = encoding.strmethod(__bytes__)

    def __repr__(self):
        return "<%s %s>" % (type(self).__name__, str(self))

    def __hash__(self):
        try:
            return hash((self._path, self._filenode))
        except AttributeError:
            return id(self)

    def __eq__(self, other):
        try:
            return (
                type(self) == type(other)
                and self._path == other._path
                and self._filenode == other._filenode
            )
        except AttributeError:
            return False

    def __ne__(self, other):
        return not (self == other)

    def filerev(self):
        return self._filerev

    def filenode(self):
        return self._filenode

    @propertycache
    def _flags(self):
        return self._changectx.flags(self._path)

    def flags(self):
        return self._flags

    def filelog(self):
        return self._filelog

    def rev(self):
        return self._changeid

    def linkrev(self):
        return self._filelog.linkrev(self._filerev)

    def node(self):
        return self._changectx.node()

    def hex(self):
        return self._changectx.hex()

    def user(self):
        return self._changectx.user()

    def date(self):
        return self._changectx.date()

    def files(self):
        return self._changectx.files()

    def description(self):
        return self._changectx.description()

    def branch(self):
        return self._changectx.branch()

    def extra(self):
        return self._changectx.extra()

    def phase(self):
        return self._changectx.phase()

    def phasestr(self):
        return self._changectx.phasestr()

    def obsolete(self):
        return self._changectx.obsolete()

    def instabilities(self):
        return self._changectx.instabilities()

    def manifest(self):
        return self._changectx.manifest()

    def changectx(self):
        return self._changectx

    def renamed(self):
        return self._copied

    def copysource(self):
        return self._copied and self._copied[0]

    def repo(self):
        return self._repo

    def size(self):
        return len(self.data())

    def path(self):
        return self._path

    def isbinary(self):
        try:
            return stringutil.binary(self.data())
        except IOError:
            return False

    def isexec(self):
        return b'x' in self.flags()

    def islink(self):
        return b'l' in self.flags()

    def isabsent(self):
        """whether this filectx represents a file not in self._changectx

        This is mainly for merge code to detect change/delete conflicts. This
        is expected to be False for all subclasses of basectx."""
        return False

    _customcmp = False

    def cmp(self, fctx):
        """compare with other file context

        returns True if different than fctx.
        """
        if fctx._customcmp:
            return fctx.cmp(self)

        if self._filenode is None:
            raise error.ProgrammingError(
                b'filectx.cmp() must be reimplemented if not backed by revlog'
            )

        if fctx._filenode is None:
            if self._repo._encodefilterpats:
                # can't rely on size() because wdir content may be decoded
                return self._filelog.cmp(self._filenode, fctx.data())
            if self.size() - 4 == fctx.size():
                # size() can match:
                # if file data starts with '\1\n', empty metadata block is
                # prepended, which adds 4 bytes to filelog.size().
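                # For example (illustrative): working-copy data b'\x01\nfoo'
                # is stored as b'\x01\n\x01\n' + b'\x01\nfoo', so the filelog
                # size is len(data) + 4 for such files.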
                return self._filelog.cmp(self._filenode, fctx.data())
            if self.size() == fctx.size() or self.flags() == b'l':
                # size() matches: need to compare content
                # issue6456: Always compare symlinks because the size can
                # represent an encrypted string under EXT4 encryption
                # (fscrypt).
                return self._filelog.cmp(self._filenode, fctx.data())

        # size() differs
        return True

    def _adjustlinkrev(self, srcrev, inclusive=False, stoprev=None):
        """return the first ancestor of <srcrev> introducing <fnode>

        If the linkrev of the file revision does not point to an ancestor of
        srcrev, we'll walk down the ancestors until we find one introducing
        this file revision.

        :srcrev: the changeset revision we search ancestors from
        :inclusive: if true, the src revision will also be checked
        :stoprev: an optional revision to stop the walk at. If no introduction
                  of this file content could be found before this floor
                  revision, the function will return "None" and stop its
                  iteration.
        """
        repo = self._repo
        cl = repo.unfiltered().changelog
        mfl = repo.manifestlog
        # fetch the linkrev
        lkr = self.linkrev()
        if srcrev == lkr:
            return lkr
        # hack to reuse ancestor computation when searching for renames
        memberanc = getattr(self, '_ancestrycontext', None)
        iteranc = None
        if srcrev is None:
            # wctx case, used by workingfilectx during mergecopy
            revs = [p.rev() for p in self._repo[None].parents()]
            inclusive = True  # we skipped the real (revless) source
        else:
            revs = [srcrev]
        if memberanc is None:
            memberanc = iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
        # check if this linkrev is an ancestor of srcrev
        if lkr not in memberanc:
            if iteranc is None:
                iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
            fnode = self._filenode
            path = self._path
            for a in iteranc:
                if stoprev is not None and a < stoprev:
                    return None
                ac = cl.read(a)  # get changeset data (we avoid object creation)
                if path in ac[3]:  # checking the 'files' field.
                    # The file has been touched, check if the content is
                    # similar to the one we search for.
                    if fnode == mfl[ac[0]].readfast().get(path):
                        return a
            # In theory, we should never get out of that loop without a result.
            # But if the manifest uses a buggy file revision (not a child of
            # the one it replaces), we could. Such a buggy situation will
            # likely result in a crash somewhere else at some point.
        return lkr
1052
1052
    def isintroducedafter(self, changelogrev):
        """True if a filectx has been introduced after a given floor revision"""
        if self.linkrev() >= changelogrev:
            return True
        introrev = self._introrev(stoprev=changelogrev)
        if introrev is None:
            return False
        return introrev >= changelogrev

    def introrev(self):
        """return the rev of the changeset which introduced this file revision

        This method is different from linkrev because it takes into account
        the changeset the filectx was created from. It ensures the returned
        revision is one of its ancestors. This prevents bugs from
        'linkrev-shadowing' when a file revision is used by multiple
        changesets.
        """
        return self._introrev()

    def _introrev(self, stoprev=None):
        """
        Same as `introrev`, but with an extra argument to limit changelog
        iteration range in some internal use cases.

        If `stoprev` is set, the `introrev` will not be searched past that
        `stoprev` revision and "None" might be returned. This is useful to
        limit the iteration range.
        """
        toprev = None
        attrs = vars(self)
        if '_changeid' in attrs:
            # We have a cached value already
            toprev = self._changeid
        elif '_changectx' in attrs:
            # We know which changelog entry we are coming from
            toprev = self._changectx.rev()

        if toprev is not None:
            return self._adjustlinkrev(toprev, inclusive=True, stoprev=stoprev)
        elif '_descendantrev' in attrs:
            introrev = self._adjustlinkrev(self._descendantrev, stoprev=stoprev)
            # be nice and cache the result of the computation
            if introrev is not None:
                self._changeid = introrev
            return introrev
        else:
            return self.linkrev()

    def introfilectx(self):
        """Return filectx having identical contents, but pointing to the
        changeset revision where this filectx was introduced"""
        introrev = self.introrev()
        if self.rev() == introrev:
            return self
        return self.filectx(self.filenode(), changeid=introrev)

    def _parentfilectx(self, path, fileid, filelog):
        """create parent filectx keeping ancestry info for _adjustlinkrev()"""
        fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
        if '_changeid' in vars(self) or '_changectx' in vars(self):
            # If self is associated with a changeset (probably explicitly
            # fed), ensure the created filectx is associated with a
            # changeset that is an ancestor of self.changectx.
            # This lets us later use _adjustlinkrev to get a correct link.
            fctx._descendantrev = self.rev()
            fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
        elif '_descendantrev' in vars(self):
            # Otherwise propagate _descendantrev if we have one associated.
            fctx._descendantrev = self._descendantrev
            fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
        return fctx

    def parents(self):
        _path = self._path
        fl = self._filelog
        parents = self._filelog.parents(self._filenode)
        pl = [
            (_path, node, fl)
            for node in parents
            if node != self._repo.nodeconstants.nullid
        ]

        r = fl.renamed(self._filenode)
        if r:
            # - In the simple rename case, both parents are nullid and pl is
            #   empty.
            # - In case of merge, only one of the parents is nullid and it
            #   should be replaced with the rename information. This parent
            #   is -always- the first one.
            #
            # As nullid parents have always been filtered out in the list
            # comprehension above, inserting at index 0 always amounts to
            # "replacing the first nullid parent with the rename
            # information".
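            # For example (hypothetical names): if 'b' was created by
            # renaming 'a', this filelog revision of 'b' has no real
            # parents, so pl == [] and the rename source becomes the sole
            # (first) parent: pl == [(b'a', <node of a>, filelog of 'a')].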
            pl.insert(0, (r[0], r[1], self._repo.file(r[0])))

        return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]

    def p1(self):
        return self.parents()[0]

    def p2(self):
        p = self.parents()
        if len(p) == 2:
            return p[1]
        return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)

    def annotate(self, follow=False, skiprevs=None, diffopts=None):
        """Returns a list of annotateline objects for each line in the file

        - line.fctx is the filectx of the node where that line was last changed
        - line.lineno is the line number at the first appearance in the managed
          file
        - line.text is the data on that line (including newline character)
        """
        getlog = util.lrucachefunc(lambda x: self._repo.file(x))

        def parents(f):
            # Cut _descendantrev here to mitigate the penalty of lazy linkrev
            # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
            # from the topmost introrev (= srcrev) down to p.linkrev() if it
            # isn't an ancestor of the srcrev.
            f._changeid  # force computation and caching of the changeid
            pl = f.parents()

            # Don't return renamed parents if we aren't following.
            if not follow:
                pl = [p for p in pl if p.path() == f.path()]

            # renamed filectx won't have a filelog yet, so set it
            # from the cache to save time
            for p in pl:
                if '_filelog' not in p.__dict__:
                    p._filelog = getlog(p.path())

            return pl

        # use linkrev to find the first changeset where self appeared
        base = self.introfilectx()
        if getattr(base, '_ancestrycontext', None) is None:
            # it is safe to use an unfiltered repository here because we are
            # walking ancestors only.
            cl = self._repo.unfiltered().changelog
            if base.rev() is None:
                # wctx is not inclusive, but works because _ancestrycontext
                # is used to test filelog revisions
                ac = cl.ancestors(
                    [p.rev() for p in base.parents()], inclusive=True
                )
            else:
                ac = cl.ancestors([base.rev()], inclusive=True)
            base._ancestrycontext = ac

        return dagop.annotate(
            base, parents, skiprevs=skiprevs, diffopts=diffopts
        )

    def ancestors(self, followfirst=False):
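        # Walk the file DAG, keyed by (linkrev, filenode), always popping the
        # largest key first, so ancestors are yielded in (roughly) descending
        # linkrev order; duplicate entries collapse in the dict before they
        # are popped.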
        visit = {}
        c = self
        if followfirst:
            cut = 1
        else:
            cut = None

        while True:
            for parent in c.parents()[:cut]:
                visit[(parent.linkrev(), parent.filenode())] = parent
            if not visit:
                break
            c = visit.pop(max(visit))
            yield c

    def decodeddata(self):
        """Returns `data()` after running repository decoding filters.

        This is often equivalent to how the data would be expressed on disk.
        """
        return self._repo.wwritedata(self.path(), self.data())


class filectx(basefilectx):
    """A filecontext object makes access to data related to a particular
    filerevision convenient."""

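    # Illustrative construction (hypothetical path and revision): either of
    #
    #     filectx(repo, b'foo/bar.txt', changeid=42)
    #     filectx(repo, b'foo/bar.txt', fileid=somefilenode)
    #
    # is valid; at least one of changeid, fileid or changectx must be given
    # (enforced by the assertion in __init__ below).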
    def __init__(
        self,
        repo,
        path,
        changeid=None,
        fileid=None,
        filelog=None,
        changectx=None,
    ):
        """changeid must be a revision number, if specified.
        fileid can be a file revision or node."""
        self._repo = repo
        self._path = path

        assert (
            changeid is not None or fileid is not None or changectx is not None
        ), b"bad args: changeid=%r, fileid=%r, changectx=%r" % (
            changeid,
            fileid,
            changectx,
        )

        if filelog is not None:
            self._filelog = filelog

        if changeid is not None:
            self._changeid = changeid
        if changectx is not None:
            self._changectx = changectx
        if fileid is not None:
            self._fileid = fileid

    @propertycache
    def _changectx(self):
        try:
            return self._repo[self._changeid]
        except error.FilteredRepoLookupError:
            # Linkrev may point to any revision in the repository. When the
            # repository is filtered this may lead to `filectx` trying to
            # build `changectx` for a filtered revision. In such a case we
            # fall back to creating `changectx` on the unfiltered version of
            # the repository. This fallback should not be an issue because
            # `changectx` from `filectx` are not used in complex operations
            # that care about filtering.
            #
            # This fallback is a cheap and dirty fix that prevents several
            # crashes. It does not ensure the behavior is correct. However
            # the behavior was not correct before filtering either, and
            # "incorrect behavior" is seen as better than "crash".
            #
            # Linkrevs have several serious issues with filtering that are
            # complicated to solve. Proper handling of the issue here should
            # be considered when solutions to the linkrev issues are on the
            # table.
            return self._repo.unfiltered()[self._changeid]

    def filectx(self, fileid, changeid=None):
        """opens an arbitrary revision of the file without
        opening a new filelog"""
        return filectx(
            self._repo,
            self._path,
            fileid=fileid,
            filelog=self._filelog,
            changeid=changeid,
        )

    def rawdata(self):
        return self._filelog.rawdata(self._filenode)

    def rawflags(self):
        """low-level revlog flags"""
        return self._filelog.flags(self._filerev)

    def data(self):
        try:
            return self._filelog.read(self._filenode)
        except error.CensoredNodeError:
            if self._repo.ui.config(b"censor", b"policy") == b"ignore":
                return b""
            raise error.Abort(
                _(b"censored node: %s") % short(self._filenode),
                hint=_(b"set censor.policy to ignore errors"),
            )

    def size(self):
        return self._filelog.size(self._filerev)

    @propertycache
    def _copied(self):
        """check if file was actually renamed in this changeset revision

        If a rename is logged in the file revision, we report the copy for
        the changeset only if the file revision's linkrev points back to the
        changeset in question, or if both changeset parents contain different
        file revisions.
        """

        renamed = self._filelog.renamed(self._filenode)
        if not renamed:
            return None

        if self.rev() == self.linkrev():
            return renamed

        name = self.path()
        fnode = self._filenode
        for p in self._changectx.parents():
            try:
                if fnode == p.filenode(name):
                    return None
            except error.LookupError:
                pass
        return renamed

    def children(self):
        # hard for renames (filelog children do not follow copies)
        c = self._filelog.children(self._filenode)
        return [
            filectx(self._repo, self._path, fileid=x, filelog=self._filelog)
            for x in c
        ]


class committablectx(basectx):
    """A committablectx object provides common functionality for a context that
    wants the ability to commit, e.g. workingctx or memctx."""

    def __init__(
        self,
        repo,
        text=b"",
        user=None,
        date=None,
        extra=None,
        changes=None,
        branch=None,
    ):
        super(committablectx, self).__init__(repo)
        self._rev = None
        self._node = None
        self._text = text
        if date:
            self._date = dateutil.parsedate(date)
        if user:
            self._user = user
        if changes:
            self._status = changes

        self._extra = {}
        if extra:
            self._extra = extra.copy()
        if branch is not None:
            self._extra[b'branch'] = encoding.fromlocal(branch)
        if not self._extra.get(b'branch'):
            self._extra[b'branch'] = b'default'

    def __bytes__(self):
        return bytes(self._parents[0]) + b"+"

    def hex(self):
        return self._repo.nodeconstants.wdirhex

    __str__ = encoding.strmethod(__bytes__)

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    @propertycache
    def _status(self):
        return self._repo.status()

    @propertycache
    def _user(self):
        return self._repo.ui.username()

    @propertycache
    def _date(self):
        ui = self._repo.ui
        date = ui.configdate(b'devel', b'default-date')
        if date is None:
            date = dateutil.makedate()
        return date

    def subrev(self, subpath):
        return None

    def manifestnode(self):
        return None

    def user(self):
        return self._user or self._repo.ui.username()

    def date(self):
        return self._date

    def description(self):
        return self._text

    def files(self):
        return sorted(
            self._status.modified + self._status.added + self._status.removed
        )

    def modified(self):
        return self._status.modified

    def added(self):
        return self._status.added

    def removed(self):
        return self._status.removed

    def deleted(self):
        return self._status.deleted

    filesmodified = modified
    filesadded = added
    filesremoved = removed

    def branch(self):
        return encoding.tolocal(self._extra[b'branch'])

    def closesbranch(self):
        return b'close' in self._extra

    def extra(self):
        return self._extra

    def isinmemory(self):
        return False

    def tags(self):
        return []

    def bookmarks(self):
        b = []
        for p in self.parents():
            b.extend(p.bookmarks())
        return b

    def phase(self):
        phase = phases.newcommitphase(self._repo.ui)
        for p in self.parents():
            phase = max(phase, p.phase())
        return phase

    def hidden(self):
        return False

    def children(self):
        return []

    def flags(self, path):
        if '_manifest' in self.__dict__:
            try:
                return self._manifest.flags(path)
            except KeyError:
                return b''

        try:
            return self._flagfunc(path)
        except OSError:
            return b''

    def ancestor(self, c2):
        """return the "best" ancestor context of self and c2"""
        return self._parents[0].ancestor(c2)  # punt on two parents for now

    def ancestors(self):
        for p in self._parents:
            yield p
        for a in self._repo.changelog.ancestors(
            [p.rev() for p in self._parents]
        ):
            yield self._repo[a]

    def markcommitted(self, node):
        """Perform post-commit cleanup necessary after committing this ctx

        Specifically, this updates backing stores this working context
        wraps to reflect the fact that the changes reflected by this
        workingctx have been committed. For example, it marks
        modified and added files as normal in the dirstate.

        """

    def dirty(self, missing=False, merge=True, branch=True):
        return False


class workingctx(committablectx):
    """A workingctx object makes access to data related to
    the current working directory convenient.
    date - any valid date string or (unixtime, offset), or None.
    user - username string, or None.
    extra - a dictionary of extra values, or None.
    changes - a list of file lists as returned by localrepo.status()
              or None to use the repository status.
    """

    def __init__(
        self, repo, text=b"", user=None, date=None, extra=None, changes=None
    ):
        branch = None
        if not extra or b'branch' not in extra:
            try:
                branch = repo.dirstate.branch()
            except UnicodeDecodeError:
                raise error.Abort(_(b'branch name not in UTF-8!'))
        super(workingctx, self).__init__(
            repo, text, user, date, extra, changes, branch=branch
        )

    def __iter__(self):
        d = self._repo.dirstate
        for f in d:
            if d[f] != b'r':  # skip entries marked as removed
                yield f

    def __contains__(self, key):
        # tracked unless unknown (b'?') or marked removed (b'r')
        return self._repo.dirstate[key] not in b"?r"

    def hex(self):
        return self._repo.nodeconstants.wdirhex

    @propertycache
    def _parents(self):
        p = self._repo.dirstate.parents()
        if p[1] == self._repo.nodeconstants.nullid:
            p = p[:-1]
        # use unfiltered repo to delay/avoid loading obsmarkers
        unfi = self._repo.unfiltered()
        return [
            changectx(
                self._repo, unfi.changelog.rev(n), n, maybe_filtered=False
            )
            for n in p
        ]

    def setparents(self, p1node, p2node=None):
        if p2node is None:
            p2node = self._repo.nodeconstants.nullid
        dirstate = self._repo.dirstate
        with dirstate.parentchange():
            copies = dirstate.setparents(p1node, p2node)
            pctx = self._repo[p1node]
            if copies:
                # Adjust copy records; the dirstate cannot do it itself, as
                # it requires access to the parents' manifests. Preserve
                # them only for entries added to the first parent.
                for f in copies:
                    if f not in pctx and copies[f] in pctx:
                        dirstate.copy(copies[f], f)
            if p2node == self._repo.nodeconstants.nullid:
                for f, s in sorted(dirstate.copies().items()):
                    if f not in pctx and s not in pctx:
                        dirstate.copy(None, f)

    def _fileinfo(self, path):
        # populate __dict__['_manifest'] as workingctx has no _manifestdelta
        self._manifest
        return super(workingctx, self)._fileinfo(path)

    def _buildflagfunc(self):
        # Create a fallback function for getting file flags when the
        # filesystem doesn't support them

        copiesget = self._repo.dirstate.copies().get
        parents = self.parents()
        if len(parents) < 2:
            # when we have one parent, it's easy: copy from parent
            man = parents[0].manifest()

            def func(f):
                f = copiesget(f, f)
                return man.flags(f)

        else:
            # merges are tricky: we try to reconstruct the unstored
            # result from the merge (issue1802)
            p1, p2 = parents
            pa = p1.ancestor(p2)
            m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()

            def func(f):
                f = copiesget(f, f)  # may be wrong for merges with copies
                fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
                if fl1 == fl2:
                    return fl1
                if fl1 == fla:
                    return fl2
                if fl2 == fla:
                    return fl1
                return b''  # punt for conflicts
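            # Worked example (hypothetical flags): if the ancestor and p1
            # agree (fla == fl1 == b'') and p2 made the file executable
            # (fl2 == b'x'), only p2 changed the flag, so b'x' wins. If p1
            # and p2 disagree and both differ from the ancestor, that is a
            # conflict and we fall through to returning b''.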

        return func

    @propertycache
    def _flagfunc(self):
        return self._repo.dirstate.flagfunc(self._buildflagfunc)

    def flags(self, path):
        try:
            return self._flagfunc(path)
        except OSError:
            return b''

    def filectx(self, path, filelog=None):
        """get a file context from the working directory"""
        return workingfilectx(
            self._repo, path, workingctx=self, filelog=filelog
        )

    def dirty(self, missing=False, merge=True, branch=True):
        """check whether a working directory is modified"""
        # check subrepos first
        for s in sorted(self.substate):
            if self.sub(s).dirty(missing=missing):
                return True
        # check current working dir
        return (
            (merge and self.p2())
            or (branch and self.branch() != self.p1().branch())
            or self.modified()
            or self.added()
            or self.removed()
            or (missing and self.deleted())
        )

    def add(self, list, prefix=b""):
        with self._repo.wlock():
            ui, ds = self._repo.ui, self._repo.dirstate
            uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
            rejected = []
            lstat = self._repo.wvfs.lstat
            for f in list:
                # ds.pathto() returns an absolute file when this is invoked from
                # the keyword extension. That gets flagged as non-portable on
                # Windows, since it contains the drive letter and colon.
                scmutil.checkportable(ui, os.path.join(prefix, f))
                try:
                    st = lstat(f)
                except OSError:
                    ui.warn(_(b"%s does not exist!\n") % uipath(f))
                    rejected.append(f)
                    continue
                limit = ui.configbytes(b'ui', b'large-file-limit')
                if limit != 0 and st.st_size > limit:
                    ui.warn(
                        _(
                            b"%s: up to %d MB of RAM may be required "
                            b"to manage this file\n"
                            b"(use 'hg revert %s' to cancel the "
                            b"pending addition)\n"
                        )
                        % (f, 3 * st.st_size // 1000000, uipath(f))
                    )
                if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
                    ui.warn(
                        _(
                            b"%s not added: only files and symlinks "
                            b"supported currently\n"
                        )
                        % uipath(f)
                    )
                    rejected.append(f)
                elif ds[f] in b'amn':
                    # already tracked (added, merged or normal)
                    ui.warn(_(b"%s already tracked!\n") % uipath(f))
                elif ds[f] == b'r':
                    # previously marked removed: resurrect the entry instead
                    ds.normallookup(f)
                else:
                    ds.add(f)
            return rejected

    def forget(self, files, prefix=b""):
        with self._repo.wlock():
            ds = self._repo.dirstate
            uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
            rejected = []
            for f in files:
                if f not in ds:
                    self._repo.ui.warn(_(b"%s not tracked!\n") % uipath(f))
                    rejected.append(f)
                elif ds[f] != b'a':
                    # tracked in the parent: mark it as removed
                    ds.remove(f)
                else:
                    # only scheduled for addition: just drop the entry
                    ds.drop(f)
            return rejected

    def copy(self, source, dest):
        try:
            st = self._repo.wvfs.lstat(dest)
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
            self._repo.ui.warn(
                _(b"%s does not exist!\n") % self._repo.dirstate.pathto(dest)
            )
            return
        if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
            self._repo.ui.warn(
                _(b"copy failed: %s is not a file or a symbolic link\n")
                % self._repo.dirstate.pathto(dest)
            )
        else:
            with self._repo.wlock():
                ds = self._repo.dirstate
                if ds[dest] in b'?':
                    ds.add(dest)
                elif ds[dest] in b'r':
                    ds.normallookup(dest)
                ds.copy(source, dest)

    def match(
        self,
        pats=None,
        include=None,
        exclude=None,
        default=b'glob',
        listsubrepos=False,
        badfn=None,
        cwd=None,
    ):
        r = self._repo
        if not cwd:
            cwd = r.getcwd()

        # Only a case-insensitive filesystem needs magic to translate user
        # input to actual case in the filesystem.
        icasefs = not util.fscasesensitive(r.root)
        return matchmod.match(
            r.root,
            cwd,
            pats,
            include,
            exclude,
            default,
            auditor=r.auditor,
            ctx=self,
            listsubrepos=listsubrepos,
            badfn=badfn,
            icasefs=icasefs,
        )

    def _filtersuspectsymlink(self, files):
        if not files or self._repo.dirstate._checklink:
            return files

        # Symlink placeholders may get non-symlink-like contents
        # via user error or dereferencing by NFS or Samba servers,
        # so we filter out any placeholders that don't look like a
        # symlink
        sane = []
        for f in files:
            if self.flags(f) == b'l':
                d = self[f].data()
                if (
                    d == b''
                    or len(d) >= 1024
                    or b'\n' in d
                    or stringutil.binary(d)
                ):
                    self._repo.ui.debug(
                        b'ignoring suspect symlink placeholder "%s"\n' % f
                    )
                    continue
            sane.append(f)
        return sane

    def _checklookup(self, files):
        # check for any possibly clean files
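        # 'files' holds the "lookup" entries reported by the dirstate: their
        # size and mode still match the parent but the mtime alone could not
        # prove them clean, so each one is re-read and compared for real.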
        if not files:
            return [], [], []

        modified = []
        deleted = []
        fixup = []
        pctx = self._parents[0]
        # do a full compare of any files that might have changed
        for f in sorted(files):
            try:
                # This will return True for a file that got replaced by a
                # directory in the interim, but fixing that is pretty hard.
                if (
                    f not in pctx
                    or self.flags(f) != pctx.flags(f)
                    or pctx[f].cmp(self[f])
                ):
                    modified.append(f)
                else:
                    fixup.append(f)
            except (IOError, OSError):
                # A file became inaccessible in between? Mark it as deleted,
                # matching dirstate behavior (issue5584).
                # The dirstate has more complex behavior around whether a
                # missing file matches a directory, etc, but we don't need to
                # bother with that: if f has made it to this point, we're sure
                # it's in the dirstate.
                deleted.append(f)

        return modified, deleted, fixup

    def _poststatusfixup(self, status, fixup):
        """update dirstate for files that are actually clean"""
        poststatus = self._repo.postdsstatus()
        if fixup or poststatus or self._repo.dirstate._dirty:
            try:
                oldid = self._repo.dirstate.identity()

                # updating the dirstate is optional
                # so we don't wait on the lock
                # wlock can invalidate the dirstate, so cache normal _after_
                # taking the lock
                with self._repo.wlock(False):
                    if self._repo.dirstate.identity() == oldid:
                        if fixup:
                            normal = self._repo.dirstate.normal
                            for f in fixup:
                                normal(f)
                        # write changes out explicitly, because nesting
                        # wlock at runtime may prevent 'wlock.release()'
                        # after this block from doing so for subsequent
                        # changing files
                        tr = self._repo.currenttransaction()
                        self._repo.dirstate.write(tr)

                        if poststatus:
                            for ps in poststatus:
                                ps(self, status)
                    else:
                        # in this case, writing changes out breaks
                        # consistency, because .hg/dirstate was
                        # already changed simultaneously after last
                        # caching (see also issue5584 for detail)
                        self._repo.ui.debug(
                            b'skip updating dirstate: identity mismatch\n'
                        )
            except error.LockError:
                pass
            finally:
                # Even if the wlock couldn't be grabbed, clear out the list.
                self._repo.clearpostdsstatus()

    def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
        '''Gets the status from the dirstate -- internal use only.'''
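        # dirstate.status() returns a pair: 'cmp' lists the files whose
        # cleanliness could not be decided from stat data alone and must be
        # content-compared (see _checklookup), 's' is the usual status tuple.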
        subrepos = []
        if b'.hgsub' in self:
            subrepos = sorted(self.substate)
        cmp, s = self._repo.dirstate.status(
            match, subrepos, ignored=ignored, clean=clean, unknown=unknown
        )

        # check for any possibly clean files
        fixup = []
        if cmp:
            modified2, deleted2, fixup = self._checklookup(cmp)
            s.modified.extend(modified2)
            s.deleted.extend(deleted2)

        if fixup and clean:
            s.clean.extend(fixup)

        self._poststatusfixup(s, fixup)

        if match.always():
            # cache for performance
            if s.unknown or s.ignored or s.clean:
                # "_status" is cached with list*=False in the normal route
                self._status = scmutil.status(
                    s.modified, s.added, s.removed, s.deleted, [], [], []
                )
            else:
                self._status = s

        return s

    @propertycache
    def _copies(self):
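        # Compute copy information for the working directory: a copy is
        # attributed to p1 if its source exists in the first parent's
        # manifest, otherwise to p2 if it exists there; copies whose
        # destination was not actually added/modified (or falls outside a
        # narrow clone) are ignored.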
        p1copies = {}
        p2copies = {}
        parents = self._repo.dirstate.parents()
        p1manifest = self._repo[parents[0]].manifest()
        p2manifest = self._repo[parents[1]].manifest()
        changedset = set(self.added()) | set(self.modified())
        narrowmatch = self._repo.narrowmatch()
        for dst, src in self._repo.dirstate.copies().items():
            if dst not in changedset or not narrowmatch(dst):
                continue
            if src in p1manifest:
                p1copies[dst] = src
            elif src in p2manifest:
                p2copies[dst] = src
        return p1copies, p2copies

    @propertycache
    def _manifest(self):
        """generate a manifest corresponding to the values in self._status

        This reuses the file nodeids from the parent, but uses special node
        identifiers for added and modified files. This is used by manifest
        merge to see that files are different, and by the update logic to
        avoid deleting newly added files.
        """
        return self._buildstatusmanifest(self._status)

    def _buildstatusmanifest(self, status):
        """Builds a manifest that includes the given status results."""
        parents = self.parents()

        man = parents[0].manifest().copy()

        ff = self._flagfunc
        for i, l in (
            (self._repo.nodeconstants.addednodeid, status.added),
            (self._repo.nodeconstants.modifiednodeid, status.modified),
        ):
            for f in l:
                man[f] = i
                try:
                    man.setflag(f, ff(f))
                except OSError:
                    pass

        for f in status.deleted + status.removed:
            if f in man:
                del man[f]

        return man

    def _buildstatus(
        self, other, s, match, listignored, listclean, listunknown
    ):
        """build a status with respect to another context

        This includes logic for maintaining the fast path of status when
        comparing the working directory against its parent: in that case we
        can skip building a new manifest and use the dirstate results
        directly.
        """
        s = self._dirstatestatus(match, listignored, listclean, listunknown)
        # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
        # might have accidentally ended up with the entire contents of the file
        # they are supposed to be linking to.
        s.modified[:] = self._filtersuspectsymlink(s.modified)
        if other != self._repo[b'.']:
            s = super(workingctx, self)._buildstatus(
                other, s, match, listignored, listclean, listunknown
            )
        return s

1988 def _matchstatus(self, other, match):
1988 def _matchstatus(self, other, match):
1989 """override the match method with a filter for directory patterns
1989 """override the match method with a filter for directory patterns
1990
1990
1991 We use inheritance to customize the match.bad method only in cases of
1991 We use inheritance to customize the match.bad method only in cases of
1992 workingctx since it belongs only to the working directory when
1992 workingctx since it belongs only to the working directory when
1993 comparing against the parent changeset.
1993 comparing against the parent changeset.
1994
1994
1995 If we aren't comparing against the working directory's parent, then we
1995 If we aren't comparing against the working directory's parent, then we
1996 just use the default match object sent to us.
1996 just use the default match object sent to us.
1997 """
1997 """
1998 if other != self._repo[b'.']:
1998 if other != self._repo[b'.']:
1999
1999
2000 def bad(f, msg):
2000 def bad(f, msg):
2001 # 'f' may be a directory pattern from 'match.files()',
2001 # 'f' may be a directory pattern from 'match.files()',
2002 # so 'f not in ctx1' is not enough
2002 # so 'f not in ctx1' is not enough
2003 if f not in other and not other.hasdir(f):
2003 if f not in other and not other.hasdir(f):
2004 self._repo.ui.warn(
2004 self._repo.ui.warn(
2005 b'%s: %s\n' % (self._repo.dirstate.pathto(f), msg)
2005 b'%s: %s\n' % (self._repo.dirstate.pathto(f), msg)
2006 )
2006 )
2007
2007
2008 match.bad = bad
2008 match.bad = bad
2009 return match
2009 return match

    def walk(self, match):
        '''Generates matching file names.'''
        return sorted(
            self._repo.dirstate.walk(
                self._repo.narrowmatch(match),
                subrepos=sorted(self.substate),
                unknown=True,
                ignored=False,
            )
        )

    def matches(self, match):
        match = self._repo.narrowmatch(match)
        ds = self._repo.dirstate
        return sorted(f for f in ds.matches(match) if ds[f] != b'r')

    def markcommitted(self, node):
        with self._repo.dirstate.parentchange():
            for f in self.modified() + self.added():
                self._repo.dirstate.normal(f)
            for f in self.removed():
                self._repo.dirstate.drop(f)
            self._repo.dirstate.setparents(node)
            self._repo._quick_access_changeid_invalidate()

        # write changes out explicitly, because nesting wlock at
        # runtime may prevent 'wlock.release()' in 'repo.commit()'
        # from immediately doing so for subsequent changing files
        self._repo.dirstate.write(self._repo.currenttransaction())

        sparse.aftercommit(self._repo, node)

    def mergestate(self, clean=False):
        if clean:
            return mergestatemod.mergestate.clean(self._repo)
        return mergestatemod.mergestate.read(self._repo)

class committablefilectx(basefilectx):
    """A committablefilectx provides common functionality for a file context
    that wants the ability to commit, e.g. workingfilectx or memfilectx."""

    def __init__(self, repo, path, filelog=None, ctx=None):
        self._repo = repo
        self._path = path
        self._changeid = None
        self._filerev = self._filenode = None

        if filelog is not None:
            self._filelog = filelog
        if ctx:
            self._changectx = ctx

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def linkrev(self):
        # linked to self._changectx no matter if file is modified or not
        return self.rev()

    def renamed(self):
        path = self.copysource()
        if not path:
            return None
        return (
            path,
            self._changectx._parents[0]._manifest.get(
                path, self._repo.nodeconstants.nullid
            ),
        )

    def parents(self):
        '''return parent filectxs, following copies if necessary'''

        def filenode(ctx, path):
            return ctx._manifest.get(path, self._repo.nodeconstants.nullid)

        path = self._path
        fl = self._filelog
        pcl = self._changectx._parents
        renamed = self.renamed()

        if renamed:
            pl = [renamed + (None,)]
        else:
            pl = [(path, filenode(pcl[0], path), fl)]

        for pc in pcl[1:]:
            pl.append((path, filenode(pc, path), fl))

        return [
            self._parentfilectx(p, fileid=n, filelog=l)
            for p, n, l in pl
            if n != self._repo.nodeconstants.nullid
        ]

    def children(self):
        return []

class workingfilectx(committablefilectx):
    """A workingfilectx object makes access to data related to a particular
    file in the working directory convenient."""

    def __init__(self, repo, path, filelog=None, workingctx=None):
        super(workingfilectx, self).__init__(repo, path, filelog, workingctx)

    @propertycache
    def _changectx(self):
        return workingctx(self._repo)

    def data(self):
        return self._repo.wread(self._path)

    def copysource(self):
        return self._repo.dirstate.copied(self._path)

    def size(self):
        return self._repo.wvfs.lstat(self._path).st_size

    def lstat(self):
        return self._repo.wvfs.lstat(self._path)

    def date(self):
        t, tz = self._changectx.date()
        try:
            return (self._repo.wvfs.lstat(self._path)[stat.ST_MTIME], tz)
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
            return (t, tz)

    def exists(self):
        return self._repo.wvfs.exists(self._path)

    def lexists(self):
        return self._repo.wvfs.lexists(self._path)

    def audit(self):
        return self._repo.wvfs.audit(self._path)

    def cmp(self, fctx):
        """compare with other file context

        returns True if different from fctx.
        """
        # fctx should be a filectx (not a workingfilectx)
        # invert comparison to reuse the same code path
        return fctx.cmp(self)

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        rmdir = self._repo.ui.configbool(b'experimental', b'removeemptydirs')
        self._repo.wvfs.unlinkpath(
            self._path, ignoremissing=ignoremissing, rmdir=rmdir
        )

    def write(self, data, flags, backgroundclose=False, **kwargs):
        """wraps repo.wwrite"""
        return self._repo.wwrite(
            self._path, data, flags, backgroundclose=backgroundclose, **kwargs
        )

    def markcopied(self, src):
        """marks this file a copy of `src`"""
        self._repo.dirstate.copy(src, self._path)

    def clearunknown(self):
        """Removes conflicting items in the working directory so that
        ``write()`` can be called successfully.
        """
        wvfs = self._repo.wvfs
        f = self._path
        wvfs.audit(f)
        if self._repo.ui.configbool(
            b'experimental', b'merge.checkpathconflicts'
        ):
            # remove files under the directory as they should already be
            # warned and backed up
            if wvfs.isdir(f) and not wvfs.islink(f):
                wvfs.rmtree(f, forcibly=True)
            for p in reversed(list(pathutil.finddirs(f))):
                if wvfs.isfileorlink(p):
                    wvfs.unlink(p)
                    break
        else:
            # don't remove files if path conflicts are not processed
            if wvfs.isdir(f) and not wvfs.islink(f):
                wvfs.removedirs(f)

    def setflags(self, l, x):
        self._repo.wvfs.setflags(self._path, l, x)


class overlayworkingctx(committablectx):
    """Wraps another mutable context with a write-back cache that can be
    converted into a commit context.

    self._cache[path] maps to a dict with keys: {
      'exists': bool?
      'date': date?
      'data': str?
      'flags': str?
      'copied': str? (path or None)
    }
    If `exists` is True, `flags` and `date` must be non-None. If it is
    `False`, the file was deleted.
    """

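    # Illustrative sketch (not part of the original module): a hypothetical
    # cache entry for a file written with new data, using the keys documented
    # above (the concrete values here are made up):
    #
    #   self._cache[b'foo.txt'] = {
    #       b'exists': True,
    #       b'data': b'new contents\n',
    #       b'date': dateutil.makedate(),
    #       b'flags': b'',
    #       b'copied': None,
    #   }
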
    def __init__(self, repo):
        super(overlayworkingctx, self).__init__(repo)
        self.clean()

    def setbase(self, wrappedctx):
        self._wrappedctx = wrappedctx
        self._parents = [wrappedctx]
        # Drop old manifest cache as it is now out of date.
        # This is necessary when, e.g., rebasing several nodes with one
        # ``overlayworkingctx`` (e.g. with --collapse).
        util.clearcachedproperty(self, b'_manifest')

    def setparents(self, p1node, p2node=None):
        if p2node is None:
            p2node = self._repo.nodeconstants.nullid
        assert p1node == self._wrappedctx.node()
        self._parents = [self._wrappedctx, self._repo.unfiltered()[p2node]]

    def data(self, path):
        if self.isdirty(path):
            if self._cache[path][b'exists']:
                if self._cache[path][b'data'] is not None:
                    return self._cache[path][b'data']
                else:
                    # Must fallback here, too, because we only set flags.
                    return self._wrappedctx[path].data()
            else:
                raise error.ProgrammingError(
                    b"No such file or directory: %s" % path
                )
        else:
            return self._wrappedctx[path].data()

    @propertycache
    def _manifest(self):
        parents = self.parents()
        man = parents[0].manifest().copy()

        flag = self._flagfunc
        for path in self.added():
            man[path] = self._repo.nodeconstants.addednodeid
            man.setflag(path, flag(path))
        for path in self.modified():
            man[path] = self._repo.nodeconstants.modifiednodeid
            man.setflag(path, flag(path))
        for path in self.removed():
            del man[path]
        return man

    @propertycache
    def _flagfunc(self):
        def f(path):
            return self._cache[path][b'flags']

        return f

    def files(self):
        return sorted(self.added() + self.modified() + self.removed())

    def modified(self):
        return [
            f
            for f in self._cache.keys()
            if self._cache[f][b'exists'] and self._existsinparent(f)
        ]

    def added(self):
        return [
            f
            for f in self._cache.keys()
            if self._cache[f][b'exists'] and not self._existsinparent(f)
        ]

    def removed(self):
        return [
            f
            for f in self._cache.keys()
            if not self._cache[f][b'exists'] and self._existsinparent(f)
        ]

    def p1copies(self):
        copies = {}
        narrowmatch = self._repo.narrowmatch()
        for f in self._cache.keys():
            if not narrowmatch(f):
                continue
            copies.pop(f, None)  # delete if it exists
            source = self._cache[f][b'copied']
            if source:
                copies[f] = source
        return copies

    def p2copies(self):
        copies = {}
        narrowmatch = self._repo.narrowmatch()
        for f in self._cache.keys():
            if not narrowmatch(f):
                continue
            copies.pop(f, None)  # delete if it exists
            source = self._cache[f][b'copied']
            if source:
                copies[f] = source
        return copies

    def isinmemory(self):
        return True

    def filedate(self, path):
        if self.isdirty(path):
            return self._cache[path][b'date']
        else:
            return self._wrappedctx[path].date()

    def markcopied(self, path, origin):
        self._markdirty(
            path,
            exists=True,
            date=self.filedate(path),
            flags=self.flags(path),
            copied=origin,
        )

    def copydata(self, path):
        if self.isdirty(path):
            return self._cache[path][b'copied']
        else:
            return None

    def flags(self, path):
        if self.isdirty(path):
            if self._cache[path][b'exists']:
                return self._cache[path][b'flags']
            else:
                raise error.ProgrammingError(
                    b"No such file or directory: %s" % path
                )
        else:
            return self._wrappedctx[path].flags()

    def __contains__(self, key):
        if key in self._cache:
            return self._cache[key][b'exists']
        return key in self.p1()

    def _existsinparent(self, path):
        try:
            # ``commitctx`` raises a ``ManifestLookupError`` if a path does not
            # exist, unlike ``workingctx``, which returns a ``workingfilectx``
            # with an ``exists()`` function.
            self._wrappedctx[path]
            return True
        except error.ManifestLookupError:
            return False

    def _auditconflicts(self, path):
        """Replicates conflict checks done by wvfs.write().

        Since we never write to the filesystem and never call `applyupdates` in
        IMM, we'll never check that a path is actually writable -- e.g., because
        it adds `a/foo`, but `a` is actually a file in the other commit.
        """

        def fail(path, component):
            # p1() is the base and we're receiving "writes" for p2()'s
            # files.
            if b'l' in self.p1()[component].flags():
                raise error.Abort(
                    b"error: %s conflicts with symlink %s "
                    b"in %d." % (path, component, self.p1().rev())
                )
            else:
                raise error.Abort(
                    b"error: '%s' conflicts with file '%s' in "
                    b"%d." % (path, component, self.p1().rev())
                )

        # Test that each new directory to be created to write this path from p2
        # is not a file in p1.
        components = path.split(b'/')
        for i in pycompat.xrange(len(components)):
            component = b"/".join(components[0:i])
            if component in self:
                fail(path, component)

        # Test the other direction -- that this path from p2 isn't a directory
        # in p1 (test that p1 doesn't have any paths matching `path/*`).
        match = self.match([path], default=b'path')
        mfiles = list(self.p1().manifest().walk(match))
        if len(mfiles) > 0:
            if len(mfiles) == 1 and mfiles[0] == path:
                return
            # omit the files which are deleted in current IMM wctx
            mfiles = [m for m in mfiles if m in self]
            if not mfiles:
                return
            raise error.Abort(
                b"error: file '%s' cannot be written because "
                b" '%s/' is a directory in %s (containing %d "
                b"entries: %s)"
                % (path, path, self.p1(), len(mfiles), b', '.join(mfiles))
            )

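    # Illustrative sketch (not part of the original module): for a made-up
    # path b'a/b/c', the prefix loop in _auditconflicts above checks the
    # components b'', b'a' and b'a/b' against this context, so the write is
    # aborted if b'a' or b'a/b' already exists as a file or symlink in p1:
    #
    #   components = b'a/b/c'.split(b'/')              # [b'a', b'b', b'c']
    #   [b"/".join(components[0:i]) for i in range(len(components))]
    #   # -> [b'', b'a', b'a/b']
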
    def write(self, path, data, flags=b'', **kwargs):
        if data is None:
            raise error.ProgrammingError(b"data must be non-None")
        self._auditconflicts(path)
        self._markdirty(
            path, exists=True, data=data, date=dateutil.makedate(), flags=flags
        )

    def setflags(self, path, l, x):
        flag = b''
        if l:
            flag = b'l'
        elif x:
            flag = b'x'
        self._markdirty(path, exists=True, date=dateutil.makedate(), flags=flag)

    def remove(self, path):
        self._markdirty(path, exists=False)

    def exists(self, path):
        """exists behaves like `lexists`, but needs to follow symlinks and
        return False if they are broken.
        """
        if self.isdirty(path):
            # If this path exists and is a symlink, "follow" it by calling
            # exists on the destination path.
            if (
                self._cache[path][b'exists']
                and b'l' in self._cache[path][b'flags']
            ):
                return self.exists(self._cache[path][b'data'].strip())
            else:
                return self._cache[path][b'exists']

        return self._existsinparent(path)

    def lexists(self, path):
        """lexists returns True if the path exists"""
        if self.isdirty(path):
            return self._cache[path][b'exists']

        return self._existsinparent(path)

    def size(self, path):
        if self.isdirty(path):
            if self._cache[path][b'exists']:
                return len(self._cache[path][b'data'])
            else:
                raise error.ProgrammingError(
                    b"No such file or directory: %s" % path
                )
        return self._wrappedctx[path].size()

    def tomemctx(
        self,
        text,
        branch=None,
        extra=None,
        date=None,
        parents=None,
        user=None,
        editor=None,
    ):
        """Converts this ``overlayworkingctx`` into a ``memctx`` ready to be
        committed.

        ``text`` is the commit message.
        ``parents`` (optional) are rev numbers.
        """
        # Default parents to the wrapped context if not passed.
        if parents is None:
            parents = self.parents()
            if len(parents) == 1:
                parents = (parents[0], None)

        # ``parents`` is passed as rev numbers; convert to ``commitctxs``.
        if parents[1] is None:
            parents = (self._repo[parents[0]], None)
        else:
            parents = (self._repo[parents[0]], self._repo[parents[1]])

        files = self.files()

        def getfile(repo, memctx, path):
            if self._cache[path][b'exists']:
                return memfilectx(
                    repo,
                    memctx,
                    path,
                    self._cache[path][b'data'],
                    b'l' in self._cache[path][b'flags'],
                    b'x' in self._cache[path][b'flags'],
                    self._cache[path][b'copied'],
                )
            else:
                # Returning None, but including the path in `files`, is
                # necessary for memctx to register a deletion.
                return None

        if branch is None:
            branch = self._wrappedctx.branch()

        return memctx(
            self._repo,
            parents,
            text,
            files,
            getfile,
            date=date,
            extra=extra,
            user=user,
            branch=branch,
            editor=editor,
        )

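    # Illustrative sketch (not part of the original module): a hypothetical
    # in-memory commit built on top of an overlay; ``repo`` is assumed to be
    # an existing localrepo object:
    #
    #   wctx = overlayworkingctx(repo)
    #   wctx.setbase(repo[b'.'])
    #   wctx.write(b'foo.txt', b'new contents\n')
    #   mctx = wctx.tomemctx(b'commit message', user=b'alice')
    #   node = mctx.commit()       # commits via localrepo.commitctx()
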
    def tomemctx_for_amend(self, precursor):
        extra = precursor.extra().copy()
        extra[b'amend_source'] = precursor.hex()
        return self.tomemctx(
            text=precursor.description(),
            branch=precursor.branch(),
            extra=extra,
            date=precursor.date(),
            user=precursor.user(),
        )

    def isdirty(self, path):
        return path in self._cache

    def clean(self):
        self._mergestate = None
        self._cache = {}

    def _compact(self):
        """Removes keys from the cache that are actually clean, by comparing
        them with the underlying context.

        This can occur during the merge process, e.g. by passing --tool :local
        to resolve a conflict.
        """
        keys = []
        # This won't be perfect, but can help performance significantly when
        # using things like remotefilelog.
        scmutil.prefetchfiles(
            self.repo(),
            [
                (
                    self.p1().rev(),
                    scmutil.matchfiles(self.repo(), self._cache.keys()),
                )
            ],
        )

        for path in self._cache.keys():
            cache = self._cache[path]
            try:
                underlying = self._wrappedctx[path]
                if (
                    underlying.data() == cache[b'data']
                    and underlying.flags() == cache[b'flags']
                ):
                    keys.append(path)
            except error.ManifestLookupError:
                # Path not in the underlying manifest (created).
                continue

        for path in keys:
            del self._cache[path]
        return keys

    def _markdirty(
        self, path, exists, data=None, date=None, flags=b'', copied=None
    ):
        # data not provided, let's see if we already have some; if not, let's
        # grab it from our underlying context, so that we always have data if
        # the file is marked as existing.
        if exists and data is None:
            oldentry = self._cache.get(path) or {}
            data = oldentry.get(b'data')
            if data is None:
                data = self._wrappedctx[path].data()

        self._cache[path] = {
            b'exists': exists,
            b'data': data,
            b'date': date,
            b'flags': flags,
            b'copied': copied,
        }
        util.clearcachedproperty(self, b'_manifest')

    def filectx(self, path, filelog=None):
        return overlayworkingfilectx(
            self._repo, path, parent=self, filelog=filelog
        )

    def mergestate(self, clean=False):
        if clean or self._mergestate is None:
            self._mergestate = mergestatemod.memmergestate(self._repo)
        return self._mergestate


class overlayworkingfilectx(committablefilectx):
    """Wraps a ``workingfilectx``, but intercepts all writes into an in-memory
    cache, which can be flushed through later by calling ``flush()``."""

    def __init__(self, repo, path, filelog=None, parent=None):
        super(overlayworkingfilectx, self).__init__(repo, path, filelog, parent)
        self._repo = repo
        self._parent = parent
        self._path = path

    def cmp(self, fctx):
        return self.data() != fctx.data()

    def changectx(self):
        return self._parent

    def data(self):
        return self._parent.data(self._path)

    def date(self):
        return self._parent.filedate(self._path)

    def exists(self):
        return self.lexists()

    def lexists(self):
        return self._parent.exists(self._path)

    def copysource(self):
        return self._parent.copydata(self._path)

    def size(self):
        return self._parent.size(self._path)

    def markcopied(self, origin):
        self._parent.markcopied(self._path, origin)

    def audit(self):
        pass

    def flags(self):
        return self._parent.flags(self._path)

    def setflags(self, islink, isexec):
        return self._parent.setflags(self._path, islink, isexec)

    def write(self, data, flags, backgroundclose=False, **kwargs):
        return self._parent.write(self._path, data, flags, **kwargs)

    def remove(self, ignoremissing=False):
        return self._parent.remove(self._path)

    def clearunknown(self):
        pass


class workingcommitctx(workingctx):
    """A workingcommitctx object makes access to data related to
    the revision being committed convenient.

    This hides changes in the working directory, if they aren't
    committed in this context.
    """

    def __init__(
        self, repo, changes, text=b"", user=None, date=None, extra=None
    ):
        super(workingcommitctx, self).__init__(
            repo, text, user, date, extra, changes
        )

    def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
        """Return matched files only in ``self._status``

        Uncommitted files appear "clean" via this context, even if
        they aren't actually so in the working directory.
        """
        if clean:
            clean = [f for f in self._manifest if f not in self._changedset]
        else:
            clean = []
        return scmutil.status(
            [f for f in self._status.modified if match(f)],
            [f for f in self._status.added if match(f)],
            [f for f in self._status.removed if match(f)],
            [],
            [],
            [],
            clean,
        )

    @propertycache
    def _changedset(self):
        """Return the set of files changed in this context"""
        changed = set(self._status.modified)
        changed.update(self._status.added)
        changed.update(self._status.removed)
        return changed


def makecachingfilectxfn(func):
    """Create a filectxfn that caches based on the path.

    We can't use util.cachefunc because it uses all arguments as the cache
    key and this creates a cycle since the arguments include the repo and
    memctx.
    """
    cache = {}

    def getfilectx(repo, memctx, path):
        if path not in cache:
            cache[path] = func(repo, memctx, path)
        return cache[path]

    return getfilectx

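# Illustrative sketch (not part of the original module): wrapping a
# hypothetical expensive callback so repeated lookups of the same path are
# served from the per-path cache instead of being recomputed:
#
#   def expensivefilectx(repo, memctx, path):
#       ...  # fetch data and build a memfilectx
#
#   cached = makecachingfilectxfn(expensivefilectx)
#   fctx1 = cached(repo, memctx, b'foo')   # computed once
#   fctx2 = cached(repo, memctx, b'foo')   # returned from the cache
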

def memfilefromctx(ctx):
    """Given a context, return a memfilectx for ctx[path]

    This is a convenience method for building a memctx based on another
    context.
    """

    def getfilectx(repo, memctx, path):
        fctx = ctx[path]
        copysource = fctx.copysource()
        return memfilectx(
            repo,
            memctx,
            path,
            fctx.data(),
            islink=fctx.islink(),
            isexec=fctx.isexec(),
            copysource=copysource,
        )

    return getfilectx


def memfilefrompatch(patchstore):
    """Given a patch (e.g. patchstore object), return a memfilectx

    This is a convenience method for building a memctx based on a patchstore.
    """

    def getfilectx(repo, memctx, path):
        data, mode, copysource = patchstore.getfile(path)
        if data is None:
            return None
        islink, isexec = mode
        return memfilectx(
            repo,
            memctx,
            path,
            data,
            islink=islink,
            isexec=isexec,
            copysource=copysource,
        )

    return getfilectx


class memctx(committablectx):
    """Use memctx to perform in-memory commits via localrepo.commitctx().

    Revision information is supplied at initialization time, while the
    related file data is made available through a callback
    mechanism. 'repo' is the current localrepo, 'parents' is a
    sequence of two parent revision identifiers (pass None for every
    missing parent), 'text' is the commit message and 'files' lists
    names of files touched by the revision (normalized and relative to
    repository root).

    filectxfn(repo, memctx, path) is a callable receiving the
    repository, the current memctx object and the normalized path of
    the requested file, relative to repository root. It is fired by the
    commit function for every file in 'files', but the call order is
    undefined. If the file is available in the revision being
    committed (updated or added), filectxfn returns a memfilectx
    object. If the file was removed, filectxfn returns None for recent
    Mercurial. Moved files are represented by marking the source file
    removed and the new file added with copy information (see
    memfilectx).

    user receives the committer name and defaults to the current
    repository username, date is the commit date in any format
    supported by dateutil.parsedate() and defaults to the current date,
    extra is a dictionary of metadata or is left empty.
    """

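    # Illustrative sketch (not part of the original module): creating a
    # one-file commit entirely in memory; ``repo`` is assumed to be an
    # existing localrepo object:
    #
    #   def getfilectx(repo, memctx, path):
    #       return memfilectx(repo, memctx, path, b'contents\n')
    #
    #   ctx = memctx(
    #       repo,
    #       [repo[b'.'].node(), None],     # p1 and a missing p2
    #       b'commit message',
    #       [b'foo.txt'],
    #       getfilectx,
    #       user=b'alice',
    #   )
    #   node = ctx.commit()
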
    # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
    # Extensions that need to retain compatibility across Mercurial 3.1 can use
    # this field to determine what to do in filectxfn.
    _returnnoneformissingfiles = True

    def __init__(
        self,
        repo,
        parents,
        text,
        files,
        filectxfn,
        user=None,
        date=None,
        extra=None,
        branch=None,
        editor=None,
    ):
        super(memctx, self).__init__(
            repo, text, user, date, extra, branch=branch
        )
        self._rev = None
        self._node = None
        parents = [(p or self._repo.nodeconstants.nullid) for p in parents]
        p1, p2 = parents
        self._parents = [self._repo[p] for p in (p1, p2)]
        files = sorted(set(files))
        self._files = files
        self.substate = {}

        if isinstance(filectxfn, patch.filestore):
            filectxfn = memfilefrompatch(filectxfn)
        elif not callable(filectxfn):
            # if store is not callable, wrap it in a function
            filectxfn = memfilefromctx(filectxfn)

        # memoizing increases performance for e.g. vcs convert scenarios.
        self._filectxfn = makecachingfilectxfn(filectxfn)

        if editor:
            self._text = editor(self._repo, self, [])
            self._repo.savecommitmessage(self._text)

    def filectx(self, path, filelog=None):
        """get a file context from the working directory

        Returns None if file doesn't exist and should be removed."""
        return self._filectxfn(self._repo, self, path)

    def commit(self):
        """commit context to the repo"""
        return self._repo.commitctx(self)

    @propertycache
    def _manifest(self):
        """generate a manifest based on the return values of filectxfn"""

        # keep this simple for now; just worry about p1
        pctx = self._parents[0]
        man = pctx.manifest().copy()

        for f in self._status.modified:
            man[f] = self._repo.nodeconstants.modifiednodeid

        for f in self._status.added:
            man[f] = self._repo.nodeconstants.addednodeid

        for f in self._status.removed:
            if f in man:
                del man[f]

        return man

    @propertycache
    def _status(self):
        """Calculate exact status from ``files`` specified at construction"""
        man1 = self.p1().manifest()
        p2 = self._parents[1]
        # "1 < len(self._parents)" can't be used for checking
        # existence of the 2nd parent, because "memctx._parents" is
        # explicitly initialized with a list whose length is 2.
        if p2.rev() != nullrev:
            man2 = p2.manifest()
            managing = lambda f: f in man1 or f in man2
        else:
            managing = lambda f: f in man1

        modified, added, removed = [], [], []
        for f in self._files:
            if not managing(f):
                added.append(f)
            elif self[f]:
                modified.append(f)
            else:
                removed.append(f)

        return scmutil.status(modified, added, removed, [], [], [], [])

    def parents(self):
        if self._parents[1].rev() == nullrev:
            return [self._parents[0]]
        return self._parents


class memfilectx(committablefilectx):
    """memfilectx represents an in-memory file to commit.

    See memctx and committablefilectx for more details.
    """

    def __init__(
        self,
        repo,
        changectx,
        path,
        data,
        islink=False,
        isexec=False,
        copysource=None,
    ):
        """
        path is the normalized file path relative to repository root.
        data is the file content as a string.
        islink is True if the file is a symbolic link.
        isexec is True if the file is executable.
        copysource is the source file path if the current file was copied
        in the revision being committed, or None."""
        super(memfilectx, self).__init__(repo, path, None, changectx)
        self._data = data
        if islink:
            self._flags = b'l'
        elif isexec:
            self._flags = b'x'
        else:
            self._flags = b''
        self._copysource = copysource

    def copysource(self):
        return self._copysource

    def cmp(self, fctx):
        return self.data() != fctx.data()

    def data(self):
        return self._data

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        # need to figure out what to do here
        del self._changectx[self._path]

    def write(self, data, flags, **kwargs):
        """wraps repo.wwrite"""
        self._data = data


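# Illustrative sketch (an assumption, not part of the original module): a
# ``filectxfn`` callback passed to ``memctx`` would typically build
# ``memfilectx`` instances like the hypothetical helper below, where
# ``new_contents`` is an invented mapping of path -> bytes.
#
#     def filectxfn(repo, memctx, path):
#         data = new_contents.get(path)
#         if data is None:
#             return None  # conventionally treated as file removal
#         return memfilectx(repo, memctx, path, data)
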
class metadataonlyctx(committablectx):
    """Like memctx but it reuses the manifest of a different commit.
    Intended to be used by lightweight operations that are creating
    metadata-only changes.

    Revision information is supplied at initialization time. 'repo' is the
    current localrepo, 'ctx' is the original revision whose manifest we're
    reusing, 'parents' is a sequence of two parent revisions identifiers
    (pass None for every missing parent), 'text' is the commit message.

    user receives the committer name and defaults to current repository
    username, date is the commit date in any format supported by
    dateutil.parsedate() and defaults to current date, extra is a dictionary of
    metadata or is left empty.
    """

    def __init__(
        self,
        repo,
        originalctx,
        parents=None,
        text=None,
        user=None,
        date=None,
        extra=None,
        editor=None,
    ):
        if text is None:
            text = originalctx.description()
        super(metadataonlyctx, self).__init__(repo, text, user, date, extra)
        self._rev = None
        self._node = None
        self._originalctx = originalctx
        self._manifestnode = originalctx.manifestnode()
        if parents is None:
            parents = originalctx.parents()
        else:
            parents = [repo[p] for p in parents if p is not None]
        parents = parents[:]
        while len(parents) < 2:
            parents.append(repo[nullrev])
        p1, p2 = self._parents = parents

        # sanity check to ensure that the reused manifest parents are
        # manifests of our commit parents
        mp1, mp2 = self.manifestctx().parents
        if p1 != self._repo.nodeconstants.nullid and p1.manifestnode() != mp1:
            raise RuntimeError(
                r"can't reuse the manifest: its p1 "
                r"doesn't match the new ctx p1"
            )
        if p2 != self._repo.nodeconstants.nullid and p2.manifestnode() != mp2:
            raise RuntimeError(
                r"can't reuse the manifest: "
                r"its p2 doesn't match the new ctx p2"
            )

        self._files = originalctx.files()
        self.substate = {}

        if editor:
            self._text = editor(self._repo, self, [])
            self._repo.savecommitmessage(self._text)

    def manifestnode(self):
        return self._manifestnode

    @property
    def _manifestctx(self):
        return self._repo.manifestlog[self._manifestnode]

    def filectx(self, path, filelog=None):
        return self._originalctx.filectx(path, filelog=filelog)

    def commit(self):
        """commit context to the repo"""
        return self._repo.commitctx(self)

    @property
    def _manifest(self):
        return self._originalctx.manifest()

    @propertycache
    def _status(self):
        """Calculate exact status from ``files`` specified in the ``origctx``
        and parents manifests.
        """
        man1 = self.p1().manifest()
        p2 = self._parents[1]
        # "1 < len(self._parents)" can't be used for checking
        # existence of the 2nd parent, because "metadataonlyctx._parents" is
        # explicitly initialized to a list whose length is 2.
        if p2.rev() != nullrev:
            man2 = p2.manifest()
            managing = lambda f: f in man1 or f in man2
        else:
            managing = lambda f: f in man1

        modified, added, removed = [], [], []
        for f in self._files:
            if not managing(f):
                added.append(f)
            elif f in self:
                modified.append(f)
            else:
                removed.append(f)

        return scmutil.status(modified, added, removed, [], [], [], [])


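# Illustrative sketch (an assumption, not from the original module): a
# lightweight "reword" operation could reuse a changeset's manifest while
# replacing only its message, roughly as follows.
#
#     def reword(repo, ctx, newtext):
#         mctx = metadataonlyctx(repo, ctx, text=newtext)
#         return mctx.commit()  # commits via repo.commitctx(self)
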
class arbitraryfilectx(object):
    """Allows you to use filectx-like functions on a file in an arbitrary
    location on disk, possibly not in the working directory.
    """

    def __init__(self, path, repo=None):
        # Repo is optional because contrib/simplemerge uses this class.
        self._repo = repo
        self._path = path

    def cmp(self, fctx):
        # filecmp follows symlinks whereas `cmp` should not, so skip the fast
        # path if either side is a symlink.
        symlinks = b'l' in self.flags() or b'l' in fctx.flags()
        if not symlinks and isinstance(fctx, workingfilectx) and self._repo:
            # Add a fast-path for merge if both sides are disk-backed.
            # Note that filecmp uses the opposite return values (True if same)
            # from our cmp functions (True if different).
            return not filecmp.cmp(self.path(), self._repo.wjoin(fctx.path()))
        return self.data() != fctx.data()

    def path(self):
        return self._path

    def flags(self):
        return b''

    def data(self):
        return util.readfile(self._path)

    def decodeddata(self):
        with open(self._path, b"rb") as f:
            return f.read()

    def remove(self):
        util.unlink(self._path)

    def write(self, data, flags, **kwargs):
        assert not flags
        with open(self._path, b"wb") as f:
            f.write(data)
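# Illustrative usage (an assumption, for orientation only): comparing an
# on-disk file outside the working directory against a file context; the
# path below is hypothetical.
#
#     afctx = arbitraryfilectx(b'/tmp/merge-backup')
#     if afctx.cmp(fctx):  # True means the contents differ
#         data = afctx.data()
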
@@ -1,1980 +1,1983 b''
# dirstate.py - working directory tracking for mercurial
#
# Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import collections
import contextlib
import errno
import os
import stat

from .i18n import _
from .pycompat import delattr

from hgdemandimport import tracing

from . import (
    encoding,
    error,
    match as matchmod,
    pathutil,
    policy,
    pycompat,
    scmutil,
    sparse,
    txnutil,
    util,
)

from .interfaces import (
    dirstate as intdirstate,
    util as interfaceutil,
)

parsers = policy.importmod('parsers')
rustmod = policy.importrust('dirstate')

SUPPORTS_DIRSTATE_V2 = rustmod is not None

propertycache = util.propertycache
filecache = scmutil.filecache
_rangemask = 0x7FFFFFFF

dirstatetuple = parsers.dirstatetuple


class repocache(filecache):
    """filecache for files in .hg/"""

    def join(self, obj, fname):
        return obj._opener.join(fname)


class rootcache(filecache):
    """filecache for files in the repository root"""

    def join(self, obj, fname):
        return obj._join(fname)


def _getfsnow(vfs):
    '''Get "now" timestamp on filesystem'''
    tmpfd, tmpname = vfs.mkstemp()
    try:
        return os.fstat(tmpfd)[stat.ST_MTIME]
    finally:
        os.close(tmpfd)
        vfs.unlink(tmpname)


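# Why stat a fresh temporary file instead of calling time.time()? The
# filesystem clock (e.g. on NFS) can differ from the system clock, and
# dirstate mtimes are compared against filesystem timestamps. A minimal
# standalone sketch of the same idea, for illustration only:
#
#     import os, tempfile
#     fd, name = tempfile.mkstemp()
#     try:
#         fs_now = os.fstat(fd).st_mtime
#     finally:
#         os.close(fd)
#         os.unlink(name)
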
@interfaceutil.implementer(intdirstate.idirstate)
class dirstate(object):
    def __init__(
        self,
        opener,
        ui,
        root,
        validate,
        sparsematchfn,
        nodeconstants,
        use_dirstate_v2,
    ):
        """Create a new dirstate object.

        opener is an open()-like callable that can be used to open the
        dirstate file; root is the root of the directory tracked by
        the dirstate.
        """
        self._use_dirstate_v2 = use_dirstate_v2
        self._nodeconstants = nodeconstants
        self._opener = opener
        self._validate = validate
        self._root = root
        self._sparsematchfn = sparsematchfn
        # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is
        # UNC path pointing to root share (issue4557)
        self._rootdir = pathutil.normasprefix(root)
        self._dirty = False
        self._lastnormaltime = 0
        self._ui = ui
        self._filecache = {}
        self._parentwriters = 0
        self._filename = b'dirstate'
        self._pendingfilename = b'%s.pending' % self._filename
        self._plchangecallbacks = {}
        self._origpl = None
        self._updatedfiles = set()
        self._mapcls = dirstatemap
        # Access and cache cwd early, so we don't access it for the first time
        # after a working-copy update caused it to not exist (accessing it then
        # raises an exception).
        self._cwd

    def prefetch_parents(self):
        """make sure the parents are loaded

        Used to avoid a race condition.
        """
        self._pl

    @contextlib.contextmanager
    def parentchange(self):
        """Context manager for handling dirstate parents.

        If an exception occurs in the scope of the context manager,
        the incoherent dirstate won't be written when wlock is
        released.
        """
        self._parentwriters += 1
        yield
        # Typically we want the "undo" step of a context manager in a
        # finally block so it happens even when an exception
        # occurs. In this case, however, we only want to decrement
        # parentwriters if the code in the with statement exits
        # normally, so we don't have a try/finally here on purpose.
        self._parentwriters -= 1

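    # Illustrative usage (not part of the original module): setparents()
    # below refuses to run outside this context manager, so callers are
    # expected to write, for example:
    #
    #     with repo.dirstate.parentchange():
    #         repo.dirstate.setparents(newnode)
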
    def pendingparentchange(self):
        """Returns true if the dirstate is in the middle of a set of changes
        that modify the dirstate parent.
        """
        return self._parentwriters > 0

    @propertycache
    def _map(self):
        """Return the dirstate contents (see documentation for dirstatemap)."""
        self._map = self._mapcls(
            self._ui,
            self._opener,
            self._root,
            self._nodeconstants,
            self._use_dirstate_v2,
        )
        return self._map

    @property
    def _sparsematcher(self):
        """The matcher for the sparse checkout.

        The working directory may not include every file from a manifest. The
        matcher obtained by this property will match a path if it is to be
        included in the working directory.
        """
        # TODO there is potential to cache this property. For now, the matcher
        # is resolved on every access. (But the called function does use a
        # cache to keep the lookup fast.)
        return self._sparsematchfn()

    @repocache(b'branch')
    def _branch(self):
        try:
            return self._opener.read(b"branch").strip() or b"default"
        except IOError as inst:
            if inst.errno != errno.ENOENT:
                raise
            return b"default"

    @property
    def _pl(self):
        return self._map.parents()

    def hasdir(self, d):
        return self._map.hastrackeddir(d)

    @rootcache(b'.hgignore')
    def _ignore(self):
        files = self._ignorefiles()
        if not files:
            return matchmod.never()

        pats = [b'include:%s' % f for f in files]
        return matchmod.match(self._root, b'', [], pats, warn=self._ui.warn)

    @propertycache
    def _slash(self):
        return self._ui.configbool(b'ui', b'slash') and pycompat.ossep != b'/'

    @propertycache
    def _checklink(self):
        return util.checklink(self._root)

    @propertycache
    def _checkexec(self):
        return bool(util.checkexec(self._root))

    @propertycache
    def _checkcase(self):
        return not util.fscasesensitive(self._join(b'.hg'))

    def _join(self, f):
        # much faster than os.path.join()
        # it's safe because f is always a relative path
        return self._rootdir + f

    def flagfunc(self, buildfallback):
        if self._checklink and self._checkexec:

            def f(x):
                try:
                    st = os.lstat(self._join(x))
                    if util.statislink(st):
                        return b'l'
                    if util.statisexec(st):
                        return b'x'
                except OSError:
                    pass
                return b''

            return f

        fallback = buildfallback()
        if self._checklink:

            def f(x):
                if os.path.islink(self._join(x)):
                    return b'l'
                if b'x' in fallback(x):
                    return b'x'
                return b''

            return f
        if self._checkexec:

            def f(x):
                if b'l' in fallback(x):
                    return b'l'
                if util.isexec(self._join(x)):
                    return b'x'
                return b''

            return f
        else:
            return fallback

    @propertycache
    def _cwd(self):
        # internal config: ui.forcecwd
        forcecwd = self._ui.config(b'ui', b'forcecwd')
        if forcecwd:
            return forcecwd
        return encoding.getcwd()

    def getcwd(self):
        """Return the path from which a canonical path is calculated.

        This path should be used to resolve file patterns or to convert
        canonical paths back to file paths for display. It shouldn't be
        used to get real file paths. Use vfs functions instead.
        """
        cwd = self._cwd
        if cwd == self._root:
            return b''
        # self._root ends with a path separator if self._root is '/' or 'C:\'
        rootsep = self._root
        if not util.endswithsep(rootsep):
            rootsep += pycompat.ossep
        if cwd.startswith(rootsep):
            return cwd[len(rootsep) :]
        else:
            # we're outside the repo. return an absolute path.
            return cwd

    def pathto(self, f, cwd=None):
        if cwd is None:
            cwd = self.getcwd()
        path = util.pathto(self._root, cwd, f)
        if self._slash:
            return util.pconvert(path)
        return path

    def __getitem__(self, key):
        """Return the current state of key (a filename) in the dirstate.

        States are:
        n  normal
        m  needs merging
        r  marked for removal
        a  marked for addition
        ?  not tracked
        """
        return self._map.get(key, (b"?",))[0]

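    # Illustrative example: indexing the dirstate returns the one-letter
    # state above, defaulting to b'?' for untracked paths, e.g.
    #
    #     state = repo.dirstate[b'setup.py']  # hypothetical filename
    #     assert state in (b'n', b'm', b'r', b'a', b'?')
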
    def __contains__(self, key):
        return key in self._map

    def __iter__(self):
        return iter(sorted(self._map))

    def items(self):
        return pycompat.iteritems(self._map)

    iteritems = items

    def parents(self):
        return [self._validate(p) for p in self._pl]

    def p1(self):
        return self._validate(self._pl[0])

    def p2(self):
        return self._validate(self._pl[1])

    def branch(self):
        return encoding.tolocal(self._branch)

    def setparents(self, p1, p2=None):
        """Set dirstate parents to p1 and p2.

        When moving from two parents to one, 'm' merged entries are
        adjusted to normal, and previous copy records are discarded and
        returned by the call.

        See localrepo.setparents()
        """
        if p2 is None:
            p2 = self._nodeconstants.nullid
        if self._parentwriters == 0:
            raise ValueError(
                b"cannot set dirstate parent outside of "
                b"dirstate.parentchange context manager"
            )

        self._dirty = True
        oldp2 = self._pl[1]
        if self._origpl is None:
            self._origpl = self._pl
        self._map.setparents(p1, p2)
        copies = {}
        if (
            oldp2 != self._nodeconstants.nullid
            and p2 == self._nodeconstants.nullid
        ):
            candidatefiles = self._map.non_normal_or_other_parent_paths()

            for f in candidatefiles:
                s = self._map.get(f)
                if s is None:
                    continue

                # Discard 'm' markers when moving away from a merge state
                if s[0] == b'm':
                    source = self._map.copymap.get(f)
                    if source:
                        copies[f] = source
                    self.normallookup(f)
                # Also fix up otherparent markers
                elif s[0] == b'n' and s[2] == -2:
                    source = self._map.copymap.get(f)
                    if source:
                        copies[f] = source
                    self.add(f)
        return copies

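    # Sketch of the return value (an assumption, values invented): when
    # collapsing from two parents to one, the discarded copy records are
    # handed back to the caller, e.g.
    #
    #     with repo.dirstate.parentchange():
    #         copies = repo.dirstate.setparents(p1)
    #     # copies might look like {b'renamed.txt': b'original.txt'}
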
    def setbranch(self, branch):
        self.__class__._branch.set(self, encoding.fromlocal(branch))
        f = self._opener(b'branch', b'w', atomictemp=True, checkambig=True)
        try:
            f.write(self._branch + b'\n')
            f.close()

            # make sure filecache has the correct stat info for _branch after
            # replacing the underlying file
            ce = self._filecache[b'_branch']
            if ce:
                ce.refresh()
        except:  # re-raises
            f.discard()
            raise

    def invalidate(self):
        """Causes the next access to reread the dirstate.

        This is different from localrepo.invalidatedirstate() because it always
        rereads the dirstate. Use localrepo.invalidatedirstate() if you want to
        check whether the dirstate has changed before rereading it."""

        for a in ("_map", "_branch", "_ignore"):
            if a in self.__dict__:
                delattr(self, a)
        self._lastnormaltime = 0
        self._dirty = False
        self._updatedfiles.clear()
        self._parentwriters = 0
        self._origpl = None

    def copy(self, source, dest):
        """Mark dest as a copy of source. Unmark dest if source is None."""
        if source == dest:
            return
        self._dirty = True
        if source is not None:
            self._map.copymap[dest] = source
            self._updatedfiles.add(source)
            self._updatedfiles.add(dest)
        elif self._map.copymap.pop(dest, None):
            self._updatedfiles.add(dest)

    def copied(self, file):
        return self._map.copymap.get(file, None)

    def copies(self):
        return self._map.copymap

    def _addpath(self, f, state, mode, size, mtime):
        oldstate = self[f]
        if state == b'a' or oldstate == b'r':
            scmutil.checkfilename(f)
            if self._map.hastrackeddir(f):
                raise error.Abort(
                    _(b'directory %r already in dirstate') % pycompat.bytestr(f)
                )
            # shadows
            for d in pathutil.finddirs(f):
                if self._map.hastrackeddir(d):
                    break
                entry = self._map.get(d)
                if entry is not None and entry[0] != b'r':
                    raise error.Abort(
                        _(b'file %r in dirstate clashes with %r')
                        % (pycompat.bytestr(d), pycompat.bytestr(f))
                    )
        self._dirty = True
        self._updatedfiles.add(f)
        self._map.addfile(f, oldstate, state, mode, size, mtime)

    def normal(self, f, parentfiledata=None):
        """Mark a file normal and clean.

        parentfiledata: (mode, size, mtime) of the clean file

        parentfiledata should be computed from memory (for mode and
        size), as close as possible to the point where the file was
        determined to be clean, to limit the risk of the file having
        been changed by an external process in the meantime."""
        if parentfiledata:
            (mode, size, mtime) = parentfiledata
        else:
            s = os.lstat(self._join(f))
            mode = s.st_mode
            size = s.st_size
            mtime = s[stat.ST_MTIME]
        self._addpath(f, b'n', mode, size & _rangemask, mtime & _rangemask)
        self._map.copymap.pop(f, None)
        if f in self._map.nonnormalset:
            self._map.nonnormalset.remove(f)
        if mtime > self._lastnormaltime:
            # Remember the most recent modification timeslot for status(),
            # to make sure we won't miss future size-preserving file content
            # modifications that happen within the same timeslot.
            self._lastnormaltime = mtime

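    # Note on the masking above: sizes and mtimes are stored as signed
    # 32-bit values, so they are truncated to 31 bits before recording.
    # For illustration:
    #
    #     _rangemask = 0x7FFFFFFF
    #     (2**31 + 5) & _rangemask  # -> 5; wraps instead of overflowing
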
    def normallookup(self, f):
        '''Mark a file normal, but possibly dirty.'''
        if self._pl[1] != self._nodeconstants.nullid:
            # if there is a merge going on and the file was either
            # in state 'm' (-1) or coming from other parent (-2) before
            # being removed, restore that state.
            entry = self._map.get(f)
            if entry is not None:
                if entry[0] == b'r' and entry[2] in (-1, -2):
                    source = self._map.copymap.get(f)
                    if entry[2] == -1:
                        self.merge(f)
                    elif entry[2] == -2:
                        self.otherparent(f)
                    if source:
                        self.copy(source, f)
                    return
                if entry[0] == b'm' or entry[0] == b'n' and entry[2] == -2:
                    return
        self._addpath(f, b'n', 0, -1, -1)
        self._map.copymap.pop(f, None)

    def otherparent(self, f):
        '''Mark as coming from the other parent, always dirty.'''
        if self._pl[1] == self._nodeconstants.nullid:
            raise error.Abort(
                _(b"setting %r to other parent only allowed in merges") % f
            )
        if f in self and self[f] == b'n':
            # merge-like
            self._addpath(f, b'm', 0, -2, -1)
        else:
            # add-like
            self._addpath(f, b'n', 0, -2, -1)
        self._map.copymap.pop(f, None)

    def add(self, f):
        '''Mark a file added.'''
        self._addpath(f, b'a', 0, -1, -1)
        self._map.copymap.pop(f, None)

    def remove(self, f):
        '''Mark a file removed.'''
        self._dirty = True
        oldstate = self[f]
        size = 0
        if self._pl[1] != self._nodeconstants.nullid:
            entry = self._map.get(f)
            if entry is not None:
                # backup the previous state
                if entry[0] == b'm':  # merge
                    size = -1
                elif entry[0] == b'n' and entry[2] == -2:  # other parent
                    size = -2
                    self._map.otherparentset.add(f)
        self._updatedfiles.add(f)
        self._map.removefile(f, oldstate, size)
        if size == 0:
            self._map.copymap.pop(f, None)

    def merge(self, f):
        '''Mark a file merged.'''
        if self._pl[1] == self._nodeconstants.nullid:
            return self.normallookup(f)
        return self.otherparent(f)

    def drop(self, f):
        '''Drop a file from the dirstate'''
        oldstate = self[f]
        if self._map.dropfile(f, oldstate):
            self._dirty = True
            self._updatedfiles.add(f)
            self._map.copymap.pop(f, None)

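    # Summary of the size sentinels used by the methods above (derived
    # from this file, stated here for orientation):
    #
    #     size == -1 on removal  -> the file was in merge state 'm'
    #     size == -2 on removal  -> the file came from the other parent
    #     size ==  0             -> plain removal; the copy record is dropped
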
    def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
        if exists is None:
            exists = os.path.lexists(os.path.join(self._root, path))
        if not exists:
            # Maybe a path component exists
            if not ignoremissing and b'/' in path:
                d, f = path.rsplit(b'/', 1)
                d = self._normalize(d, False, ignoremissing, None)
                folded = d + b"/" + f
            else:
                # No path components, preserve original case
                folded = path
        else:
            # recursively normalize leading directory components
            # against dirstate
            if b'/' in normed:
                d, f = normed.rsplit(b'/', 1)
                d = self._normalize(d, False, ignoremissing, True)
                r = self._root + b"/" + d
                folded = d + b"/" + util.fspath(f, r)
            else:
                folded = util.fspath(normed, self._root)
            storemap[normed] = folded

        return folded

    def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
        normed = util.normcase(path)
        folded = self._map.filefoldmap.get(normed, None)
        if folded is None:
            if isknown:
                folded = path
            else:
                folded = self._discoverpath(
                    path, normed, ignoremissing, exists, self._map.filefoldmap
                )
        return folded

    def _normalize(self, path, isknown, ignoremissing=False, exists=None):
        normed = util.normcase(path)
        folded = self._map.filefoldmap.get(normed, None)
        if folded is None:
            folded = self._map.dirfoldmap.get(normed, None)
        if folded is None:
            if isknown:
                folded = path
            else:
                # store discovered result in dirfoldmap so that future
                # normalizefile calls don't start matching directories
                folded = self._discoverpath(
                    path, normed, ignoremissing, exists, self._map.dirfoldmap
                )
        return folded

    def normalize(self, path, isknown=False, ignoremissing=False):
        """
        normalize the case of a pathname when on a casefolding filesystem

        isknown specifies whether the filename came from walking the
        disk, to avoid extra filesystem access.

        If ignoremissing is True, missing paths are returned
        unchanged. Otherwise, we try harder to normalize possibly
        existing path components.

        The normalized case is determined based on the following precedence:

        - version of name already stored in the dirstate
        - version of name stored on disk
        - version provided via command arguments
        """

        if self._checkcase:
            return self._normalize(path, isknown, ignoremissing)
        return path

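    # Illustrative example (an assumption): on a case-insensitive
    # filesystem where the dirstate tracks b'Docs/Readme.txt', user input
    # folds back to the stored spelling:
    #
    #     repo.dirstate.normalize(b'DOCS/README.TXT')  # -> b'Docs/Readme.txt'
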
    def clear(self):
        self._map.clear()
        self._lastnormaltime = 0
        self._updatedfiles.clear()
        self._dirty = True

    def rebuild(self, parent, allfiles, changedfiles=None):
        if changedfiles is None:
            # Rebuild entire dirstate
            to_lookup = allfiles
            to_drop = []
            lastnormaltime = self._lastnormaltime
            self.clear()
            self._lastnormaltime = lastnormaltime
        elif len(changedfiles) < 10:
            # Avoid turning allfiles into a set, which can be expensive if it's
            # large.
            to_lookup = []
            to_drop = []
            for f in changedfiles:
                if f in allfiles:
                    to_lookup.append(f)
                else:
                    to_drop.append(f)
        else:
            changedfilesset = set(changedfiles)
            to_lookup = changedfilesset & set(allfiles)
            to_drop = changedfilesset - to_lookup

        if self._origpl is None:
            self._origpl = self._pl
        self._map.setparents(parent, self._nodeconstants.nullid)

        for f in to_lookup:
            self.normallookup(f)
        for f in to_drop:
            self.drop(f)

        self._dirty = True

    def identity(self):
        """Return the identity of the dirstate itself to detect changes
        in storage.

        If the identity of the previous dirstate is equal to this one,
        writing out changes based on the former dirstate can keep
        consistency.
        """
        return self._map.identity

    def write(self, tr):
        if not self._dirty:
            return

        filename = self._filename
        if tr:
            # 'dirstate.write()' is not only for writing in-memory
            # changes out, but also for dropping ambiguous timestamps;
            # delayed writing would re-raise the "ambiguous timestamp"
            # issue. See also the wiki page below for detail:
            # https://www.mercurial-scm.org/wiki/DirstateTransactionPlan

            # emulate dropping timestamp in 'parsers.pack_dirstate'
            now = _getfsnow(self._opener)
            self._map.clearambiguoustimes(self._updatedfiles, now)

            # emulate that all 'dirstate.normal' results are written out
            self._lastnormaltime = 0
            self._updatedfiles.clear()

            # delay writing in-memory changes out
            tr.addfilegenerator(
                b'dirstate',
                (self._filename,),
                self._writedirstate,
                location=b'plain',
            )
            return

        st = self._opener(filename, b"w", atomictemp=True, checkambig=True)
        self._writedirstate(st)

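    # Usage sketch: with a transaction the write is deferred until the
    # transaction finalizes; without one it happens immediately.
    #
    #     repo.dirstate.write(tr)    # queued via tr.addfilegenerator(...)
    #     repo.dirstate.write(None)  # written to .hg/dirstate right away
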
    def addparentchangecallback(self, category, callback):
        """add a callback to be called when the wd parents are changed

        Callback will be called with the following arguments:
            dirstate, (oldp1, oldp2), (newp1, newp2)

        Category is a unique identifier to allow overwriting an old callback
        with a newer callback.
        """
        self._plchangecallbacks[category] = callback

    def _writedirstate(self, st):
        # notify callbacks about parents change
        if self._origpl is not None and self._origpl != self._pl:
            for c, callback in sorted(
                pycompat.iteritems(self._plchangecallbacks)
            ):
                callback(self, self._origpl, self._pl)
            self._origpl = None
        # use the modification time of the newly created temporary file as the
        # filesystem's notion of 'now'
        now = util.fstat(st)[stat.ST_MTIME] & _rangemask

        # a large enough 'delaywrite' prevents 'pack_dirstate' from dropping
        # the timestamp of each dirstate entry, which it would otherwise do
        # whenever 'now > mtime' does not hold
        delaywrite = self._ui.configint(b'debug', b'dirstate.delaywrite')
        if delaywrite > 0:
            # do we have any files to delay for?
            for f, e in pycompat.iteritems(self._map):
                if e[0] == b'n' and e[3] == now:
                    import time  # imported lazily to avoid a useless import

                    # rather than sleep n seconds, sleep until the next
                    # multiple of n seconds
                    clock = time.time()
                    start = int(clock) - (int(clock) % delaywrite)
                    end = start + delaywrite
                    time.sleep(end - clock)
                    now = end  # trust our estimate that the end is near now
                    break

        self._map.write(st, now)
        self._lastnormaltime = 0
        self._dirty = False

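    # Worked example of the sleep above (values hypothetical): with
    # delaywrite = 2 and clock = 103.4, start = 103 - (103 % 2) = 102 and
    # end = 104, so we sleep 0.6s and record 104 as "now", guaranteeing
    # that "now" has moved past the mtimes written in this pass.
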
    def _dirignore(self, f):
        if self._ignore(f):
            return True
        for p in pathutil.finddirs(f):
            if self._ignore(p):
                return True
        return False

    def _ignorefiles(self):
        files = []
        if os.path.exists(self._join(b'.hgignore')):
            files.append(self._join(b'.hgignore'))
        for name, path in self._ui.configitems(b"ui"):
            if name == b'ignore' or name.startswith(b'ignore.'):
                # we need to use os.path.join here rather than self._join
                # because path is arbitrary and user-specified
                files.append(os.path.join(self._rootdir, util.expandpath(path)))
        return files

    def _ignorefileandline(self, f):
        files = collections.deque(self._ignorefiles())
        visited = set()
        while files:
            i = files.popleft()
            patterns = matchmod.readpatternfile(
                i, self._ui.warn, sourceinfo=True
            )
            for pattern, lineno, line in patterns:
                kind, p = matchmod._patsplit(pattern, b'glob')
                if kind == b"subinclude":
                    if p not in visited:
                        files.append(p)
                    continue
                m = matchmod.match(
                    self._root, b'', [], [pattern], warn=self._ui.warn
                )
                if m(f):
                    return (i, lineno, line)
            visited.add(i)
        return (None, -1, b"")

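    # Illustrative return values (an assumption, paths invented): for an
    # ignored file this yields the ignore file, line number, and matching
    # line, e.g. (b'/repo/.hgignore', 3, b'*.pyc\n'); for a non-ignored
    # file it yields the sentinel (None, -1, b"").
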
    def _walkexplicit(self, match, subrepos):
        """Get stat data about the files explicitly specified by match.

        Return a triple (results, dirsfound, dirsnotfound).
        - results is a mapping from filename to stat result. It also contains
          listings mapping subrepos and .hg to None.
        - dirsfound is a list of files found to be directories.
        - dirsnotfound is a list of files that the dirstate thinks are
          directories and that were not found."""

        def badtype(mode):
            kind = _(b'unknown')
            if stat.S_ISCHR(mode):
                kind = _(b'character device')
            elif stat.S_ISBLK(mode):
                kind = _(b'block device')
            elif stat.S_ISFIFO(mode):
                kind = _(b'fifo')
            elif stat.S_ISSOCK(mode):
                kind = _(b'socket')
            elif stat.S_ISDIR(mode):
                kind = _(b'directory')
            return _(b'unsupported file type (type is %s)') % kind

        badfn = match.bad
        dmap = self._map
        lstat = os.lstat
        getkind = stat.S_IFMT
        dirkind = stat.S_IFDIR
        regkind = stat.S_IFREG
        lnkkind = stat.S_IFLNK
        join = self._join
        dirsfound = []
        foundadd = dirsfound.append
        dirsnotfound = []
        notfoundadd = dirsnotfound.append

        if not match.isexact() and self._checkcase:
            normalize = self._normalize
        else:
            normalize = None

        files = sorted(match.files())
        subrepos.sort()
        i, j = 0, 0
        while i < len(files) and j < len(subrepos):
            subpath = subrepos[j] + b"/"
            if files[i] < subpath:
                i += 1
                continue
            while i < len(files) and files[i].startswith(subpath):
                del files[i]
            j += 1

        if not files or b'' in files:
            files = [b'']
            # constructing the foldmap is expensive, so don't do it for the
            # common case where files is ['']
            normalize = None
        results = dict.fromkeys(subrepos)
        results[b'.hg'] = None

        for ff in files:
            if normalize:
                nf = normalize(ff, False, True)
            else:
                nf = ff
            if nf in results:
                continue

            try:
                st = lstat(join(nf))
                kind = getkind(st.st_mode)
                if kind == dirkind:
                    if nf in dmap:
                        # file replaced by dir on disk but still in dirstate
                        results[nf] = None
                    foundadd((nf, ff))
                elif kind == regkind or kind == lnkkind:
                    results[nf] = st
                else:
                    badfn(ff, badtype(kind))
                    if nf in dmap:
                        results[nf] = None
            except OSError as inst:  # nf not found on disk - it is dirstate only
                if nf in dmap:  # does it exactly match a missing file?
                    results[nf] = None
                else:  # does it match a missing directory?
                    if self._map.hasdir(nf):
                        notfoundadd(nf)
                    else:
                        badfn(ff, encoding.strtolocal(inst.strerror))

        # match.files() may contain explicitly-specified paths that shouldn't
        # be taken; drop them from the list of files found. dirsfound/notfound
        # aren't filtered here because they will be tested later.
        if match.anypats():
            for f in list(results):
                if f == b'.hg' or f in subrepos:
                    # keep sentinel to disable further out-of-repo walks
                    continue
                if not match(f):
                    del results[f]

        # Case insensitive filesystems cannot rely on lstat() failing to detect
        # a case-only rename. Prune the stat object for any file that does not
        # match the case in the filesystem, if there are multiple files that
        # normalize to the same path.
        if match.isexact() and self._checkcase:
            normed = {}

            for f, st in pycompat.iteritems(results):
                if st is None:
                    continue

                nc = util.normcase(f)
                paths = normed.get(nc)

                if paths is None:
                    paths = set()
                    normed[nc] = paths

                paths.add(f)

            for norm, paths in pycompat.iteritems(normed):
                if len(paths) > 1:
                    for path in paths:
                        folded = self._discoverpath(
                            path, norm, True, None, self._map.dirfoldmap
                        )
                        if path != folded:
                            results[path] = None

        return results, dirsfound, dirsnotfound

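    # Hedged sketch of consuming the _walkexplicit() triple above (variable
    # names illustrative):
    #
    #     results, dirsfound, dirsnotfound = self._walkexplicit(match, [])
    #     # results maps filename -> stat result (or None for .hg/subrepos),
    #     # dirsfound holds (normalized, original) pairs for explicit paths
    #     # that turned out to be directories on disk, and dirsnotfound holds
    #     # directories known only to the dirstate.
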
    def walk(self, match, subrepos, unknown, ignored, full=True):
        """
        Walk recursively through the directory tree, finding all files
        matched by match.

        If full is False, maybe skip some known-clean files.

        Return a dict mapping filename to stat-like object (either
        mercurial.osutil.stat instance or return value of os.stat()).

        """
        # full is a flag that extensions that hook into walk can use -- this
        # implementation doesn't use it at all. This satisfies the contract
        # because we only guarantee a "maybe".

        if ignored:
            ignore = util.never
            dirignore = util.never
        elif unknown:
            ignore = self._ignore
            dirignore = self._dirignore
        else:
            # if not unknown and not ignored, drop dir recursion and step 2
            ignore = util.always
            dirignore = util.always

        matchfn = match.matchfn
        matchalways = match.always()
        matchtdir = match.traversedir
        dmap = self._map
        listdir = util.listdir
        lstat = os.lstat
        dirkind = stat.S_IFDIR
        regkind = stat.S_IFREG
        lnkkind = stat.S_IFLNK
        join = self._join

        exact = skipstep3 = False
        if match.isexact():  # match.exact
            exact = True
            dirignore = util.always  # skip step 2
        elif match.prefix():  # match.match, no patterns
            skipstep3 = True

        if not exact and self._checkcase:
            normalize = self._normalize
            normalizefile = self._normalizefile
            skipstep3 = False
        else:
            normalize = self._normalize
            normalizefile = None

        # step 1: find all explicit files
        results, work, dirsnotfound = self._walkexplicit(match, subrepos)
        if matchtdir:
            for d in work:
                matchtdir(d[0])
            for d in dirsnotfound:
                matchtdir(d)

        skipstep3 = skipstep3 and not (work or dirsnotfound)
        work = [d for d in work if not dirignore(d[0])]

        # step 2: visit subdirectories
        def traverse(work, alreadynormed):
            wadd = work.append
            while work:
                tracing.counter('dirstate.walk work', len(work))
                nd = work.pop()
                visitentries = match.visitchildrenset(nd)
                if not visitentries:
                    continue
                if visitentries == b'this' or visitentries == b'all':
                    visitentries = None
                skip = None
                if nd != b'':
                    skip = b'.hg'
                try:
                    with tracing.log('dirstate.walk.traverse listdir %s', nd):
                        entries = listdir(join(nd), stat=True, skip=skip)
                except OSError as inst:
                    if inst.errno in (errno.EACCES, errno.ENOENT):
                        match.bad(
                            self.pathto(nd), encoding.strtolocal(inst.strerror)
                        )
                        continue
                    raise
                for f, kind, st in entries:
                    # Some matchers may return files in the visitentries set,
                    # instead of 'this', if the matcher explicitly mentions them
                    # and is not an exactmatcher. This is acceptable; we do not
                    # make any hard assumptions about file-or-directory below
                    # based on the presence of `f` in visitentries. If
                    # visitchildrenset returned a set, we can always skip the
                    # entries *not* in the set it provided regardless of whether
                    # they're actually a file or a directory.
                    if visitentries and f not in visitentries:
                        continue
                    if normalizefile:
                        # even though f might be a directory, we're only
                        # interested in comparing it to files currently in the
                        # dmap -- therefore normalizefile is enough
                        nf = normalizefile(
                            nd and (nd + b"/" + f) or f, True, True
                        )
                    else:
                        nf = nd and (nd + b"/" + f) or f
                    if nf not in results:
                        if kind == dirkind:
                            if not ignore(nf):
                                if matchtdir:
                                    matchtdir(nf)
                                wadd(nf)
                            if nf in dmap and (matchalways or matchfn(nf)):
                                results[nf] = None
                        elif kind == regkind or kind == lnkkind:
                            if nf in dmap:
                                if matchalways or matchfn(nf):
                                    results[nf] = st
                            elif (matchalways or matchfn(nf)) and not ignore(
                                nf
                            ):
                                # unknown file -- normalize if necessary
                                if not alreadynormed:
                                    nf = normalize(nf, False, True)
                                results[nf] = st
                        elif nf in dmap and (matchalways or matchfn(nf)):
                            results[nf] = None

        for nd, d in work:
            # alreadynormed means that traverse() doesn't have to do any
            # expensive directory normalization
            alreadynormed = not normalize or nd == d
            traverse([d], alreadynormed)

        for s in subrepos:
            del results[s]
        del results[b'.hg']

        # step 3: visit remaining files from dmap
        if not skipstep3 and not exact:
            # If a dmap file is not in results yet, it was either
            # a) not matching matchfn b) ignored, c) missing, or d) under a
            # symlink directory.
            if not results and matchalways:
                visit = [f for f in dmap]
            else:
                visit = [f for f in dmap if f not in results and matchfn(f)]
            visit.sort()

            if unknown:
                # unknown == True means we walked all dirs under the roots
                # that weren't ignored, and everything that matched was stat'ed
                # and is already in results.
                # The rest must thus be ignored or under a symlink.
                audit_path = pathutil.pathauditor(self._root, cached=True)

                for nf in iter(visit):
                    # If a stat for the same file was already added with a
                    # different case, don't add one for this, since that would
                    # make it appear as if the file exists under both names
                    # on disk.
                    if (
                        normalizefile
                        and normalizefile(nf, True, True) in results
                    ):
                        results[nf] = None
                    # Report ignored items in the dmap as long as they are not
                    # under a symlink directory.
                    elif audit_path.check(nf):
                        try:
                            results[nf] = lstat(join(nf))
                            # file was just ignored, no links, and exists
                        except OSError:
                            # file doesn't exist
                            results[nf] = None
                    else:
                        # It's either missing or under a symlink directory
                        # which we in this case report as missing
                        results[nf] = None
            else:
                # We may not have walked the full directory tree above,
                # so stat and check everything we missed.
                iv = iter(visit)
                for st in util.statfiles([join(i) for i in visit]):
                    results[next(iv)] = st
        return results

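    # The walk above proceeds in three steps: (1) stat the explicitly matched
    # files via _walkexplicit(), (2) recurse into directories that are not
    # ignored, and (3) sweep dirstate entries never visited on disk (e.g.
    # files under symlinked or ignored directories). A minimal hedged example
    # of driving it:
    #
    #     m = matchmod.always()
    #     stats = dirstate.walk(m, subrepos=[], unknown=True, ignored=False)
    #     # stats: {filename: stat-like object, or None when unavailable}
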
    def _rust_status(self, matcher, list_clean, list_ignored, list_unknown):
        # Force Rayon (Rust parallelism library) to respect the number of
        # workers. This is a temporary workaround until Rust code knows
        # how to read the config file.
        numcpus = self._ui.configint(b"worker", b"numcpus")
        if numcpus is not None:
            encoding.environ.setdefault(b'RAYON_NUM_THREADS', b'%d' % numcpus)

        workers_enabled = self._ui.configbool(b"worker", b"enabled", True)
        if not workers_enabled:
            encoding.environ[b"RAYON_NUM_THREADS"] = b"1"

        (
            lookup,
            modified,
            added,
            removed,
            deleted,
            clean,
            ignored,
            unknown,
            warnings,
            bad,
            traversed,
            dirty,
        ) = rustmod.status(
            self._map._rustmap,
            matcher,
            self._rootdir,
            self._ignorefiles(),
            self._checkexec,
            self._lastnormaltime,
            bool(list_clean),
            bool(list_ignored),
            bool(list_unknown),
            bool(matcher.traversedir),
        )

        self._dirty |= dirty

        if matcher.traversedir:
            for dir in traversed:
                matcher.traversedir(dir)

        if self._ui.warn:
            for item in warnings:
                if isinstance(item, tuple):
                    file_path, syntax = item
                    msg = _(b"%s: ignoring invalid syntax '%s'\n") % (
                        file_path,
                        syntax,
                    )
                    self._ui.warn(msg)
                else:
                    msg = _(b"skipping unreadable pattern file '%s': %s\n")
                    self._ui.warn(
                        msg
                        % (
                            pathutil.canonpath(
                                self._rootdir, self._rootdir, item
                            ),
                            b"No such file or directory",
                        )
                    )

        for (fn, message) in bad:
            matcher.bad(fn, encoding.strtolocal(message))

        status = scmutil.status(
            modified=modified,
            added=added,
            removed=removed,
            deleted=deleted,
            unknown=unknown,
            ignored=ignored,
            clean=clean,
        )
        return (lookup, status)

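    # Note on `dirty` above: the Rust status walk can itself refresh cached
    # data in the dirstate map (such as directory cache information in the
    # dirstate-v2 format) even when no file status changes. Folding that flag
    # into self._dirty is what triggers writing the refreshed .hg/dirstate
    # back to disk on the next save.
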
    def status(self, match, subrepos, ignored, clean, unknown):
        """Determine the status of the working copy relative to the
        dirstate and return a pair of (unsure, status), where status is of type
        scmutil.status and:

          unsure:
            files that might have been modified since the dirstate was
            written, but need to be read to be sure (size is the same
            but mtime differs)
          status.modified:
            files that have definitely been modified since the dirstate
            was written (different size or mode)
          status.clean:
            files that have definitely not been modified since the
            dirstate was written
        """
        listignored, listclean, listunknown = ignored, clean, unknown
        lookup, modified, added, unknown, ignored = [], [], [], [], []
        removed, deleted, clean = [], [], []

        dmap = self._map
        dmap.preload()

        use_rust = True

        allowed_matchers = (
            matchmod.alwaysmatcher,
            matchmod.exactmatcher,
            matchmod.includematcher,
        )

        if rustmod is None:
            use_rust = False
        elif self._checkcase:
            # Case-insensitive filesystems are not handled yet
            use_rust = False
        elif subrepos:
            use_rust = False
        elif sparse.enabled:
            use_rust = False
        elif not isinstance(match, allowed_matchers):
            # Some matchers have yet to be implemented
            use_rust = False

        if use_rust:
            try:
                return self._rust_status(
                    match, listclean, listignored, listunknown
                )
            except rustmod.FallbackError:
                pass

        def noop(f):
            pass

        dcontains = dmap.__contains__
        dget = dmap.__getitem__
        ladd = lookup.append  # aka "unsure"
        madd = modified.append
        aadd = added.append
        uadd = unknown.append if listunknown else noop
        iadd = ignored.append if listignored else noop
        radd = removed.append
        dadd = deleted.append
        cadd = clean.append if listclean else noop
        mexact = match.exact
        dirignore = self._dirignore
        checkexec = self._checkexec
        copymap = self._map.copymap
        lastnormaltime = self._lastnormaltime

        # We need to do full walks when either
        # - we're listing all clean files, or
        # - match.traversedir does something, because match.traversedir should
        #   be called for every dir in the working dir
        full = listclean or match.traversedir is not None
        for fn, st in pycompat.iteritems(
            self.walk(match, subrepos, listunknown, listignored, full=full)
        ):
            if not dcontains(fn):
                if (listignored or mexact(fn)) and dirignore(fn):
                    if listignored:
                        iadd(fn)
                else:
                    uadd(fn)
                continue

            # This is equivalent to 'state, mode, size, time = dmap[fn]' but not
            # written like that for performance reasons. dmap[fn] is not a
            # Python tuple in compiled builds. The CPython UNPACK_SEQUENCE
            # opcode has fast paths when the value to be unpacked is a tuple or
            # a list, but falls back to creating a full-fledged iterator in
            # general. That is much slower than simply accessing and storing the
            # tuple members one by one.
            t = dget(fn)
            state = t[0]
            mode = t[1]
            size = t[2]
            time = t[3]

            if not st and state in b"nma":
                dadd(fn)
            elif state == b'n':
                if (
                    size >= 0
                    and (
                        (size != st.st_size and size != st.st_size & _rangemask)
                        or ((mode ^ st.st_mode) & 0o100 and checkexec)
                    )
                    or size == -2  # other parent
                    or fn in copymap
                ):
                    if stat.S_ISLNK(st.st_mode) and size != st.st_size:
                        # issue6456: Size returned may be longer due to
                        # encryption on EXT-4 fscrypt, undecided.
                        ladd(fn)
                    else:
                        madd(fn)
                elif (
                    time != st[stat.ST_MTIME]
                    and time != st[stat.ST_MTIME] & _rangemask
                ):
                    ladd(fn)
                elif st[stat.ST_MTIME] == lastnormaltime:
                    # fn may have just been marked as normal and it may have
                    # changed in the same second without changing its size.
                    # This can happen if we quickly do multiple commits.
                    # Force lookup, so we don't miss such a racy file change.
                    ladd(fn)
                elif listclean:
                    cadd(fn)
            elif state == b'm':
                madd(fn)
            elif state == b'a':
                aadd(fn)
            elif state == b'r':
                radd(fn)
        status = scmutil.status(
            modified, added, removed, deleted, unknown, ignored, clean
        )
        return (lookup, status)

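    # Hedged usage sketch for status() above (matcher and flags illustrative):
    #
    #     unsure, st = dirstate.status(
    #         matchmod.always(), subrepos=[], ignored=False, clean=False,
    #         unknown=True,
    #     )
    #     # `unsure` files need a content comparison to classify for certain;
    #     # `st` is a scmutil.status tuple (modified, added, removed, ...).
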
    def matches(self, match):
        """
        return files in the dirstate (in whatever state) filtered by match
        """
        dmap = self._map
        if rustmod is not None:
            dmap = self._map._rustmap

        if match.always():
            return dmap.keys()
        files = match.files()
        if match.isexact():
            # fast path -- filter the other way around, since typically files is
            # much smaller than dmap
            return [f for f in files if f in dmap]
        if match.prefix() and all(fn in dmap for fn in files):
            # fast path -- all the values are known to be files, so just return
            # that
            return list(files)
        return [f for f in dmap if match(f)]

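    # matches() above is a pure filter rather than a status check: it ignores
    # on-disk state entirely and returns the dirstate filenames accepted by
    # the matcher, with fast paths for always/exact/prefix matchers tried
    # before the generic comprehension.
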
    def _actualfilename(self, tr):
        if tr:
            return self._pendingfilename
        else:
            return self._filename

    def savebackup(self, tr, backupname):
        '''Save current dirstate into backup file'''
        filename = self._actualfilename(tr)
        assert backupname != filename

        # use '_writedirstate' instead of 'write' to write changes certainly,
        # because the latter omits writing out if transaction is running.
        # output file will be used to create backup of dirstate at this point.
        if self._dirty or not self._opener.exists(filename):
            self._writedirstate(
                self._opener(filename, b"w", atomictemp=True, checkambig=True)
            )

        if tr:
            # ensure that subsequent tr.writepending returns True for
            # changes written out above, even if dirstate is never
            # changed after this
            tr.addfilegenerator(
                b'dirstate',
                (self._filename,),
                self._writedirstate,
                location=b'plain',
            )

            # ensure that pending file written above is unlinked at
            # failure, even if tr.writepending isn't invoked until the
            # end of this transaction
            tr.registertmp(filename, location=b'plain')

        self._opener.tryunlink(backupname)
        # hardlink backup is okay because _writedirstate is always called
        # with an "atomictemp=True" file.
        util.copyfile(
            self._opener.join(filename),
            self._opener.join(backupname),
            hardlink=True,
        )

    def restorebackup(self, tr, backupname):
        '''Restore dirstate by backup file'''
        # this "invalidate()" prevents "wlock.release()" from writing
        # changes of dirstate out after restoring from backup file
        self.invalidate()
        filename = self._actualfilename(tr)
        o = self._opener
        if util.samefile(o.join(backupname), o.join(filename)):
            o.unlink(backupname)
        else:
            o.rename(backupname, filename, checkambig=True)

    def clearbackup(self, tr, backupname):
        '''Clear backup file'''
        self._opener.unlink(backupname)

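    # Backup lifecycle sketch for the three methods above (hedged; real
    # callers interleave this with transaction and lock handling):
    #
    #     dirstate.savebackup(tr, b'dirstate.backup')
    #     try:
    #         ...  # mutate the working copy
    #     except Exception:
    #         dirstate.restorebackup(tr, b'dirstate.backup')
    #     else:
    #         dirstate.clearbackup(tr, b'dirstate.backup')

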
class dirstatemap(object):
    """Map encapsulating the dirstate's contents.

    The dirstate contains the following state:

    - `identity` is the identity of the dirstate file, which can be used to
      detect when changes have occurred to the dirstate file.

    - `parents` is a pair containing the parents of the working copy. The
      parents are updated by calling `setparents`.

    - the state map maps filenames to tuples of (state, mode, size, mtime),
      where state is a single character representing 'normal', 'added',
      'removed', or 'merged'. It is read by treating the dirstate as a
      dict. File state is updated by calling the `addfile`, `removefile` and
      `dropfile` methods.

    - `copymap` maps destination filenames to their source filename.

    The dirstate also provides the following views onto the state:

    - `nonnormalset` is a set of the filenames that have state other
      than 'normal', or are normal but have an mtime of -1 ('normallookup').

    - `otherparentset` is a set of the filenames that are marked as coming
      from the second parent when the dirstate is currently being merged.

    - `filefoldmap` is a dict mapping normalized filenames to the denormalized
      form that they appear as in the dirstate.

    - `dirfoldmap` is a dict mapping normalized directory names to the
      denormalized form that they appear as in the dirstate.
    """

    def __init__(self, ui, opener, root, nodeconstants, use_dirstate_v2):
        self._ui = ui
        self._opener = opener
        self._root = root
        self._filename = b'dirstate'
        self._nodelen = 20
        self._nodeconstants = nodeconstants
        assert (
            not use_dirstate_v2
        ), "should have detected unsupported requirement"

        self._parents = None
        self._dirtyparents = False

        # for consistent view between _pl() and _read() invocations
        self._pendingmode = None

    @propertycache
    def _map(self):
        self._map = {}
        self.read()
        return self._map

    @propertycache
    def copymap(self):
        self.copymap = {}
        self._map
        return self.copymap

    def clear(self):
        self._map.clear()
        self.copymap.clear()
        self.setparents(self._nodeconstants.nullid, self._nodeconstants.nullid)
        util.clearcachedproperty(self, b"_dirs")
        util.clearcachedproperty(self, b"_alldirs")
        util.clearcachedproperty(self, b"filefoldmap")
        util.clearcachedproperty(self, b"dirfoldmap")
        util.clearcachedproperty(self, b"nonnormalset")
        util.clearcachedproperty(self, b"otherparentset")

    def items(self):
        return pycompat.iteritems(self._map)

    # forward for python2,3 compat
    iteritems = items

    def __len__(self):
        return len(self._map)

    def __iter__(self):
        return iter(self._map)

    def get(self, key, default=None):
        return self._map.get(key, default)

    def __contains__(self, key):
        return key in self._map

    def __getitem__(self, key):
        return self._map[key]

    def keys(self):
        return self._map.keys()

    def preload(self):
        """Loads the underlying data, if it's not already loaded"""
        self._map

    def addfile(self, f, oldstate, state, mode, size, mtime):
        """Add a tracked file to the dirstate."""
        if oldstate in b"?r" and "_dirs" in self.__dict__:
            self._dirs.addpath(f)
        if oldstate == b"?" and "_alldirs" in self.__dict__:
            self._alldirs.addpath(f)
        self._map[f] = dirstatetuple(state, mode, size, mtime)
        if state != b'n' or mtime == -1:
            self.nonnormalset.add(f)
        if size == -2:
            self.otherparentset.add(f)

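    # Hedged example of an addfile() call (values illustrative): recording
    # `foo` as clean ('n') with mode 0o644, size 12 and a known mtime keeps
    # it out of nonnormalset, whereas mtime == -1 would force a lookup on the
    # next status:
    #
    #     dmap.addfile(b'foo', b'?', b'n', 0o644, 12, mtime)
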
    def removefile(self, f, oldstate, size):
        """
        Mark a file as removed in the dirstate.

        The `size` parameter is used to store sentinel values that indicate
        the file's previous state. In the future, we should refactor this
        to be more explicit about what that state is.
        """
        if oldstate not in b"?r" and "_dirs" in self.__dict__:
            self._dirs.delpath(f)
        if oldstate == b"?" and "_alldirs" in self.__dict__:
            self._alldirs.addpath(f)
        if "filefoldmap" in self.__dict__:
            normed = util.normcase(f)
            self.filefoldmap.pop(normed, None)
        self._map[f] = dirstatetuple(b'r', 0, size, 0)
        self.nonnormalset.add(f)

    def dropfile(self, f, oldstate):
        """
        Remove a file from the dirstate. Returns True if the file was
        previously recorded.
        """
        exists = self._map.pop(f, None) is not None
        if exists:
            if oldstate != b"r" and "_dirs" in self.__dict__:
                self._dirs.delpath(f)
            if "_alldirs" in self.__dict__:
                self._alldirs.delpath(f)
        if "filefoldmap" in self.__dict__:
            normed = util.normcase(f)
            self.filefoldmap.pop(normed, None)
        self.nonnormalset.discard(f)
        return exists

    def clearambiguoustimes(self, files, now):
        for f in files:
            e = self.get(f)
            if e is not None and e[0] == b'n' and e[3] == now:
                self._map[f] = dirstatetuple(e[0], e[1], e[2], -1)
                self.nonnormalset.add(f)

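    # clearambiguoustimes() above handles the classic dirstate race: a file
    # modified in the same second in which the dirstate is written cannot be
    # distinguished from a clean one by (size, mtime) alone, so its mtime is
    # stored as -1 and it joins nonnormalset, forcing a content check on the
    # next status.
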
    def nonnormalentries(self):
        '''Compute the nonnormal dirstate entries from the dmap'''
        try:
            return parsers.nonnormalotherparententries(self._map)
        except AttributeError:
            nonnorm = set()
            otherparent = set()
            for fname, e in pycompat.iteritems(self._map):
                if e[0] != b'n' or e[3] == -1:
                    nonnorm.add(fname)
                if e[0] == b'n' and e[2] == -2:
                    otherparent.add(fname)
            return nonnorm, otherparent

    @propertycache
    def filefoldmap(self):
        """Returns a dictionary mapping normalized case paths to their
        non-normalized versions.
        """
        try:
            makefilefoldmap = parsers.make_file_foldmap
        except AttributeError:
            pass
        else:
            return makefilefoldmap(
                self._map, util.normcasespec, util.normcasefallback
            )

        f = {}
        normcase = util.normcase
        for name, s in pycompat.iteritems(self._map):
            if s[0] != b'r':
                f[normcase(name)] = name
        f[b'.'] = b'.'  # prevents useless util.fspath() invocation
        return f

    def hastrackeddir(self, d):
        """
        Returns True if the dirstate contains a tracked (not removed) file
        in this directory.
        """
        return d in self._dirs

    def hasdir(self, d):
        """
        Returns True if the dirstate contains a file (tracked or removed)
        in this directory.
        """
        return d in self._alldirs

    @propertycache
    def _dirs(self):
        return pathutil.dirs(self._map, b'r')

    @propertycache
    def _alldirs(self):
        return pathutil.dirs(self._map)

    def _opendirstatefile(self):
        fp, mode = txnutil.trypending(self._root, self._opener, self._filename)
        if self._pendingmode is not None and self._pendingmode != mode:
            fp.close()
            raise error.Abort(
                _(b'working directory state may be changed parallelly')
            )
        self._pendingmode = mode
        return fp

    def parents(self):
        if not self._parents:
            try:
                fp = self._opendirstatefile()
                st = fp.read(2 * self._nodelen)
                fp.close()
            except IOError as err:
                if err.errno != errno.ENOENT:
                    raise
                # File doesn't exist, so the current state is empty
                st = b''

            l = len(st)
            if l == self._nodelen * 2:
                self._parents = (
                    st[: self._nodelen],
                    st[self._nodelen : 2 * self._nodelen],
                )
            elif l == 0:
                self._parents = (
                    self._nodeconstants.nullid,
                    self._nodeconstants.nullid,
                )
            else:
                raise error.Abort(
                    _(b'working directory state appears damaged!')
                )

        return self._parents

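    # On-disk layout assumed by parents() above: the first 2 * _nodelen bytes
    # of the dirstate file (40 bytes with SHA-1 nodes) hold the two parent
    # node ids; an empty or missing file means both parents are the null id,
    # and any other length is treated as corruption.
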
    def setparents(self, p1, p2):
        self._parents = (p1, p2)
        self._dirtyparents = True

1674 def read(self):
1677 def read(self):
1675 # ignore HG_PENDING because identity is used only for writing
        # ignore HG_PENDING because identity is used only for writing
        self.identity = util.filestat.frompath(
            self._opener.join(self._filename)
        )

        try:
            fp = self._opendirstatefile()
            try:
                st = fp.read()
            finally:
                fp.close()
        except IOError as err:
            if err.errno != errno.ENOENT:
                raise
            return
        if not st:
            return

        if util.safehasattr(parsers, b'dict_new_presized'):
            # Make an estimate of the number of files in the dirstate based on
            # its size. This trades wasting some memory for avoiding costly
            # resizes. Each entry has a prefix of 17 bytes followed by one or
            # two path names. Studies on various large-scale real-world
            # repositories found 54 bytes to be a reasonable upper limit for
            # the average path name. Copy entries are ignored for the sake of
            # this estimate.
            self._map = parsers.dict_new_presized(len(st) // 71)

        # Python's garbage collector triggers a GC each time a certain number
        # of container objects (the number being defined by
        # gc.get_threshold()) are allocated. parse_dirstate creates a tuple
        # for each file in the dirstate. The C version then immediately marks
        # them as not to be tracked by the collector. However, this has no
        # effect on when GCs are triggered, only on what objects the GC looks
        # into. This means that O(number of files) GCs are unavoidable.
        # Depending on when in the process's lifetime the dirstate is parsed,
        # this can get very expensive. As a workaround, disable GC while
        # parsing the dirstate.
        #
        # (we cannot decorate the function directly since it is in a C module)
        parse_dirstate = util.nogc(parsers.parse_dirstate)
        p = parse_dirstate(self._map, self.copymap, st)
        if not self._dirtyparents:
            self.setparents(*p)

        # Avoid excess attribute lookups by fast pathing certain checks
        self.__contains__ = self._map.__contains__
        self.__getitem__ = self._map.__getitem__
        self.get = self._map.get
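The 71-byte figure above is just the 17-byte fixed entry prefix plus the ~54-byte average-path bound. A minimal sketch of the same pre-sizing idea in Rust (illustrative helper, not the actual hg-core code):

use std::collections::HashMap;

/// Estimate the entry count from the on-disk dirstate size: ~17 bytes of
/// fixed prefix per entry plus ~54 bytes of average path name, i.e. ~71.
fn presized_map(on_disk_len: usize) -> HashMap<Vec<u8>, u64> {
    HashMap::with_capacity(on_disk_len / 71)
}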

    def write(self, st, now):
        st.write(
            parsers.pack_dirstate(self._map, self.copymap, self.parents(), now)
        )
        st.close()
        self._dirtyparents = False
        self.nonnormalset, self.otherparentset = self.nonnormalentries()

    @propertycache
    def nonnormalset(self):
        nonnorm, otherparents = self.nonnormalentries()
        self.otherparentset = otherparents
        return nonnorm

    @propertycache
    def otherparentset(self):
        nonnorm, otherparents = self.nonnormalentries()
        self.nonnormalset = nonnorm
        return otherparents

    def non_normal_or_other_parent_paths(self):
        return self.nonnormalset.union(self.otherparentset)
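Both cached sets above come out of a single pass over the map, which is why each property stores the sibling set it computed as a side effect. A sketch of that one-pass shape in Rust (the entry layout is a made-up stand-in):

use std::collections::HashSet;

/// Stand-in entry layout for the sketch: (path, is_normal, from_other_parent).
type Entry = (Vec<u8>, bool, bool);

/// One pass over the entries yields both sets, which is why computing either
/// cached property can fill in the other for free.
fn non_normal_entries(entries: &[Entry]) -> (HashSet<Vec<u8>>, HashSet<Vec<u8>>) {
    let mut non_normal = HashSet::new();
    let mut other_parent = HashSet::new();
    for (path, is_normal, from_other_parent) in entries {
        if !is_normal {
            non_normal.insert(path.clone());
        }
        if *from_other_parent {
            other_parent.insert(path.clone());
        }
    }
    (non_normal, other_parent)
}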

    @propertycache
    def identity(self):
        # Accessing self._map runs the parsing code, which assigns
        # self.identity as a plain instance attribute; that assignment
        # shadows this property, so the return below picks up the freshly
        # recorded file stat.
        self._map
        return self.identity

    @propertycache
    def dirfoldmap(self):
        f = {}
        normcase = util.normcase
        for name in self._dirs:
            f[normcase(name)] = name
        return f
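dirfoldmap is a fold map: it lets case-insensitive lookups recover the on-disk spelling of a directory name. A tiny Rust sketch of the same construction (lowercasing stands in for Mercurial's platform-specific normcase):

use std::collections::HashMap;

/// Illustrative fold map: case-normalized name -> on-disk spelling.
fn dir_fold_map<'a>(
    dirs: impl Iterator<Item = &'a str>,
) -> HashMap<String, &'a str> {
    dirs.map(|name| (name.to_lowercase(), name)).collect()
}

// dir_fold_map(["Foo/Bar", "baz"].into_iter()) maps "foo/bar" -> "Foo/Bar".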


if rustmod is not None:

    class dirstatemap(object):
        def __init__(self, ui, opener, root, nodeconstants, use_dirstate_v2):
            self._use_dirstate_v2 = use_dirstate_v2
            self._nodeconstants = nodeconstants
            self._ui = ui
            self._opener = opener
            self._root = root
            self._filename = b'dirstate'
            self._nodelen = 20  # Also update Rust code when changing this!
            self._parents = None
            self._dirtyparents = False

            # for consistent view between _pl() and _read() invocations
            self._pendingmode = None

            self._use_dirstate_tree = self._ui.configbool(
                b"experimental",
                b"dirstate-tree.in-memory",
                False,
            )

        def addfile(self, *args, **kwargs):
            return self._rustmap.addfile(*args, **kwargs)

        def removefile(self, *args, **kwargs):
            return self._rustmap.removefile(*args, **kwargs)

        def dropfile(self, *args, **kwargs):
            return self._rustmap.dropfile(*args, **kwargs)

        def clearambiguoustimes(self, *args, **kwargs):
            return self._rustmap.clearambiguoustimes(*args, **kwargs)

        def nonnormalentries(self):
            return self._rustmap.nonnormalentries()

        def get(self, *args, **kwargs):
            return self._rustmap.get(*args, **kwargs)

        @property
        def copymap(self):
            return self._rustmap.copymap()

        def preload(self):
            self._rustmap

        def clear(self):
            self._rustmap.clear()
            self.setparents(
                self._nodeconstants.nullid, self._nodeconstants.nullid
            )
            util.clearcachedproperty(self, b"_dirs")
            util.clearcachedproperty(self, b"_alldirs")
            util.clearcachedproperty(self, b"dirfoldmap")

        def items(self):
            return self._rustmap.items()

        def keys(self):
            return iter(self._rustmap)

        def __contains__(self, key):
            return key in self._rustmap

        def __getitem__(self, item):
            return self._rustmap[item]

        def __len__(self):
            return len(self._rustmap)

        def __iter__(self):
            return iter(self._rustmap)

        # forward for python2,3 compat
        iteritems = items

        def _opendirstatefile(self):
            fp, mode = txnutil.trypending(
                self._root, self._opener, self._filename
            )
            if self._pendingmode is not None and self._pendingmode != mode:
                fp.close()
                raise error.Abort(
                    _(b'working directory state may be changed parallelly')
                )
            self._pendingmode = mode
            return fp

        def setparents(self, p1, p2):
            self._parents = (p1, p2)
            self._dirtyparents = True

        def parents(self):
            if not self._parents:
                if self._use_dirstate_v2:
                    offset = len(rustmod.V2_FORMAT_MARKER)
                else:
                    offset = 0
                read_len = offset + self._nodelen * 2
                try:
                    fp = self._opendirstatefile()
                    st = fp.read(read_len)
                    fp.close()
                except IOError as err:
                    if err.errno != errno.ENOENT:
                        raise
                    # File doesn't exist, so the current state is empty
                    st = b''

                l = len(st)
                if l == read_len:
                    st = st[offset:]
                    self._parents = (
                        st[: self._nodelen],
                        st[self._nodelen : 2 * self._nodelen],
                    )
                elif l == 0:
                    self._parents = (
                        self._nodeconstants.nullid,
                        self._nodeconstants.nullid,
                    )
                else:
                    raise error.Abort(
                        _(b'working directory state appears damaged!')
                    )

            return self._parents
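parents() reads only the fixed-size header: an optional dirstate-v2 format marker followed by two 20-byte parent nodes. A sketch of that header parse in Rust (hypothetical helper, not the hg-core API; NODE_LEN mirrors _nodelen above):

/// Pull the two parent node IDs out of the dirstate header, after an
/// optional format-marker prefix (0 bytes for v1).
fn parse_parents(data: &[u8], marker_len: usize) -> Option<(&[u8], &[u8])> {
    const NODE_LEN: usize = 20;
    let body = data.get(marker_len..)?;
    if body.len() < 2 * NODE_LEN {
        return None; // empty or truncated file: caller falls back to null ids
    }
    Some((&body[..NODE_LEN], &body[NODE_LEN..2 * NODE_LEN]))
}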

        @propertycache
        def _rustmap(self):
            """
            Fills the Dirstatemap when called.
            """
            # ignore HG_PENDING because identity is used only for writing
            self.identity = util.filestat.frompath(
                self._opener.join(self._filename)
            )

            try:
                fp = self._opendirstatefile()
                try:
                    st = fp.read()
                finally:
                    fp.close()
            except IOError as err:
                if err.errno != errno.ENOENT:
                    raise
                st = b''

            self._rustmap, parents = rustmod.DirstateMap.new(
                self._use_dirstate_tree, self._use_dirstate_v2, st
            )

            if parents and not self._dirtyparents:
                self.setparents(*parents)

            self.__contains__ = self._rustmap.__contains__
            self.__getitem__ = self._rustmap.__getitem__
            self.get = self._rustmap.get
            return self._rustmap
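_rustmap is the usual lazy-load pattern: the first access reads and parses .hg/dirstate, later accesses hit the cache. A rough Rust analogue, assuming the once_cell crate and a placeholder map type:

use once_cell::unsync::OnceCell;

/// Illustrative lazy holder: parsed on first access, cached afterwards.
/// `Vec<(Vec<u8>, u32)>` is a placeholder for the real map type.
struct LazyMap {
    cell: OnceCell<Vec<(Vec<u8>, u32)>>,
}

impl LazyMap {
    fn get_or_load(&self) -> &Vec<(Vec<u8>, u32)> {
        self.cell.get_or_init(|| {
            // Reading and parsing `.hg/dirstate` would happen here.
            Vec::new()
        })
    }
}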

        def write(self, st, now):
            parents = self.parents()
            packed = self._rustmap.write(
                self._use_dirstate_v2, parents[0], parents[1], now
            )
            st.write(packed)
            st.close()
            self._dirtyparents = False

        @propertycache
        def filefoldmap(self):
            """Returns a dictionary mapping normalized case paths to their
            non-normalized versions.
            """
            return self._rustmap.filefoldmapasdict()

        def hastrackeddir(self, d):
            self._dirs  # Trigger Python's propertycache
            return self._rustmap.hastrackeddir(d)

        def hasdir(self, d):
            self._dirs  # Trigger Python's propertycache
            return self._rustmap.hasdir(d)

        @propertycache
        def _dirs(self):
            return self._rustmap.getdirs()

        @propertycache
        def _alldirs(self):
            return self._rustmap.getalldirs()

        @propertycache
        def identity(self):
            self._rustmap
            return self.identity

        @property
        def nonnormalset(self):
            nonnorm = self._rustmap.non_normal_entries()
            return nonnorm

        @propertycache
        def otherparentset(self):
            otherparents = self._rustmap.other_parent_entries()
            return otherparents

        def non_normal_or_other_parent_paths(self):
            return self._rustmap.non_normal_or_other_parent_paths()

        @propertycache
        def dirfoldmap(self):
            f = {}
            normcase = util.normcase
            for name in self._dirs:
                f[normcase(name)] = name
            return f
@@ -1,942 +1,947 @@ status.rs
// status.rs
//
// Copyright 2019 Raphaël Gomès <rgomes@octobus.net>
//
// This software may be used and distributed according to the terms of the
// GNU General Public License version 2 or any later version.

//! Rust implementation of dirstate.status (dirstate.py).
//! It is currently missing a lot of functionality compared to the Python one
//! and will only be triggered in narrow cases.

use crate::dirstate_tree::on_disk::DirstateV2ParseError;
use crate::utils::path_auditor::PathAuditor;
use crate::{
    dirstate::SIZE_FROM_OTHER_PARENT,
    filepatterns::PatternFileWarning,
    matchers::{get_ignore_function, Matcher, VisitChildrenSet},
    utils::{
        files::{find_dirs, HgMetadata},
        hg_path::{
            hg_path_to_path_buf, os_string_to_hg_path_buf, HgPath, HgPathBuf,
            HgPathError,
        },
    },
    CopyMap, DirstateEntry, DirstateMap, EntryState, FastHashMap,
    PatternError,
};
use lazy_static::lazy_static;
use micro_timer::timed;
use rayon::prelude::*;
use std::{
    borrow::Cow,
    collections::HashSet,
    fmt,
    fs::{read_dir, DirEntry},
    io::ErrorKind,
    ops::Deref,
    path::{Path, PathBuf},
};

/// Wrong type of file from a `BadMatch`
/// Note: a lot of those don't exist on all platforms.
#[derive(Debug, Copy, Clone)]
pub enum BadType {
    CharacterDevice,
    BlockDevice,
    FIFO,
    Socket,
    Directory,
    Unknown,
}

impl fmt::Display for BadType {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.write_str(match self {
            BadType::CharacterDevice => "character device",
            BadType::BlockDevice => "block device",
            BadType::FIFO => "fifo",
            BadType::Socket => "socket",
            BadType::Directory => "directory",
            BadType::Unknown => "unknown",
        })
    }
}

/// Was explicitly matched but cannot be found/accessed
#[derive(Debug, Copy, Clone)]
pub enum BadMatch {
    OsError(i32),
    BadType(BadType),
}

/// Enum used to dispatch new status entries into the right collections.
/// Is similar to `crate::EntryState`, but represents the transient state of
/// entries during the lifetime of a command.
#[derive(Debug, Copy, Clone)]
pub enum Dispatch {
    Unsure,
    Modified,
    Added,
    Removed,
    Deleted,
    Clean,
    Unknown,
    Ignored,
    /// Empty dispatch, the file is not worth listing
    None,
    /// Was explicitly matched but cannot be found/accessed
    Bad(BadMatch),
    Directory {
        /// True if the directory used to be a file in the dmap so we can say
        /// that it's been removed.
        was_file: bool,
    },
}

type IoResult<T> = std::io::Result<T>;

/// `Box<dyn Trait>` is syntactic sugar for `Box<dyn Trait + 'static>`, so add
/// an explicit lifetime here to not fight `'static` bounds "out of nowhere".
pub type IgnoreFnType<'a> =
    Box<dyn for<'r> Fn(&'r HgPath) -> bool + Sync + 'a>;

/// We have a good mix of owned (from directory traversal) and borrowed (from
/// the dirstate/explicit) paths, this comes up a lot.
pub type HgPathCow<'a> = Cow<'a, HgPath>;

/// A path with its computed ``Dispatch`` information
type DispatchedPath<'a> = (HgPathCow<'a>, Dispatch);

/// The conversion from `HgPath` to a real fs path failed.
/// `22` is the error code for "Invalid argument"
const INVALID_PATH_DISPATCH: Dispatch = Dispatch::Bad(BadMatch::OsError(22));

/// Dates and times that are outside the 31-bit signed range are compared
/// modulo 2^31. This should prevent hg from behaving badly with very large
/// files or corrupt dates while still having a high probability of detecting
/// changes. (issue2608)
/// TODO I haven't found a way of having `b` be `Into<i32>`, since `From<u64>`
/// is not defined for `i32`, and there is no `As` trait. This forces the
/// caller to cast `b` as `i32`.
fn mod_compare(a: i32, b: i32) -> bool {
    a & i32::max_value() != b & i32::max_value()
}
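Masking both operands with i32::max_value() keeps only the low 31 bits, so a size that overflowed during the `as i32` cast still compares equal to its on-disk truncation. A quick self-contained demonstration (values invented for the demo; mod_compare repeated so it runs standalone):

fn mod_compare(a: i32, b: i32) -> bool {
    a & i32::max_value() != b & i32::max_value()
}

fn main() {
    // A size that exceeds 31 bits wraps when cast to i32: 5 and 5 + 2^31
    // share the same low 31 bits, so this entry is *not* flagged as changed.
    let st_size: i64 = 5 + (1i64 << 31);
    assert!(!mod_compare(5, st_size as i32));
    // A real difference within the low 31 bits is still detected.
    assert!(mod_compare(5, 6));
}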

/// Return a sorted list containing information about the entries
/// in the directory.
///
/// * `skip_dot_hg` - Return an empty vec if `path` contains a `.hg` directory
fn list_directory(
    path: impl AsRef<Path>,
    skip_dot_hg: bool,
) -> std::io::Result<Vec<(HgPathBuf, DirEntry)>> {
    let mut results = vec![];
    let entries = read_dir(path.as_ref())?;

    for entry in entries {
        let entry = entry?;
        let filename = os_string_to_hg_path_buf(entry.file_name())?;
        let file_type = entry.file_type()?;
        if skip_dot_hg && filename.as_bytes() == b".hg" && file_type.is_dir() {
            return Ok(vec![]);
        } else {
            results.push((filename, entry))
        }
    }

    results.sort_unstable_by_key(|e| e.0.clone());
    Ok(results)
}
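Note the skip_dot_hg early return: one hit on a `.hg` subdirectory discards the whole listing, which is how nested repositories are skipped in a single step. A std-only sketch of the same shape (the crate-specific HgPathBuf plumbing replaced with String for illustration):

use std::fs::read_dir;
use std::io;
use std::path::Path;

/// List a directory sorted by name, returning an empty list as soon as a
/// `.hg` subdirectory marks it as a nested repository.
fn list_dir_sorted(path: &Path) -> io::Result<Vec<String>> {
    let mut names = Vec::new();
    for entry in read_dir(path)? {
        let entry = entry?;
        let name = entry.file_name().to_string_lossy().into_owned();
        if name == ".hg" && entry.file_type()?.is_dir() {
            return Ok(Vec::new()); // nested repo: skip this directory entirely
        }
        names.push(name);
    }
    names.sort_unstable();
    Ok(names)
}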

/// The file corresponding to the dirstate entry was found on the filesystem.
fn dispatch_found(
    filename: impl AsRef<HgPath>,
    entry: DirstateEntry,
    metadata: HgMetadata,
    copy_map: &CopyMap,
    options: StatusOptions,
) -> Dispatch {
    let DirstateEntry {
        state,
        mode,
        mtime,
        size,
    } = entry;

    let HgMetadata {
        st_mode,
        st_size,
        st_mtime,
        ..
    } = metadata;

    match state {
        EntryState::Normal => {
            let size_changed = mod_compare(size, st_size as i32);
            let mode_changed =
                (mode ^ st_mode as i32) & 0o100 != 0o000 && options.check_exec;
            let metadata_changed = size >= 0 && (size_changed || mode_changed);
            let other_parent = size == SIZE_FROM_OTHER_PARENT;

            if metadata_changed
                || other_parent
                || copy_map.contains_key(filename.as_ref())
            {
                if metadata.is_symlink() && size_changed {
                    // issue6456: Size returned may be longer due to encryption
                    // on EXT-4 fscrypt. TODO maybe only do it on EXT4?
                    Dispatch::Unsure
                } else {
                    Dispatch::Modified
                }
            } else if mod_compare(mtime, st_mtime as i32)
                || st_mtime == options.last_normal_time
            {
                // the file may have just been marked as normal and
                // it may have changed in the same second without
                // changing its size. This can happen if we quickly
                // do multiple commits. Force lookup, so we don't
                // miss such a racy file change.
                Dispatch::Unsure
            } else if options.list_clean {
                Dispatch::Clean
            } else {
                Dispatch::None
            }
        }
        EntryState::Merged => Dispatch::Modified,
        EntryState::Added => Dispatch::Added,
        EntryState::Removed => Dispatch::Removed,
        EntryState::Unknown => Dispatch::Unknown,
    }
}

/// The file corresponding to this Dirstate entry is missing.
fn dispatch_missing(state: EntryState) -> Dispatch {
    match state {
        // File was removed from the filesystem during commands
        EntryState::Normal | EntryState::Merged | EntryState::Added => {
            Dispatch::Deleted
        }
        // File was removed, everything is normal
        EntryState::Removed => Dispatch::Removed,
        // File is unknown to Mercurial, everything is normal
        EntryState::Unknown => Dispatch::Unknown,
    }
}

fn dispatch_os_error(e: &std::io::Error) -> Dispatch {
    Dispatch::Bad(BadMatch::OsError(
        e.raw_os_error().expect("expected real OS error"),
    ))
}

lazy_static! {
    static ref DEFAULT_WORK: HashSet<&'static HgPath> = {
        let mut h = HashSet::new();
        h.insert(HgPath::new(b""));
        h
    };
}

#[derive(Debug, Copy, Clone)]
pub struct StatusOptions {
    /// Remember the most recent modification timeslot for status, to make
    /// sure we won't miss future size-preserving file content modifications
    /// that happen within the same timeslot.
    pub last_normal_time: i64,
    /// Whether we are on a filesystem with UNIX-like exec flags
    pub check_exec: bool,
    pub list_clean: bool,
    pub list_unknown: bool,
    pub list_ignored: bool,
    /// Whether to collect traversed dirs for applying a callback later.
    /// Used by `hg purge` for example.
    pub collect_traversed_dirs: bool,
}

#[derive(Debug, Default)]
pub struct DirstateStatus<'a> {
    /// Tracked files whose contents have changed since the parent revision
    pub modified: Vec<HgPathCow<'a>>,

    /// Newly-tracked files that were not present in the parent
    pub added: Vec<HgPathCow<'a>>,

    /// Previously-tracked files that have been (re)moved with an hg command
    pub removed: Vec<HgPathCow<'a>>,

    /// (Still) tracked files that are missing, (re)moved with a non-hg
    /// command
    pub deleted: Vec<HgPathCow<'a>>,

    /// Tracked files that are up to date with the parent.
    /// Only populated if `StatusOptions::list_clean` is true.
    pub clean: Vec<HgPathCow<'a>>,

    /// Files in the working directory that are ignored with `.hgignore`.
    /// Only populated if `StatusOptions::list_ignored` is true.
    pub ignored: Vec<HgPathCow<'a>>,

    /// Files in the working directory that are neither tracked nor ignored.
    /// Only populated if `StatusOptions::list_unknown` is true.
    pub unknown: Vec<HgPathCow<'a>>,

    /// Was explicitly matched but cannot be found/accessed
    pub bad: Vec<(HgPathCow<'a>, BadMatch)>,

    /// Either clean or modified, but we can’t tell from filesystem metadata
    /// alone. The file contents need to be read and compared with that in
    /// the parent.
    pub unsure: Vec<HgPathCow<'a>>,

    /// Only filled if `collect_traversed_dirs` is `true`
    pub traversed: Vec<HgPathCow<'a>>,

    /// Whether `status()` made changes to the `DirstateMap` that should be
    /// written back to disk
    pub dirty: bool,
}
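The newly added `dirty` flag tells the caller that the traversal itself mutated the map (for example, refreshed directory caches) and the result should be persisted. A hedged sketch of what a caller might do with it; `run_status` and `write_dirstate` are hypothetical stand-ins, not the actual hg-core entry points:

// Illustrative caller only: the real API differs.
struct StatusOutcome {
    dirty: bool,
}

fn status_and_maybe_write_back() -> std::io::Result<()> {
    let outcome = run_status()?;
    if outcome.dirty {
        // Directory caches changed during traversal: write .hg/dirstate
        // back to disk so the next status run can reuse them.
        write_dirstate()?;
    }
    Ok(())
}

fn run_status() -> std::io::Result<StatusOutcome> {
    Ok(StatusOutcome { dirty: true })
}

fn write_dirstate() -> std::io::Result<()> {
    Ok(())
}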

#[derive(Debug, derive_more::From)]
pub enum StatusError {
    /// Generic IO error
    IO(std::io::Error),
    /// An invalid path that cannot be represented in Mercurial was found
    Path(HgPathError),
    /// An invalid "ignore" pattern was found
    Pattern(PatternError),
    /// Corrupted dirstate
    DirstateV2ParseError(DirstateV2ParseError),
}

pub type StatusResult<T> = Result<T, StatusError>;

impl fmt::Display for StatusError {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        match self {
            StatusError::IO(error) => error.fmt(f),
            StatusError::Path(error) => error.fmt(f),
            StatusError::Pattern(error) => error.fmt(f),
            StatusError::DirstateV2ParseError(_) => {
                f.write_str("dirstate-v2 parse error")
            }
        }
    }
}

/// Gives information about which files are changed in the working directory
/// and how, compared to the revision we're based on
pub struct Status<'a, M: ?Sized + Matcher + Sync> {
    dmap: &'a DirstateMap,
    pub(crate) matcher: &'a M,
    root_dir: PathBuf,
    pub(crate) options: StatusOptions,
    ignore_fn: IgnoreFnType<'a>,
}

impl<'a, M> Status<'a, M>
where
    M: ?Sized + Matcher + Sync,
{
    pub fn new(
        dmap: &'a DirstateMap,
        matcher: &'a M,
        root_dir: PathBuf,
        ignore_files: Vec<PathBuf>,
        options: StatusOptions,
    ) -> StatusResult<(Self, Vec<PatternFileWarning>)> {
        // Needs to outlive `dir_ignore_fn` since it's captured.

        let (ignore_fn, warnings): (IgnoreFnType, _) =
            if options.list_ignored || options.list_unknown {
                get_ignore_function(ignore_files, &root_dir)?
            } else {
                (Box::new(|&_| true), vec![])
            };

        Ok((
            Self {
                dmap,
                matcher,
                root_dir,
                options,
                ignore_fn,
            },
            warnings,
        ))
    }

    /// Is the path ignored?
    pub fn is_ignored(&self, path: impl AsRef<HgPath>) -> bool {
        (self.ignore_fn)(path.as_ref())
    }

    /// Is the path or one of its ancestors ignored?
    pub fn dir_ignore(&self, dir: impl AsRef<HgPath>) -> bool {
        // Only involve ignore mechanism if we're listing unknowns or ignored.
        if self.options.list_ignored || self.options.list_unknown {
            if self.is_ignored(&dir) {
                true
            } else {
                for p in find_dirs(dir.as_ref()) {
                    if self.is_ignored(p) {
                        return true;
                    }
                }
                false
            }
        } else {
            true
        }
    }
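dir_ignore relies on find_dirs to enumerate a path's ancestor directories so it can stop at the first ignored one. A std-only sketch of such an ancestor walk over `/`-separated paths (illustrative; the real helper works on HgPath byte strings):

/// Yield the ancestor directories of a `/`-separated path, nearest first:
/// "a/b/c" -> ["a/b", "a", ""].
fn ancestor_dirs(path: &str) -> impl Iterator<Item = &str> {
    std::iter::successors(path.rfind('/').map(|i| &path[..i]), |dir| {
        dir.rfind('/').map(|i| &dir[..i])
    })
    .chain(std::iter::once(""))
}

fn main() {
    let dirs: Vec<_> = ancestor_dirs("a/b/c").collect();
    assert_eq!(dirs, ["a/b", "a", ""]);
}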

    /// Get stat data about the files explicitly specified by the matcher.
    /// Returns a tuple of the directories that need to be traversed and the
    /// files with their corresponding `Dispatch`.
    /// TODO subrepos
    #[timed]
    pub fn walk_explicit(
        &self,
        traversed_sender: crossbeam_channel::Sender<HgPathBuf>,
    ) -> (Vec<DispatchedPath<'a>>, Vec<DispatchedPath<'a>>) {
        self.matcher
            .file_set()
            .unwrap_or(&DEFAULT_WORK)
            .par_iter()
            .flat_map(|&filename| -> Option<_> {
                // TODO normalization
                let normalized = filename;

                let buf = match hg_path_to_path_buf(normalized) {
                    Ok(x) => x,
                    Err(_) => {
                        return Some((
                            Cow::Borrowed(normalized),
                            INVALID_PATH_DISPATCH,
                        ))
                    }
                };
                let target = self.root_dir.join(buf);
                let st = target.symlink_metadata();
                let in_dmap = self.dmap.get(normalized);
                match st {
                    Ok(meta) => {
                        let file_type = meta.file_type();
                        return if file_type.is_file() || file_type.is_symlink()
                        {
                            if let Some(entry) = in_dmap {
                                return Some((
                                    Cow::Borrowed(normalized),
                                    dispatch_found(
                                        &normalized,
                                        *entry,
                                        HgMetadata::from_metadata(meta),
                                        &self.dmap.copy_map,
                                        self.options,
                                    ),
                                ));
                            }
                            Some((
                                Cow::Borrowed(normalized),
                                Dispatch::Unknown,
                            ))
                        } else if file_type.is_dir() {
                            if self.options.collect_traversed_dirs {
                                traversed_sender
                                    .send(normalized.to_owned())
                                    .expect("receiver should outlive sender");
                            }
                            Some((
                                Cow::Borrowed(normalized),
                                Dispatch::Directory {
                                    was_file: in_dmap.is_some(),
                                },
                            ))
                        } else {
                            Some((
                                Cow::Borrowed(normalized),
                                Dispatch::Bad(BadMatch::BadType(
                                    // TODO do more than unknown
                                    // Support for all `BadType` variants
                                    // varies greatly between platforms.
                                    // So far, no tests check the type and
                                    // this should be good enough for most
                                    // users.
                                    BadType::Unknown,
                                )),
                            ))
                        };
                    }
                    Err(_) => {
                        if let Some(entry) = in_dmap {
                            return Some((
                                Cow::Borrowed(normalized),
                                dispatch_missing(entry.state),
                            ));
                        }
                    }
                };
                None
            })
            .partition(|(_, dispatch)| match dispatch {
                Dispatch::Directory { .. } => true,
                _ => false,
            })
    }

    /// Walk the working directory recursively to look for changes compared to
    /// the current `DirstateMap`.
    ///
    /// This takes a mutable reference to the results to account for the
    /// `extend` in timings
    #[timed]
    pub fn traverse(
        &self,
        path: impl AsRef<HgPath>,
        old_results: &FastHashMap<HgPathCow<'a>, Dispatch>,
        results: &mut Vec<DispatchedPath<'a>>,
        traversed_sender: crossbeam_channel::Sender<HgPathBuf>,
    ) {
        // The traversal is done in parallel, so use a channel to gather
        // entries. `crossbeam_channel::Sender` is `Sync`, while `mpsc::Sender`
        // is not.
        let (files_transmitter, files_receiver) =
            crossbeam_channel::unbounded();

        self.traverse_dir(
            &files_transmitter,
            path,
            &old_results,
            traversed_sender,
        );

        // Disconnect the channel so the receiver stops waiting
        drop(files_transmitter);

        let new_results = files_receiver
            .into_iter()
            .par_bridge()
            .map(|(f, d)| (Cow::Owned(f), d));

        results.par_extend(new_results);
    }
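The pattern here — parallel producers sending into an unbounded crossbeam channel, dropping the sender, then draining the receiver through par_bridge — is worth seeing in isolation. A minimal sketch with toy payloads, assuming the rayon and crossbeam-channel crates:

use rayon::prelude::*;

fn main() {
    let (tx, rx) = crossbeam_channel::unbounded();

    // Producers run in parallel; each rayon task gets its own clone of `tx`.
    (0..100).into_par_iter().for_each_with(tx, |tx, i| {
        tx.send(i * 2).expect("receiver should outlive sender");
    });
    // `for_each_with` consumed the original sender and dropped all clones,
    // so the receiver below sees a closed, fully buffered channel.

    let total: i32 = rx.into_iter().par_bridge().sum();
    let expected: i32 = (0..100).map(|i| i * 2).sum();
    assert_eq!(total, expected);
}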

    /// Dispatch a single entry (file, folder, symlink...) found during
    /// `traverse`. If the entry is a folder that needs to be traversed, it
    /// will be handled in a separate thread.
    fn handle_traversed_entry<'b>(
        &'a self,
        scope: &rayon::Scope<'b>,
        files_sender: &'b crossbeam_channel::Sender<(HgPathBuf, Dispatch)>,
        old_results: &'a FastHashMap<Cow<HgPath>, Dispatch>,
        filename: HgPathBuf,
        dir_entry: DirEntry,
        traversed_sender: crossbeam_channel::Sender<HgPathBuf>,
    ) -> IoResult<()>
    where
        'a: 'b,
    {
        let file_type = dir_entry.file_type()?;
        let entry_option = self.dmap.get(&filename);

        if filename.as_bytes() == b".hg" {
            // Could be a directory or a symlink
            return Ok(());
        }

        if file_type.is_dir() {
            self.handle_traversed_dir(
                scope,
                files_sender,
                old_results,
                entry_option,
                filename,
                traversed_sender,
            );
        } else if file_type.is_file() || file_type.is_symlink() {
            if let Some(entry) = entry_option {
                if self.matcher.matches_everything()
                    || self.matcher.matches(&filename)
                {
                    let metadata = dir_entry.metadata()?;
                    files_sender
                        .send((
                            filename.to_owned(),
                            dispatch_found(
                                &filename,
                                *entry,
                                HgMetadata::from_metadata(metadata),
                                &self.dmap.copy_map,
                                self.options,
                            ),
                        ))
                        .unwrap();
                }
            } else if (self.matcher.matches_everything()
                || self.matcher.matches(&filename))
                && !self.is_ignored(&filename)
            {
                if (self.options.list_ignored
                    || self.matcher.exact_match(&filename))
                    && self.dir_ignore(&filename)
                {
                    if self.options.list_ignored {
                        files_sender
                            .send((filename.to_owned(), Dispatch::Ignored))
                            .unwrap();
                    }
                } else if self.options.list_unknown {
                    files_sender
                        .send((filename.to_owned(), Dispatch::Unknown))
                        .unwrap();
                }
            } else if self.is_ignored(&filename) && self.options.list_ignored {
                if self.matcher.matches(&filename) {
                    files_sender
                        .send((filename.to_owned(), Dispatch::Ignored))
                        .unwrap();
                }
            }
        } else if let Some(entry) = entry_option {
            // Used to be a file or a folder, now something else.
            if self.matcher.matches_everything()
                || self.matcher.matches(&filename)
            {
                files_sender
                    .send((filename.to_owned(), dispatch_missing(entry.state)))
                    .unwrap();
            }
        }

        Ok(())
    }

    /// A directory was found in the filesystem and needs to be traversed
    fn handle_traversed_dir<'b>(
        &'a self,
        scope: &rayon::Scope<'b>,
        files_sender: &'b crossbeam_channel::Sender<(HgPathBuf, Dispatch)>,
        old_results: &'a FastHashMap<Cow<HgPath>, Dispatch>,
        entry_option: Option<&'a DirstateEntry>,
        directory: HgPathBuf,
        traversed_sender: crossbeam_channel::Sender<HgPathBuf>,
    ) where
        'a: 'b,
    {
        scope.spawn(move |_| {
            // Nested `if` until `rust-lang/rust#53668` is stable
            if let Some(entry) = entry_option {
                // Used to be a file, is now a folder
                if self.matcher.matches_everything()
                    || self.matcher.matches(&directory)
                {
                    files_sender
                        .send((
                            directory.to_owned(),
                            dispatch_missing(entry.state),
                        ))
                        .unwrap();
                }
            }
            // Do we need to traverse it?
            if !self.is_ignored(&directory) || self.options.list_ignored {
                self.traverse_dir(
                    files_sender,
                    directory,
                    &old_results,
                    traversed_sender,
                )
            }
        });
    }

    /// Decides whether the directory needs to be listed, and if so handles the
    /// entries in a separate thread.
    fn traverse_dir(
        &self,
        files_sender: &crossbeam_channel::Sender<(HgPathBuf, Dispatch)>,
        directory: impl AsRef<HgPath>,
        old_results: &FastHashMap<Cow<HgPath>, Dispatch>,
        traversed_sender: crossbeam_channel::Sender<HgPathBuf>,
    ) {
        let directory = directory.as_ref();

        if self.options.collect_traversed_dirs {
            traversed_sender
                .send(directory.to_owned())
                .expect("receiver should outlive sender");
        }

        let visit_entries = match self.matcher.visit_children_set(directory) {
            VisitChildrenSet::Empty => return,
            VisitChildrenSet::This | VisitChildrenSet::Recursive => None,
            VisitChildrenSet::Set(set) => Some(set),
        };
        let buf = match hg_path_to_path_buf(directory) {
            Ok(b) => b,
            Err(_) => {
                files_sender
                    .send((directory.to_owned(), INVALID_PATH_DISPATCH))
                    .expect("receiver should outlive sender");
                return;
            }
        };
        let dir_path = self.root_dir.join(buf);

        let skip_dot_hg = !directory.as_bytes().is_empty();
        let entries = match list_directory(dir_path, skip_dot_hg) {
            Err(e) => {
                files_sender
                    .send((directory.to_owned(), dispatch_os_error(&e)))
                    .expect("receiver should outlive sender");
                return;
            }
            Ok(entries) => entries,
        };

        rayon::scope(|scope| {
            for (filename, dir_entry) in entries {
                if let Some(ref set) = visit_entries {
                    if !set.contains(filename.deref()) {
                        continue;
                    }
                }
                // TODO normalize
                let filename = if directory.is_empty() {
                    filename.to_owned()
                } else {
                    directory.join(&filename)
                };

                if !old_results.contains_key(filename.deref()) {
                    match self.handle_traversed_entry(
                        scope,
                        files_sender,
                        old_results,
                        filename,
                        dir_entry,
                        traversed_sender.clone(),
                    ) {
                        Err(e) => {
                            files_sender
                                .send((
                                    directory.to_owned(),
                                    dispatch_os_error(&e),
                                ))
                                .expect("receiver should outlive sender");
                        }
                        Ok(_) => {}
                    }
                }
            }
        })
    }

    /// Add the files in the dirstate to the results.
    ///
    /// This takes a mutable reference to the results to account for the
    /// `extend` in timings
    #[timed]
    pub fn extend_from_dmap(&self, results: &mut Vec<DispatchedPath<'a>>) {
        results.par_extend(
            self.dmap
                .par_iter()
                .filter(|(path, _)| self.matcher.matches(path))
                .map(move |(filename, entry)| {
                    let filename: &HgPath = filename;
                    let filename_as_path = match hg_path_to_path_buf(filename)
                    {
                        Ok(f) => f,
                        Err(_) => {
                            return (
                                Cow::Borrowed(filename),
                                INVALID_PATH_DISPATCH,
                            )
                        }
                    };
                    let meta = self
                        .root_dir
                        .join(filename_as_path)
                        .symlink_metadata();
                    match meta {
                        Ok(m)
                            if !(m.file_type().is_file()
                                || m.file_type().is_symlink()) =>
                        {
                            (
                                Cow::Borrowed(filename),
                                dispatch_missing(entry.state),
                            )
                        }
                        Ok(m) => (
                            Cow::Borrowed(filename),
                            dispatch_found(
                                filename,
                                *entry,
                                HgMetadata::from_metadata(m),
                                &self.dmap.copy_map,
                                self.options,
                            ),
                        ),
                        Err(e)
                            if e.kind() == ErrorKind::NotFound
                                || e.raw_os_error() == Some(20) =>
                        {
                            // Rust does not yet have an `ErrorKind` for
                            // `NotADirectory` (errno 20). It happens if the
                            // dirstate contains `foo/bar` and foo is not a
                            // directory
                            (
                                Cow::Borrowed(filename),
                                dispatch_missing(entry.state),
                            )
                        }
                        Err(e) => {
                            (Cow::Borrowed(filename), dispatch_os_error(&e))
                        }
                    }
                }),
        );
    }
800
804
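The errno-20 special case handled above predates a dedicated `std::io::ErrorKind::NotADirectory` in stable Rust. The predicate is easy to isolate; a minimal sketch (the helper name is an assumption, not part of this codebase):

    use std::io::ErrorKind;

    /// True when a dirstate path like `foo/bar` cannot exist on disk
    /// because `foo` is missing or is a plain file, not a directory.
    fn is_missing_or_not_a_directory(e: &std::io::Error) -> bool {
        // errno 20 is ENOTDIR on Unix; stable Rust had no dedicated
        // `ErrorKind` for it when this code was written.
        e.kind() == ErrorKind::NotFound || e.raw_os_error() == Some(20)
    }
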
801 /// Checks all files that are in the dirstate but were not found during the
805 /// Checks all files that are in the dirstate but were not found during the
802 /// working directory traversal. This means that these files must
806 /// working directory traversal. This means that these files must
803 /// be either ignored, under a symlink, or under a new nested repo.
807 /// be either ignored, under a symlink, or under a new nested repo.
804 ///
808 ///
805 /// This takes a mutable reference to the results to account for the
809 /// This takes a mutable reference to the results to account for the
806 /// `extend` in timings
810 /// `extend` in timings
807 #[timed]
811 #[timed]
808 pub fn handle_unknowns(&self, results: &mut Vec<DispatchedPath<'a>>) {
812 pub fn handle_unknowns(&self, results: &mut Vec<DispatchedPath<'a>>) {
809 let to_visit: Vec<(&HgPath, &DirstateEntry)> =
813 let to_visit: Vec<(&HgPath, &DirstateEntry)> =
810 if results.is_empty() && self.matcher.matches_everything() {
814 if results.is_empty() && self.matcher.matches_everything() {
811 self.dmap.iter().map(|(f, e)| (f.deref(), e)).collect()
815 self.dmap.iter().map(|(f, e)| (f.deref(), e)).collect()
812 } else {
816 } else {
813 // Only convert to a hashmap if needed.
817 // Only convert to a hashmap if needed.
814 let old_results: FastHashMap<_, _> =
818 let old_results: FastHashMap<_, _> =
815 results.iter().cloned().collect();
819 results.iter().cloned().collect();
816 self.dmap
820 self.dmap
817 .iter()
821 .iter()
818 .filter_map(move |(f, e)| {
822 .filter_map(move |(f, e)| {
819 if !old_results.contains_key(f.deref())
823 if !old_results.contains_key(f.deref())
820 && self.matcher.matches(f)
824 && self.matcher.matches(f)
821 {
825 {
822 Some((f.deref(), e))
826 Some((f.deref(), e))
823 } else {
827 } else {
824 None
828 None
825 }
829 }
826 })
830 })
827 .collect()
831 .collect()
828 };
832 };
829
833
830 let path_auditor = PathAuditor::new(&self.root_dir);
834 let path_auditor = PathAuditor::new(&self.root_dir);
831
835
832 let new_results = to_visit.into_par_iter().filter_map(
836 let new_results = to_visit.into_par_iter().filter_map(
833 |(filename, entry)| -> Option<_> {
837 |(filename, entry)| -> Option<_> {
834 // Report ignored items in the dmap as long as they are not
838 // Report ignored items in the dmap as long as they are not
835 // under a symlink directory.
839 // under a symlink directory.
836 if path_auditor.check(filename) {
840 if path_auditor.check(filename) {
837 // TODO normalize for case-insensitive filesystems
841 // TODO normalize for case-insensitive filesystems
838 let buf = match hg_path_to_path_buf(filename) {
842 let buf = match hg_path_to_path_buf(filename) {
839 Ok(x) => x,
843 Ok(x) => x,
840 Err(_) => {
844 Err(_) => {
841 return Some((
845 return Some((
842 Cow::Owned(filename.to_owned()),
846 Cow::Owned(filename.to_owned()),
843 INVALID_PATH_DISPATCH,
847 INVALID_PATH_DISPATCH,
844 ));
848 ));
845 }
849 }
846 };
850 };
847 Some((
851 Some((
848 Cow::Owned(filename.to_owned()),
852 Cow::Owned(filename.to_owned()),
849 match self.root_dir.join(&buf).symlink_metadata() {
853 match self.root_dir.join(&buf).symlink_metadata() {
850 // File was just ignored, no links, and exists
854 // File was just ignored, no links, and exists
851 Ok(meta) => {
855 Ok(meta) => {
852 let metadata = HgMetadata::from_metadata(meta);
856 let metadata = HgMetadata::from_metadata(meta);
853 dispatch_found(
857 dispatch_found(
854 filename,
858 filename,
855 *entry,
859 *entry,
856 metadata,
860 metadata,
857 &self.dmap.copy_map,
861 &self.dmap.copy_map,
858 self.options,
862 self.options,
859 )
863 )
860 }
864 }
861 // File doesn't exist
865 // File doesn't exist
862 Err(_) => dispatch_missing(entry.state),
866 Err(_) => dispatch_missing(entry.state),
863 },
867 },
864 ))
868 ))
865 } else {
869 } else {
866 // It's either missing or under a symlink directory which
870 // It's either missing or under a symlink directory which
867 // we, in this case, report as missing.
871 // we, in this case, report as missing.
868 Some((
872 Some((
869 Cow::Owned(filename.to_owned()),
873 Cow::Owned(filename.to_owned()),
870 dispatch_missing(entry.state),
874 dispatch_missing(entry.state),
871 ))
875 ))
872 }
876 }
873 },
877 },
874 );
878 );
875
879
876 results.par_extend(new_results);
880 results.par_extend(new_results);
877 }
881 }
878 }
882 }
879
883
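`PathAuditor::check` above is what rules out paths that sit under a symlinked directory. A rough sketch of that core idea only, with a hypothetical helper name; the real auditor also caches audited prefixes and rejects paths inside nested repositories:

    use std::path::Path;

    /// Walk each ancestor directory of `relative` under `root` and
    /// report whether any of them is a symlink on disk.
    fn has_symlink_ancestor(root: &Path, relative: &Path) -> bool {
        let mut current = root.to_path_buf();
        let parents =
            relative.parent().into_iter().flat_map(|p| p.components());
        for component in parents {
            current.push(component);
            if let Ok(meta) = std::fs::symlink_metadata(&current) {
                if meta.file_type().is_symlink() {
                    return true;
                }
            }
        }
        false
    }
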
880 #[timed]
884 #[timed]
881 pub fn build_response<'a>(
885 pub fn build_response<'a>(
882 results: impl IntoIterator<Item = DispatchedPath<'a>>,
886 results: impl IntoIterator<Item = DispatchedPath<'a>>,
883 traversed: Vec<HgPathCow<'a>>,
887 traversed: Vec<HgPathCow<'a>>,
884 ) -> DirstateStatus<'a> {
888 ) -> DirstateStatus<'a> {
885 let mut unsure = vec![];
889 let mut unsure = vec![];
886 let mut modified = vec![];
890 let mut modified = vec![];
887 let mut added = vec![];
891 let mut added = vec![];
888 let mut removed = vec![];
892 let mut removed = vec![];
889 let mut deleted = vec![];
893 let mut deleted = vec![];
890 let mut clean = vec![];
894 let mut clean = vec![];
891 let mut ignored = vec![];
895 let mut ignored = vec![];
892 let mut unknown = vec![];
896 let mut unknown = vec![];
893 let mut bad = vec![];
897 let mut bad = vec![];
894
898
895 for (filename, dispatch) in results.into_iter() {
899 for (filename, dispatch) in results.into_iter() {
896 match dispatch {
900 match dispatch {
897 Dispatch::Unknown => unknown.push(filename),
901 Dispatch::Unknown => unknown.push(filename),
898 Dispatch::Unsure => unsure.push(filename),
902 Dispatch::Unsure => unsure.push(filename),
899 Dispatch::Modified => modified.push(filename),
903 Dispatch::Modified => modified.push(filename),
900 Dispatch::Added => added.push(filename),
904 Dispatch::Added => added.push(filename),
901 Dispatch::Removed => removed.push(filename),
905 Dispatch::Removed => removed.push(filename),
902 Dispatch::Deleted => deleted.push(filename),
906 Dispatch::Deleted => deleted.push(filename),
903 Dispatch::Clean => clean.push(filename),
907 Dispatch::Clean => clean.push(filename),
904 Dispatch::Ignored => ignored.push(filename),
908 Dispatch::Ignored => ignored.push(filename),
905 Dispatch::None => {}
909 Dispatch::None => {}
906 Dispatch::Bad(reason) => bad.push((filename, reason)),
910 Dispatch::Bad(reason) => bad.push((filename, reason)),
907 Dispatch::Directory { .. } => {}
911 Dispatch::Directory { .. } => {}
908 }
912 }
909 }
913 }
910
914
911 DirstateStatus {
915 DirstateStatus {
912 modified,
916 modified,
913 added,
917 added,
914 removed,
918 removed,
915 deleted,
919 deleted,
916 clean,
920 clean,
917 ignored,
921 ignored,
918 unknown,
922 unknown,
919 bad,
923 bad,
920 unsure,
924 unsure,
921 traversed,
925 traversed,
926 dirty: false,
922 }
927 }
923 }
928 }
924
929
925 /// Get the status of files in the working directory.
930 /// Get the status of files in the working directory.
926 ///
931 ///
927 /// This is the current entry-point for `hg-core` and is realistically unusable
932 /// This is the current entry-point for `hg-core` and is realistically unusable
928 /// outside of a Python context because its arguments need to provide a lot of
933 /// outside of a Python context because its arguments need to provide a lot of
929 /// information that will not be necessary in the future.
934 /// information that will not be necessary in the future.
930 #[timed]
935 #[timed]
931 pub fn status<'a>(
936 pub fn status<'a>(
932 dmap: &'a DirstateMap,
937 dmap: &'a DirstateMap,
933 matcher: &'a (dyn Matcher + Sync),
938 matcher: &'a (dyn Matcher + Sync),
934 root_dir: PathBuf,
939 root_dir: PathBuf,
935 ignore_files: Vec<PathBuf>,
940 ignore_files: Vec<PathBuf>,
936 options: StatusOptions,
941 options: StatusOptions,
937 ) -> StatusResult<(DirstateStatus<'a>, Vec<PatternFileWarning>)> {
942 ) -> StatusResult<(DirstateStatus<'a>, Vec<PatternFileWarning>)> {
938 let (status, warnings) =
943 let (status, warnings) =
939 Status::new(dmap, matcher, root_dir, ignore_files, options)?;
944 Status::new(dmap, matcher, root_dir, ignore_files, options)?;
940
945
941 Ok((status.run()?, warnings))
946 Ok((status.run()?, warnings))
942 }
947 }
@@ -1,684 +1,685 b''
1 use crate::dirstate::status::IgnoreFnType;
1 use crate::dirstate::status::IgnoreFnType;
2 use crate::dirstate_tree::dirstate_map::BorrowedPath;
2 use crate::dirstate_tree::dirstate_map::BorrowedPath;
3 use crate::dirstate_tree::dirstate_map::ChildNodesRef;
3 use crate::dirstate_tree::dirstate_map::ChildNodesRef;
4 use crate::dirstate_tree::dirstate_map::DirstateMap;
4 use crate::dirstate_tree::dirstate_map::DirstateMap;
5 use crate::dirstate_tree::dirstate_map::NodeData;
5 use crate::dirstate_tree::dirstate_map::NodeData;
6 use crate::dirstate_tree::dirstate_map::NodeRef;
6 use crate::dirstate_tree::dirstate_map::NodeRef;
7 use crate::dirstate_tree::on_disk::DirstateV2ParseError;
7 use crate::dirstate_tree::on_disk::DirstateV2ParseError;
8 use crate::dirstate_tree::on_disk::Timestamp;
8 use crate::dirstate_tree::on_disk::Timestamp;
9 use crate::dirstate_tree::path_with_basename::WithBasename;
9 use crate::dirstate_tree::path_with_basename::WithBasename;
10 use crate::matchers::get_ignore_function;
10 use crate::matchers::get_ignore_function;
11 use crate::matchers::Matcher;
11 use crate::matchers::Matcher;
12 use crate::utils::files::get_bytes_from_os_string;
12 use crate::utils::files::get_bytes_from_os_string;
13 use crate::utils::files::get_path_from_bytes;
13 use crate::utils::files::get_path_from_bytes;
14 use crate::utils::hg_path::HgPath;
14 use crate::utils::hg_path::HgPath;
15 use crate::BadMatch;
15 use crate::BadMatch;
16 use crate::DirstateStatus;
16 use crate::DirstateStatus;
17 use crate::EntryState;
17 use crate::EntryState;
18 use crate::HgPathBuf;
18 use crate::HgPathBuf;
19 use crate::PatternFileWarning;
19 use crate::PatternFileWarning;
20 use crate::StatusError;
20 use crate::StatusError;
21 use crate::StatusOptions;
21 use crate::StatusOptions;
22 use micro_timer::timed;
22 use micro_timer::timed;
23 use rayon::prelude::*;
23 use rayon::prelude::*;
24 use std::borrow::Cow;
24 use std::borrow::Cow;
25 use std::io;
25 use std::io;
26 use std::path::Path;
26 use std::path::Path;
27 use std::path::PathBuf;
27 use std::path::PathBuf;
28 use std::sync::Mutex;
28 use std::sync::Mutex;
29 use std::time::SystemTime;
29 use std::time::SystemTime;
30
30
31 /// Returns the status of the working directory compared to its parent
31 /// Returns the status of the working directory compared to its parent
32 /// changeset.
32 /// changeset.
33 ///
33 ///
34 /// This algorithm is based on traversing the filesystem tree (`fs` in function
34 /// This algorithm is based on traversing the filesystem tree (`fs` in function
35 /// and variable names) and dirstate tree at the same time. The core of this
35 /// and variable names) and dirstate tree at the same time. The core of this
36 /// traversal is the recursive `traverse_fs_directory_and_dirstate` function
36 /// traversal is the recursive `traverse_fs_directory_and_dirstate` function
37 /// and its use of `itertools::merge_join_by`. When reaching a path that only
37 /// and its use of `itertools::merge_join_by`. When reaching a path that only
38 /// exists in one of the two trees, depending on information requested by
38 /// exists in one of the two trees, depending on information requested by
39 /// `options` we may need to traverse the remaining subtree.
39 /// `options` we may need to traverse the remaining subtree.
40 #[timed]
40 #[timed]
41 pub fn status<'tree, 'on_disk: 'tree>(
41 pub fn status<'tree, 'on_disk: 'tree>(
42 dmap: &'tree mut DirstateMap<'on_disk>,
42 dmap: &'tree mut DirstateMap<'on_disk>,
43 matcher: &(dyn Matcher + Sync),
43 matcher: &(dyn Matcher + Sync),
44 root_dir: PathBuf,
44 root_dir: PathBuf,
45 ignore_files: Vec<PathBuf>,
45 ignore_files: Vec<PathBuf>,
46 options: StatusOptions,
46 options: StatusOptions,
47 ) -> Result<(DirstateStatus<'on_disk>, Vec<PatternFileWarning>), StatusError> {
47 ) -> Result<(DirstateStatus<'on_disk>, Vec<PatternFileWarning>), StatusError> {
48 let (ignore_fn, warnings): (IgnoreFnType, _) =
48 let (ignore_fn, warnings): (IgnoreFnType, _) =
49 if options.list_ignored || options.list_unknown {
49 if options.list_ignored || options.list_unknown {
50 get_ignore_function(ignore_files, &root_dir)?
50 get_ignore_function(ignore_files, &root_dir)?
51 } else {
51 } else {
52 (Box::new(|&_| true), vec![])
52 (Box::new(|&_| true), vec![])
53 };
53 };
54
54
55 let common = StatusCommon {
55 let common = StatusCommon {
56 dmap,
56 dmap,
57 options,
57 options,
58 matcher,
58 matcher,
59 ignore_fn,
59 ignore_fn,
60 outcome: Default::default(),
60 outcome: Default::default(),
61 cached_directory_mtimes_to_add: Default::default(),
61 cached_directory_mtimes_to_add: Default::default(),
62 filesystem_time_at_status_start: filesystem_now(&root_dir).ok(),
62 filesystem_time_at_status_start: filesystem_now(&root_dir).ok(),
63 };
63 };
64 let is_at_repo_root = true;
64 let is_at_repo_root = true;
65 let hg_path = &BorrowedPath::OnDisk(HgPath::new(""));
65 let hg_path = &BorrowedPath::OnDisk(HgPath::new(""));
66 let has_ignored_ancestor = false;
66 let has_ignored_ancestor = false;
67 let root_cached_mtime = None;
67 let root_cached_mtime = None;
68 let root_dir_metadata = None;
68 let root_dir_metadata = None;
69 // If the path we have for the repository root is a symlink, do follow it.
69 // If the path we have for the repository root is a symlink, do follow it.
70 // (As opposed to symlinks within the working directory which are not
70 // (As opposed to symlinks within the working directory which are not
71 // followed, using `std::fs::symlink_metadata`.)
71 // followed, using `std::fs::symlink_metadata`.)
72 common.traverse_fs_directory_and_dirstate(
72 common.traverse_fs_directory_and_dirstate(
73 has_ignored_ancestor,
73 has_ignored_ancestor,
74 dmap.root.as_ref(),
74 dmap.root.as_ref(),
75 hg_path,
75 hg_path,
76 &root_dir,
76 &root_dir,
77 root_dir_metadata,
77 root_dir_metadata,
78 root_cached_mtime,
78 root_cached_mtime,
79 is_at_repo_root,
79 is_at_repo_root,
80 )?;
80 )?;
81 let outcome = common.outcome.into_inner().unwrap();
81 let mut outcome = common.outcome.into_inner().unwrap();
82 let to_add = common.cached_directory_mtimes_to_add.into_inner().unwrap();
82 let to_add = common.cached_directory_mtimes_to_add.into_inner().unwrap();
83 outcome.dirty = !to_add.is_empty();
83 for (path, mtime) in &to_add {
84 for (path, mtime) in &to_add {
84 let node = DirstateMap::get_or_insert_node(
85 let node = DirstateMap::get_or_insert_node(
85 dmap.on_disk,
86 dmap.on_disk,
86 &mut dmap.root,
87 &mut dmap.root,
87 path,
88 path,
88 WithBasename::to_cow_owned,
89 WithBasename::to_cow_owned,
89 |_| {},
90 |_| {},
90 )?;
91 )?;
91 match &node.data {
92 match &node.data {
92 NodeData::Entry(_) => {} // Don’t overwrite an entry
93 NodeData::Entry(_) => {} // Don’t overwrite an entry
93 NodeData::CachedDirectory { .. } | NodeData::None => {
94 NodeData::CachedDirectory { .. } | NodeData::None => {
94 node.data = NodeData::CachedDirectory { mtime: *mtime }
95 node.data = NodeData::CachedDirectory { mtime: *mtime }
95 }
96 }
96 }
97 }
97 }
98 }
98 Ok((outcome, warnings))
99 Ok((outcome, warnings))
99 }
100 }
100
101
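The `dirty` flag set above is how callers learn that new directory mtimes were cached and `.hg/dirstate` should be written back to disk. A hedged sketch of a caller reacting to it; `write_dirstate` is a stand-in closure, not the actual Mercurial API:

    fn persist_if_dirty(
        outcome: &DirstateStatus<'_>,
        write_dirstate: impl FnOnce() -> std::io::Result<()>,
    ) -> std::io::Result<()> {
        if outcome.dirty {
            // New cached directory mtimes were recorded; writing the
            // dirstate back lets the next `status` skip those
            // `read_dir` calls entirely.
            write_dirstate()?;
        }
        Ok(())
    }
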
101 /// Bag of random things needed by various parts of the algorithm. Reduces the
102 /// Bag of random things needed by various parts of the algorithm. Reduces the
102 /// number of parameters passed to functions.
103 /// number of parameters passed to functions.
103 struct StatusCommon<'a, 'tree, 'on_disk: 'tree> {
104 struct StatusCommon<'a, 'tree, 'on_disk: 'tree> {
104 dmap: &'tree DirstateMap<'on_disk>,
105 dmap: &'tree DirstateMap<'on_disk>,
105 options: StatusOptions,
106 options: StatusOptions,
106 matcher: &'a (dyn Matcher + Sync),
107 matcher: &'a (dyn Matcher + Sync),
107 ignore_fn: IgnoreFnType<'a>,
108 ignore_fn: IgnoreFnType<'a>,
108 outcome: Mutex<DirstateStatus<'on_disk>>,
109 outcome: Mutex<DirstateStatus<'on_disk>>,
109 cached_directory_mtimes_to_add:
110 cached_directory_mtimes_to_add:
110 Mutex<Vec<(Cow<'on_disk, HgPath>, Timestamp)>>,
111 Mutex<Vec<(Cow<'on_disk, HgPath>, Timestamp)>>,
111
112
112 /// The current time at the start of the `status()` algorithm, as measured
113 /// The current time at the start of the `status()` algorithm, as measured
113 /// and possibly truncated by the filesystem.
114 /// and possibly truncated by the filesystem.
114 filesystem_time_at_status_start: Option<SystemTime>,
115 filesystem_time_at_status_start: Option<SystemTime>,
115 }
116 }
116
117
117 impl<'a, 'tree, 'on_disk> StatusCommon<'a, 'tree, 'on_disk> {
118 impl<'a, 'tree, 'on_disk> StatusCommon<'a, 'tree, 'on_disk> {
118 fn read_dir(
119 fn read_dir(
119 &self,
120 &self,
120 hg_path: &HgPath,
121 hg_path: &HgPath,
121 fs_path: &Path,
122 fs_path: &Path,
122 is_at_repo_root: bool,
123 is_at_repo_root: bool,
123 ) -> Result<Vec<DirEntry>, ()> {
124 ) -> Result<Vec<DirEntry>, ()> {
124 DirEntry::read_dir(fs_path, is_at_repo_root)
125 DirEntry::read_dir(fs_path, is_at_repo_root)
125 .map_err(|error| self.io_error(error, hg_path))
126 .map_err(|error| self.io_error(error, hg_path))
126 }
127 }
127
128
128 fn io_error(&self, error: std::io::Error, hg_path: &HgPath) {
129 fn io_error(&self, error: std::io::Error, hg_path: &HgPath) {
129 let errno = error.raw_os_error().expect("expected real OS error");
130 let errno = error.raw_os_error().expect("expected real OS error");
130 self.outcome
131 self.outcome
131 .lock()
132 .lock()
132 .unwrap()
133 .unwrap()
133 .bad
134 .bad
134 .push((hg_path.to_owned().into(), BadMatch::OsError(errno)))
135 .push((hg_path.to_owned().into(), BadMatch::OsError(errno)))
135 }
136 }
136
137
137 /// If this returns true, we can get accurate results by only using
138 /// If this returns true, we can get accurate results by only using
138 /// `symlink_metadata` for child nodes that exist in the dirstate and don’t
139 /// `symlink_metadata` for child nodes that exist in the dirstate and don’t
139 /// need to call `read_dir`.
140 /// need to call `read_dir`.
140 fn can_skip_fs_readdir(
141 fn can_skip_fs_readdir(
141 &self,
142 &self,
142 directory_metadata: Option<&std::fs::Metadata>,
143 directory_metadata: Option<&std::fs::Metadata>,
143 cached_directory_mtime: Option<&Timestamp>,
144 cached_directory_mtime: Option<&Timestamp>,
144 ) -> bool {
145 ) -> bool {
145 if !self.options.list_unknown && !self.options.list_ignored {
146 if !self.options.list_unknown && !self.options.list_ignored {
146 // All states that we care about listing have corresponding
147 // All states that we care about listing have corresponding
147 // dirstate entries.
148 // dirstate entries.
148 // This happens for example with `hg status -mard`.
149 // This happens for example with `hg status -mard`.
149 return true;
150 return true;
150 }
151 }
151 if let Some(cached_mtime) = cached_directory_mtime {
152 if let Some(cached_mtime) = cached_directory_mtime {
152 // The dirstate contains a cached mtime for this directory, set by
153 // The dirstate contains a cached mtime for this directory, set by
153 // a previous run of the `status` algorithm which found this
154 // a previous run of the `status` algorithm which found this
154 // directory eligible for `read_dir` caching.
155 // directory eligible for `read_dir` caching.
155 if let Some(meta) = directory_metadata {
156 if let Some(meta) = directory_metadata {
156 if let Ok(current_mtime) = meta.modified() {
157 if let Ok(current_mtime) = meta.modified() {
157 if current_mtime == cached_mtime.into() {
158 if current_mtime == cached_mtime.into() {
158 // The mtime of that directory has not changed since
159 // The mtime of that directory has not changed since
159 // then, which means that the
160 // then, which means that the
160 // results of `read_dir` should also
161 // results of `read_dir` should also
161 // be unchanged.
162 // be unchanged.
162 return true;
163 return true;
163 }
164 }
164 }
165 }
165 }
166 }
166 }
167 }
167 false
168 false
168 }
169 }
169
170
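The mtime comparison above is the heart of the cache validity check. The same idea as a standalone sketch, with illustrative names:

    use std::path::Path;
    use std::time::SystemTime;

    /// A directory's cached `read_dir` listing can be reused as long
    /// as its current mtime still equals the one recorded when the
    /// cache entry was written.
    fn directory_unchanged(dir: &Path, cached_mtime: SystemTime) -> bool {
        std::fs::symlink_metadata(dir)
            .and_then(|meta| meta.modified())
            .map(|current_mtime| current_mtime == cached_mtime)
            .unwrap_or(false)
    }
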
170 /// Returns whether the filesystem directory was found to have any entry
171 /// Returns whether the filesystem directory was found to have any entry
171 /// that does not have a corresponding dirstate tree node.
172 /// that does not have a corresponding dirstate tree node.
172 fn traverse_fs_directory_and_dirstate(
173 fn traverse_fs_directory_and_dirstate(
173 &self,
174 &self,
174 has_ignored_ancestor: bool,
175 has_ignored_ancestor: bool,
175 dirstate_nodes: ChildNodesRef<'tree, 'on_disk>,
176 dirstate_nodes: ChildNodesRef<'tree, 'on_disk>,
176 directory_hg_path: &BorrowedPath<'tree, 'on_disk>,
177 directory_hg_path: &BorrowedPath<'tree, 'on_disk>,
177 directory_fs_path: &Path,
178 directory_fs_path: &Path,
178 directory_metadata: Option<&std::fs::Metadata>,
179 directory_metadata: Option<&std::fs::Metadata>,
179 cached_directory_mtime: Option<&Timestamp>,
180 cached_directory_mtime: Option<&Timestamp>,
180 is_at_repo_root: bool,
181 is_at_repo_root: bool,
181 ) -> Result<bool, DirstateV2ParseError> {
182 ) -> Result<bool, DirstateV2ParseError> {
182 if self.can_skip_fs_readdir(directory_metadata, cached_directory_mtime)
183 if self.can_skip_fs_readdir(directory_metadata, cached_directory_mtime)
183 {
184 {
184 dirstate_nodes
185 dirstate_nodes
185 .par_iter()
186 .par_iter()
186 .map(|dirstate_node| {
187 .map(|dirstate_node| {
187 let fs_path = directory_fs_path.join(get_path_from_bytes(
188 let fs_path = directory_fs_path.join(get_path_from_bytes(
188 dirstate_node.base_name(self.dmap.on_disk)?.as_bytes(),
189 dirstate_node.base_name(self.dmap.on_disk)?.as_bytes(),
189 ));
190 ));
190 match std::fs::symlink_metadata(&fs_path) {
191 match std::fs::symlink_metadata(&fs_path) {
191 Ok(fs_metadata) => self.traverse_fs_and_dirstate(
192 Ok(fs_metadata) => self.traverse_fs_and_dirstate(
192 &fs_path,
193 &fs_path,
193 &fs_metadata,
194 &fs_metadata,
194 dirstate_node,
195 dirstate_node,
195 has_ignored_ancestor,
196 has_ignored_ancestor,
196 ),
197 ),
197 Err(e) if e.kind() == std::io::ErrorKind::NotFound => {
198 Err(e) if e.kind() == std::io::ErrorKind::NotFound => {
198 self.traverse_dirstate_only(dirstate_node)
199 self.traverse_dirstate_only(dirstate_node)
199 }
200 }
200 Err(error) => {
201 Err(error) => {
201 let hg_path =
202 let hg_path =
202 dirstate_node.full_path(self.dmap.on_disk)?;
203 dirstate_node.full_path(self.dmap.on_disk)?;
203 Ok(self.io_error(error, hg_path))
204 Ok(self.io_error(error, hg_path))
204 }
205 }
205 }
206 }
206 })
207 })
207 .collect::<Result<_, _>>()?;
208 .collect::<Result<_, _>>()?;
208
209
209 // Conservatively don’t let the caller assume that there aren’t
210 // Conservatively don’t let the caller assume that there aren’t
210 // any, since we don’t know.
211 // any, since we don’t know.
211 let directory_has_any_fs_only_entry = true;
212 let directory_has_any_fs_only_entry = true;
212
213
213 return Ok(directory_has_any_fs_only_entry);
214 return Ok(directory_has_any_fs_only_entry);
214 }
215 }
215
216
216 let mut fs_entries = if let Ok(entries) = self.read_dir(
217 let mut fs_entries = if let Ok(entries) = self.read_dir(
217 directory_hg_path,
218 directory_hg_path,
218 directory_fs_path,
219 directory_fs_path,
219 is_at_repo_root,
220 is_at_repo_root,
220 ) {
221 ) {
221 entries
222 entries
222 } else {
223 } else {
223 // Treat an unreadable directory (typically because of insufficient
224 // Treat an unreadable directory (typically because of insufficient
224 // permissions) like an empty directory. `self.read_dir` has
225 // permissions) like an empty directory. `self.read_dir` has
225 // already called `self.io_error` so a warning will be emitted.
226 // already called `self.io_error` so a warning will be emitted.
226 Vec::new()
227 Vec::new()
227 };
228 };
228
229
229 // `merge_join_by` requires both its input iterators to be sorted:
230 // `merge_join_by` requires both its input iterators to be sorted:
230
231
231 let dirstate_nodes = dirstate_nodes.sorted();
232 let dirstate_nodes = dirstate_nodes.sorted();
232 // `sort_unstable_by_key` doesn’t allow keys borrowing from the value:
233 // `sort_unstable_by_key` doesn’t allow keys borrowing from the value:
233 // https://github.com/rust-lang/rust/issues/34162
234 // https://github.com/rust-lang/rust/issues/34162
234 fs_entries.sort_unstable_by(|e1, e2| e1.base_name.cmp(&e2.base_name));
235 fs_entries.sort_unstable_by(|e1, e2| e1.base_name.cmp(&e2.base_name));
235
236
236 // Propagate here any error that would happen inside the comparison
237 // Propagate here any error that would happen inside the comparison
237 // callback below
238 // callback below
238 for dirstate_node in &dirstate_nodes {
239 for dirstate_node in &dirstate_nodes {
239 dirstate_node.base_name(self.dmap.on_disk)?;
240 dirstate_node.base_name(self.dmap.on_disk)?;
240 }
241 }
241 itertools::merge_join_by(
242 itertools::merge_join_by(
242 dirstate_nodes,
243 dirstate_nodes,
243 &fs_entries,
244 &fs_entries,
244 |dirstate_node, fs_entry| {
245 |dirstate_node, fs_entry| {
245 // This `unwrap` never panics because we already propagated
246 // This `unwrap` never panics because we already propagated
246 // those errors above
247 // those errors above
247 dirstate_node
248 dirstate_node
248 .base_name(self.dmap.on_disk)
249 .base_name(self.dmap.on_disk)
249 .unwrap()
250 .unwrap()
250 .cmp(&fs_entry.base_name)
251 .cmp(&fs_entry.base_name)
251 },
252 },
252 )
253 )
253 .par_bridge()
254 .par_bridge()
254 .map(|pair| {
255 .map(|pair| {
255 use itertools::EitherOrBoth::*;
256 use itertools::EitherOrBoth::*;
256 let is_fs_only = pair.is_right();
257 let is_fs_only = pair.is_right();
257 match pair {
258 match pair {
258 Both(dirstate_node, fs_entry) => self
259 Both(dirstate_node, fs_entry) => self
259 .traverse_fs_and_dirstate(
260 .traverse_fs_and_dirstate(
260 &fs_entry.full_path,
261 &fs_entry.full_path,
261 &fs_entry.metadata,
262 &fs_entry.metadata,
262 dirstate_node,
263 dirstate_node,
263 has_ignored_ancestor,
264 has_ignored_ancestor,
264 )?,
265 )?,
265 Left(dirstate_node) => {
266 Left(dirstate_node) => {
266 self.traverse_dirstate_only(dirstate_node)?
267 self.traverse_dirstate_only(dirstate_node)?
267 }
268 }
268 Right(fs_entry) => self.traverse_fs_only(
269 Right(fs_entry) => self.traverse_fs_only(
269 has_ignored_ancestor,
270 has_ignored_ancestor,
270 directory_hg_path,
271 directory_hg_path,
271 fs_entry,
272 fs_entry,
272 ),
273 ),
273 }
274 }
274 Ok(is_fs_only)
275 Ok(is_fs_only)
275 })
276 })
276 .try_reduce(|| false, |a, b| Ok(a || b))
277 .try_reduce(|| false, |a, b| Ok(a || b))
277 }
278 }
278
279
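The three `EitherOrBoth` cases above drive the whole traversal. A self-contained demonstration of the same `merge_join_by` pattern on plain sorted slices (requires the `itertools` crate; the sample data is made up):

    use itertools::EitherOrBoth::{Both, Left, Right};

    fn demo() {
        let dirstate = ["a", "b", "d"]; // sorted names from the dirstate tree
        let filesystem = ["b", "c", "d"]; // sorted names from `read_dir`
        for pair in
            itertools::merge_join_by(dirstate, filesystem, |l, r| l.cmp(r))
        {
            match pair {
                Both(name, _) => println!("{}: in dirstate and on disk", name),
                Left(name) => println!("{}: dirstate-only (removed/deleted?)", name),
                Right(name) => println!("{}: fs-only (unknown/ignored?)", name),
            }
        }
    }
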
279 fn traverse_fs_and_dirstate(
280 fn traverse_fs_and_dirstate(
280 &self,
281 &self,
281 fs_path: &Path,
282 fs_path: &Path,
282 fs_metadata: &std::fs::Metadata,
283 fs_metadata: &std::fs::Metadata,
283 dirstate_node: NodeRef<'tree, 'on_disk>,
284 dirstate_node: NodeRef<'tree, 'on_disk>,
284 has_ignored_ancestor: bool,
285 has_ignored_ancestor: bool,
285 ) -> Result<(), DirstateV2ParseError> {
286 ) -> Result<(), DirstateV2ParseError> {
286 let hg_path = &dirstate_node.full_path_borrowed(self.dmap.on_disk)?;
287 let hg_path = &dirstate_node.full_path_borrowed(self.dmap.on_disk)?;
287 let file_type = fs_metadata.file_type();
288 let file_type = fs_metadata.file_type();
288 let file_or_symlink = file_type.is_file() || file_type.is_symlink();
289 let file_or_symlink = file_type.is_file() || file_type.is_symlink();
289 if !file_or_symlink {
290 if !file_or_symlink {
290 // If we previously had a file here, it was removed (with
291 // If we previously had a file here, it was removed (with
291 // `hg rm` or similar) or deleted before it could be
292 // `hg rm` or similar) or deleted before it could be
292 // replaced by a directory or something else.
293 // replaced by a directory or something else.
293 self.mark_removed_or_deleted_if_file(
294 self.mark_removed_or_deleted_if_file(
294 &hg_path,
295 &hg_path,
295 dirstate_node.state()?,
296 dirstate_node.state()?,
296 );
297 );
297 }
298 }
298 if file_type.is_dir() {
299 if file_type.is_dir() {
299 if self.options.collect_traversed_dirs {
300 if self.options.collect_traversed_dirs {
300 self.outcome
301 self.outcome
301 .lock()
302 .lock()
302 .unwrap()
303 .unwrap()
303 .traversed
304 .traversed
304 .push(hg_path.detach_from_tree())
305 .push(hg_path.detach_from_tree())
305 }
306 }
306 let is_ignored = has_ignored_ancestor || (self.ignore_fn)(hg_path);
307 let is_ignored = has_ignored_ancestor || (self.ignore_fn)(hg_path);
307 let is_at_repo_root = false;
308 let is_at_repo_root = false;
308 let directory_has_any_fs_only_entry = self
309 let directory_has_any_fs_only_entry = self
309 .traverse_fs_directory_and_dirstate(
310 .traverse_fs_directory_and_dirstate(
310 is_ignored,
311 is_ignored,
311 dirstate_node.children(self.dmap.on_disk)?,
312 dirstate_node.children(self.dmap.on_disk)?,
312 hg_path,
313 hg_path,
313 fs_path,
314 fs_path,
314 Some(fs_metadata),
315 Some(fs_metadata),
315 dirstate_node.cached_directory_mtime(),
316 dirstate_node.cached_directory_mtime(),
316 is_at_repo_root,
317 is_at_repo_root,
317 )?;
318 )?;
318 self.maybe_save_directory_mtime(
319 self.maybe_save_directory_mtime(
319 directory_has_any_fs_only_entry,
320 directory_has_any_fs_only_entry,
320 fs_metadata,
321 fs_metadata,
321 dirstate_node,
322 dirstate_node,
322 )?
323 )?
323 } else {
324 } else {
324 if file_or_symlink && self.matcher.matches(hg_path) {
325 if file_or_symlink && self.matcher.matches(hg_path) {
325 if let Some(state) = dirstate_node.state()? {
326 if let Some(state) = dirstate_node.state()? {
326 match state {
327 match state {
327 EntryState::Added => self
328 EntryState::Added => self
328 .outcome
329 .outcome
329 .lock()
330 .lock()
330 .unwrap()
331 .unwrap()
331 .added
332 .added
332 .push(hg_path.detach_from_tree()),
333 .push(hg_path.detach_from_tree()),
333 EntryState::Removed => self
334 EntryState::Removed => self
334 .outcome
335 .outcome
335 .lock()
336 .lock()
336 .unwrap()
337 .unwrap()
337 .removed
338 .removed
338 .push(hg_path.detach_from_tree()),
339 .push(hg_path.detach_from_tree()),
339 EntryState::Merged => self
340 EntryState::Merged => self
340 .outcome
341 .outcome
341 .lock()
342 .lock()
342 .unwrap()
343 .unwrap()
343 .modified
344 .modified
344 .push(hg_path.detach_from_tree()),
345 .push(hg_path.detach_from_tree()),
345 EntryState::Normal => self
346 EntryState::Normal => self
346 .handle_normal_file(&dirstate_node, fs_metadata)?,
347 .handle_normal_file(&dirstate_node, fs_metadata)?,
347 // This variant is not used in DirstateMap
348 // This variant is not used in DirstateMap
348 // nodes
349 // nodes
349 EntryState::Unknown => unreachable!(),
350 EntryState::Unknown => unreachable!(),
350 }
351 }
351 } else {
352 } else {
352 // `node.entry.is_none()` indicates a "directory"
353 // `node.entry.is_none()` indicates a "directory"
353 // node, but the filesystem has a file
354 // node, but the filesystem has a file
354 self.mark_unknown_or_ignored(has_ignored_ancestor, hg_path)
355 self.mark_unknown_or_ignored(has_ignored_ancestor, hg_path)
355 }
356 }
356 }
357 }
357
358
358 for child_node in dirstate_node.children(self.dmap.on_disk)?.iter()
359 for child_node in dirstate_node.children(self.dmap.on_disk)?.iter()
359 {
360 {
360 self.traverse_dirstate_only(child_node)?
361 self.traverse_dirstate_only(child_node)?
361 }
362 }
362 }
363 }
363 Ok(())
364 Ok(())
364 }
365 }
365
366
366 fn maybe_save_directory_mtime(
367 fn maybe_save_directory_mtime(
367 &self,
368 &self,
368 directory_has_any_fs_only_entry: bool,
369 directory_has_any_fs_only_entry: bool,
369 directory_metadata: &std::fs::Metadata,
370 directory_metadata: &std::fs::Metadata,
370 dirstate_node: NodeRef<'tree, 'on_disk>,
371 dirstate_node: NodeRef<'tree, 'on_disk>,
371 ) -> Result<(), DirstateV2ParseError> {
372 ) -> Result<(), DirstateV2ParseError> {
372 if !directory_has_any_fs_only_entry {
373 if !directory_has_any_fs_only_entry {
373 // All filesystem directory entries from `read_dir` have a
374 // All filesystem directory entries from `read_dir` have a
374 // corresponding node in the dirstate, so we can reconstitute the
375 // corresponding node in the dirstate, so we can reconstitute the
375 // names of those entries without calling `read_dir` again.
376 // names of those entries without calling `read_dir` again.
376 if let (Some(status_start), Ok(directory_mtime)) = (
377 if let (Some(status_start), Ok(directory_mtime)) = (
377 &self.filesystem_time_at_status_start,
378 &self.filesystem_time_at_status_start,
378 directory_metadata.modified(),
379 directory_metadata.modified(),
379 ) {
380 ) {
380 // Although the Rust standard library’s `SystemTime` type
381 // Although the Rust standard library’s `SystemTime` type
381 // has nanosecond precision, the times reported for a
382 // has nanosecond precision, the times reported for a
382 // directory’s (or file’s) modified time may have lower
383 // directory’s (or file’s) modified time may have lower
383 // resolution based on the filesystem (for example ext3
384 // resolution based on the filesystem (for example ext3
384 // only stores integer seconds), kernel (see
385 // only stores integer seconds), kernel (see
385 // https://stackoverflow.com/a/14393315/1162888), etc.
386 // https://stackoverflow.com/a/14393315/1162888), etc.
386 if &directory_mtime >= status_start {
387 if &directory_mtime >= status_start {
387 // The directory was modified too recently, don’t cache its
388 // The directory was modified too recently, don’t cache its
388 // `read_dir` results.
389 // `read_dir` results.
389 //
390 //
390 // A timeline like this is possible:
391 // A timeline like this is possible:
391 //
392 //
392 // 1. A change to this directory (direct child was
393 // 1. A change to this directory (direct child was
393 // added or removed) causes its mtime to be set
394 // added or removed) causes its mtime to be set
394 // (possibly truncated) to `directory_mtime`
395 // (possibly truncated) to `directory_mtime`
395 // 2. This `status` algorithm calls `read_dir`
396 // 2. This `status` algorithm calls `read_dir`
396 // 3. Another change is made to the same directory
397 // 3. Another change is made to the same directory
397 // so that calling `read_dir` again would give
398 // so that calling `read_dir` again would give
398 // different results, but soon enough after 1. that
399 // different results, but soon enough after 1. that
399 // the mtime stays the same
400 // the mtime stays the same
400 //
401 //
401 // On a system where the time resolution is poor, this
402 // On a system where the time resolution is poor, this
402 // scenario is not unlikely if all three steps are caused
403 // scenario is not unlikely if all three steps are caused
403 // by the same script.
404 // by the same script.
404 } else {
405 } else {
405 // We’ve observed (through `status_start`) that time has
406 // We’ve observed (through `status_start`) that time has
406 // “progressed” since `directory_mtime`, so any further
407 // “progressed” since `directory_mtime`, so any further
407 // change to this directory is extremely likely to cause a
408 // change to this directory is extremely likely to cause a
408 // different mtime.
409 // different mtime.
409 //
410 //
410 // Having the same mtime again is not entirely impossible
411 // Having the same mtime again is not entirely impossible
411 // since the system clock is not monotonic. It could jump
412 // since the system clock is not monotonic. It could jump
412 // backward to some point before `directory_mtime`, then a
413 // backward to some point before `directory_mtime`, then a
413 // directory change could potentially happen during exactly
414 // directory change could potentially happen during exactly
414 // the wrong tick.
415 // the wrong tick.
415 //
416 //
416 // We deem this scenario (unlike the previous one) to be
417 // We deem this scenario (unlike the previous one) to be
417 // unlikely enough in practice.
418 // unlikely enough in practice.
418 let timestamp = directory_mtime.into();
419 let timestamp = directory_mtime.into();
419 let cached = dirstate_node.cached_directory_mtime();
420 let cached = dirstate_node.cached_directory_mtime();
420 if cached != Some(&timestamp) {
421 if cached != Some(&timestamp) {
421 let hg_path = dirstate_node
422 let hg_path = dirstate_node
422 .full_path_borrowed(self.dmap.on_disk)?
423 .full_path_borrowed(self.dmap.on_disk)?
423 .detach_from_tree();
424 .detach_from_tree();
424 self.cached_directory_mtimes_to_add
425 self.cached_directory_mtimes_to_add
425 .lock()
426 .lock()
426 .unwrap()
427 .unwrap()
427 .push((hg_path, timestamp))
428 .push((hg_path, timestamp))
428 }
429 }
429 }
430 }
430 }
431 }
431 }
432 }
432 Ok(())
433 Ok(())
433 }
434 }
434
435
435 /// A file with `EntryState::Normal` in the dirstate was found in the
436 /// A file with `EntryState::Normal` in the dirstate was found in the
436 /// filesystem
437 /// filesystem
437 fn handle_normal_file(
438 fn handle_normal_file(
438 &self,
439 &self,
439 dirstate_node: &NodeRef<'tree, 'on_disk>,
440 dirstate_node: &NodeRef<'tree, 'on_disk>,
440 fs_metadata: &std::fs::Metadata,
441 fs_metadata: &std::fs::Metadata,
441 ) -> Result<(), DirstateV2ParseError> {
442 ) -> Result<(), DirstateV2ParseError> {
442 // Keep the low 31 bits
443 // Keep the low 31 bits
443 fn truncate_u64(value: u64) -> i32 {
444 fn truncate_u64(value: u64) -> i32 {
444 (value & 0x7FFF_FFFF) as i32
445 (value & 0x7FFF_FFFF) as i32
445 }
446 }
446 fn truncate_i64(value: i64) -> i32 {
447 fn truncate_i64(value: i64) -> i32 {
447 (value & 0x7FFF_FFFF) as i32
448 (value & 0x7FFF_FFFF) as i32
448 }
449 }
449
450
450 let entry = dirstate_node
451 let entry = dirstate_node
451 .entry()?
452 .entry()?
452 .expect("handle_normal_file called with entry-less node");
453 .expect("handle_normal_file called with entry-less node");
453 let hg_path = &dirstate_node.full_path_borrowed(self.dmap.on_disk)?;
454 let hg_path = &dirstate_node.full_path_borrowed(self.dmap.on_disk)?;
454 let mode_changed =
455 let mode_changed =
455 || self.options.check_exec && entry.mode_changed(fs_metadata);
456 || self.options.check_exec && entry.mode_changed(fs_metadata);
456 let size_changed = entry.size != truncate_u64(fs_metadata.len());
457 let size_changed = entry.size != truncate_u64(fs_metadata.len());
457 if entry.size >= 0
458 if entry.size >= 0
458 && size_changed
459 && size_changed
459 && fs_metadata.file_type().is_symlink()
460 && fs_metadata.file_type().is_symlink()
460 {
461 {
461 // issue6456: Size returned may be longer due to encryption
462 // issue6456: Size returned may be longer due to encryption
462 // on ext4 fscrypt. TODO maybe only do it on ext4?
463 // on ext4 fscrypt. TODO maybe only do it on ext4?
463 self.outcome
464 self.outcome
464 .lock()
465 .lock()
465 .unwrap()
466 .unwrap()
466 .unsure
467 .unsure
467 .push(hg_path.detach_from_tree())
468 .push(hg_path.detach_from_tree())
468 } else if dirstate_node.has_copy_source()
469 } else if dirstate_node.has_copy_source()
469 || entry.is_from_other_parent()
470 || entry.is_from_other_parent()
470 || (entry.size >= 0 && (size_changed || mode_changed()))
471 || (entry.size >= 0 && (size_changed || mode_changed()))
471 {
472 {
472 self.outcome
473 self.outcome
473 .lock()
474 .lock()
474 .unwrap()
475 .unwrap()
475 .modified
476 .modified
476 .push(hg_path.detach_from_tree())
477 .push(hg_path.detach_from_tree())
477 } else {
478 } else {
478 let mtime = mtime_seconds(fs_metadata);
479 let mtime = mtime_seconds(fs_metadata);
479 if truncate_i64(mtime) != entry.mtime
480 if truncate_i64(mtime) != entry.mtime
480 || mtime == self.options.last_normal_time
481 || mtime == self.options.last_normal_time
481 {
482 {
482 self.outcome
483 self.outcome
483 .lock()
484 .lock()
484 .unwrap()
485 .unwrap()
485 .unsure
486 .unsure
486 .push(hg_path.detach_from_tree())
487 .push(hg_path.detach_from_tree())
487 } else if self.options.list_clean {
488 } else if self.options.list_clean {
488 self.outcome
489 self.outcome
489 .lock()
490 .lock()
490 .unwrap()
491 .unwrap()
491 .clean
492 .clean
492 .push(hg_path.detach_from_tree())
493 .push(hg_path.detach_from_tree())
493 }
494 }
494 }
495 }
495 Ok(())
496 Ok(())
496 }
497 }
497
498
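The 31-bit truncation helpers above mirror the dirstate file format, whose size and mtime fields are signed 32-bit integers; keeping only the low 31 bits keeps values non-negative. A quick check, mirroring `truncate_u64` as defined above:

    fn truncate_u64(value: u64) -> i32 {
        (value & 0x7FFF_FFFF) as i32
    }

    #[test]
    fn truncation_wraps_large_sizes() {
        // A file of 4 GiB + 1 byte wraps into the low 31 bits, so only
        // equality of truncated values is meaningful, never ordering.
        assert_eq!(truncate_u64(0x1_0000_0001), 1);
        assert_eq!(truncate_u64(0x7FFF_FFFF), i32::MAX);
    }
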
498 /// A node in the dirstate tree has no corresponding filesystem entry
499 /// A node in the dirstate tree has no corresponding filesystem entry
499 fn traverse_dirstate_only(
500 fn traverse_dirstate_only(
500 &self,
501 &self,
501 dirstate_node: NodeRef<'tree, 'on_disk>,
502 dirstate_node: NodeRef<'tree, 'on_disk>,
502 ) -> Result<(), DirstateV2ParseError> {
503 ) -> Result<(), DirstateV2ParseError> {
503 self.mark_removed_or_deleted_if_file(
504 self.mark_removed_or_deleted_if_file(
504 &dirstate_node.full_path_borrowed(self.dmap.on_disk)?,
505 &dirstate_node.full_path_borrowed(self.dmap.on_disk)?,
505 dirstate_node.state()?,
506 dirstate_node.state()?,
506 );
507 );
507 dirstate_node
508 dirstate_node
508 .children(self.dmap.on_disk)?
509 .children(self.dmap.on_disk)?
509 .par_iter()
510 .par_iter()
510 .map(|child_node| self.traverse_dirstate_only(child_node))
511 .map(|child_node| self.traverse_dirstate_only(child_node))
511 .collect()
512 .collect()
512 }
513 }
513
514
514 /// A node in the dirstate tree has no corresponding *file* on the
515 /// A node in the dirstate tree has no corresponding *file* on the
515 /// filesystem
516 /// filesystem
516 ///
517 ///
517 /// Does nothing on a "directory" node
518 /// Does nothing on a "directory" node
518 fn mark_removed_or_deleted_if_file(
519 fn mark_removed_or_deleted_if_file(
519 &self,
520 &self,
520 hg_path: &BorrowedPath<'tree, 'on_disk>,
521 hg_path: &BorrowedPath<'tree, 'on_disk>,
521 dirstate_node_state: Option<EntryState>,
522 dirstate_node_state: Option<EntryState>,
522 ) {
523 ) {
523 if let Some(state) = dirstate_node_state {
524 if let Some(state) = dirstate_node_state {
524 if self.matcher.matches(hg_path) {
525 if self.matcher.matches(hg_path) {
525 if let EntryState::Removed = state {
526 if let EntryState::Removed = state {
526 self.outcome
527 self.outcome
527 .lock()
528 .lock()
528 .unwrap()
529 .unwrap()
529 .removed
530 .removed
530 .push(hg_path.detach_from_tree())
531 .push(hg_path.detach_from_tree())
531 } else {
532 } else {
532 self.outcome
533 self.outcome
533 .lock()
534 .lock()
534 .unwrap()
535 .unwrap()
535 .deleted
536 .deleted
536 .push(hg_path.detach_from_tree())
537 .push(hg_path.detach_from_tree())
537 }
538 }
538 }
539 }
539 }
540 }
540 }
541 }
541
542
542 /// Something in the filesystem has no corresponding dirstate node
543 /// Something in the filesystem has no corresponding dirstate node
543 fn traverse_fs_only(
544 fn traverse_fs_only(
544 &self,
545 &self,
545 has_ignored_ancestor: bool,
546 has_ignored_ancestor: bool,
546 directory_hg_path: &HgPath,
547 directory_hg_path: &HgPath,
547 fs_entry: &DirEntry,
548 fs_entry: &DirEntry,
548 ) {
549 ) {
549 let hg_path = directory_hg_path.join(&fs_entry.base_name);
550 let hg_path = directory_hg_path.join(&fs_entry.base_name);
550 let file_type = fs_entry.metadata.file_type();
551 let file_type = fs_entry.metadata.file_type();
551 let file_or_symlink = file_type.is_file() || file_type.is_symlink();
552 let file_or_symlink = file_type.is_file() || file_type.is_symlink();
552 if file_type.is_dir() {
553 if file_type.is_dir() {
553 let is_ignored =
554 let is_ignored =
554 has_ignored_ancestor || (self.ignore_fn)(&hg_path);
555 has_ignored_ancestor || (self.ignore_fn)(&hg_path);
555 let traverse_children = if is_ignored {
556 let traverse_children = if is_ignored {
556 // Descendants of an ignored directory are all ignored
557 // Descendants of an ignored directory are all ignored
557 self.options.list_ignored
558 self.options.list_ignored
558 } else {
559 } else {
559 // Descendants of an unknown directory may be either unknown or
560 // Descendants of an unknown directory may be either unknown or
560 // ignored
561 // ignored
561 self.options.list_unknown || self.options.list_ignored
562 self.options.list_unknown || self.options.list_ignored
562 };
563 };
563 if traverse_children {
564 if traverse_children {
564 let is_at_repo_root = false;
565 let is_at_repo_root = false;
565 if let Ok(children_fs_entries) = self.read_dir(
566 if let Ok(children_fs_entries) = self.read_dir(
566 &hg_path,
567 &hg_path,
567 &fs_entry.full_path,
568 &fs_entry.full_path,
568 is_at_repo_root,
569 is_at_repo_root,
569 ) {
570 ) {
570 children_fs_entries.par_iter().for_each(|child_fs_entry| {
571 children_fs_entries.par_iter().for_each(|child_fs_entry| {
571 self.traverse_fs_only(
572 self.traverse_fs_only(
572 is_ignored,
573 is_ignored,
573 &hg_path,
574 &hg_path,
574 child_fs_entry,
575 child_fs_entry,
575 )
576 )
576 })
577 })
577 }
578 }
578 }
579 }
579 if self.options.collect_traversed_dirs {
580 if self.options.collect_traversed_dirs {
580 self.outcome.lock().unwrap().traversed.push(hg_path.into())
581 self.outcome.lock().unwrap().traversed.push(hg_path.into())
581 }
582 }
582 } else if file_or_symlink && self.matcher.matches(&hg_path) {
583 } else if file_or_symlink && self.matcher.matches(&hg_path) {
583 self.mark_unknown_or_ignored(
584 self.mark_unknown_or_ignored(
584 has_ignored_ancestor,
585 has_ignored_ancestor,
585 &BorrowedPath::InMemory(&hg_path),
586 &BorrowedPath::InMemory(&hg_path),
586 )
587 )
587 }
588 }
588 }
589 }
589
590
590 fn mark_unknown_or_ignored(
591 fn mark_unknown_or_ignored(
591 &self,
592 &self,
592 has_ignored_ancestor: bool,
593 has_ignored_ancestor: bool,
593 hg_path: &BorrowedPath<'_, 'on_disk>,
594 hg_path: &BorrowedPath<'_, 'on_disk>,
594 ) {
595 ) {
595 let is_ignored = has_ignored_ancestor || (self.ignore_fn)(&hg_path);
596 let is_ignored = has_ignored_ancestor || (self.ignore_fn)(&hg_path);
596 if is_ignored {
597 if is_ignored {
597 if self.options.list_ignored {
598 if self.options.list_ignored {
598 self.outcome
599 self.outcome
599 .lock()
600 .lock()
600 .unwrap()
601 .unwrap()
601 .ignored
602 .ignored
602 .push(hg_path.detach_from_tree())
603 .push(hg_path.detach_from_tree())
603 }
604 }
604 } else {
605 } else {
605 if self.options.list_unknown {
606 if self.options.list_unknown {
606 self.outcome
607 self.outcome
607 .lock()
608 .lock()
608 .unwrap()
609 .unwrap()
609 .unknown
610 .unknown
610 .push(hg_path.detach_from_tree())
611 .push(hg_path.detach_from_tree())
611 }
612 }
612 }
613 }
613 }
614 }
614 }
615 }
615
616
616 #[cfg(unix)] // TODO
617 #[cfg(unix)] // TODO
617 fn mtime_seconds(metadata: &std::fs::Metadata) -> i64 {
618 fn mtime_seconds(metadata: &std::fs::Metadata) -> i64 {
618 // Going through `Metadata::modified()` would be portable, but would go
619 // Going through `Metadata::modified()` would be portable, but would go
619 // to the trouble of constructing a `SystemTime` value with sub-second
620 // to the trouble of constructing a `SystemTime` value with sub-second
620 // precision just for us to throw that away here.
621 // precision just for us to throw that away here.
621 use std::os::unix::fs::MetadataExt;
622 use std::os::unix::fs::MetadataExt;
622 metadata.mtime()
623 metadata.mtime()
623 }
624 }
624
625
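For comparison, the portable route the comment above mentions would look roughly like this sketch, paying for a `SystemTime` round-trip only to discard sub-second precision:

    use std::time::UNIX_EPOCH;

    fn mtime_seconds_portable(
        metadata: &std::fs::Metadata,
    ) -> io::Result<i64> {
        let modified = metadata.modified()?;
        Ok(match modified.duration_since(UNIX_EPOCH) {
            Ok(since_epoch) => since_epoch.as_secs() as i64,
            // mtimes before 1970 come back as an error; negate the offset.
            Err(before_epoch) => -(before_epoch.duration().as_secs() as i64),
        })
    }
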
625 struct DirEntry {
626 struct DirEntry {
626 base_name: HgPathBuf,
627 base_name: HgPathBuf,
627 full_path: PathBuf,
628 full_path: PathBuf,
628 metadata: std::fs::Metadata,
629 metadata: std::fs::Metadata,
629 }
630 }
630
631
631 impl DirEntry {
632 impl DirEntry {
632 /// Returns **unsorted** entries in the given directory, with name and
633 /// Returns **unsorted** entries in the given directory, with name and
633 /// metadata.
634 /// metadata.
634 ///
635 ///
635 /// If a `.hg` sub-directory is encountered:
636 /// If a `.hg` sub-directory is encountered:
636 ///
637 ///
637 /// * At the repository root, ignore that sub-directory
638 /// * At the repository root, ignore that sub-directory
638 /// * Elsewhere, we’re listing the content of a sub-repo. Return an empty
639 /// * Elsewhere, we’re listing the content of a sub-repo. Return an empty
639 /// list instead.
640 /// list instead.
640 fn read_dir(path: &Path, is_at_repo_root: bool) -> io::Result<Vec<Self>> {
641 fn read_dir(path: &Path, is_at_repo_root: bool) -> io::Result<Vec<Self>> {
641 let mut results = Vec::new();
642 let mut results = Vec::new();
642 for entry in path.read_dir()? {
643 for entry in path.read_dir()? {
643 let entry = entry?;
644 let entry = entry?;
644 let metadata = entry.metadata()?;
645 let metadata = entry.metadata()?;
645 let name = get_bytes_from_os_string(entry.file_name());
646 let name = get_bytes_from_os_string(entry.file_name());
646 // FIXME don't do this when cached
647 // FIXME don't do this when cached
647 if name == b".hg" {
648 if name == b".hg" {
648 if is_at_repo_root {
649 if is_at_repo_root {
649 // Skip the repo’s own .hg (might be a symlink)
650 // Skip the repo’s own .hg (might be a symlink)
650 continue;
651 continue;
651 } else if metadata.is_dir() {
652 } else if metadata.is_dir() {
652 // A .hg sub-directory at another location means a subrepo,
653 // A .hg sub-directory at another location means a subrepo,
653 // skip it entirely.
654 // skip it entirely.
654 return Ok(Vec::new());
655 return Ok(Vec::new());
655 }
656 }
656 }
657 }
657 results.push(DirEntry {
658 results.push(DirEntry {
658 base_name: name.into(),
659 base_name: name.into(),
659 full_path: entry.path(),
660 full_path: entry.path(),
660 metadata,
661 metadata,
661 })
662 })
662 }
663 }
663 Ok(results)
664 Ok(results)
664 }
665 }
665 }
666 }
666
667
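A usage sketch for the helper above; the wrapper function name is illustrative, not part of this codebase:

    use std::path::Path;

    /// List the working directory root, skipping the repository's own
    /// `.hg` (which may be a symlink).
    fn list_repo_root(repo_root: &Path) -> io::Result<Vec<DirEntry>> {
        let is_at_repo_root = true;
        DirEntry::read_dir(repo_root, is_at_repo_root)
    }
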
667 /// Return the `mtime` of a temporary file newly created in the `.hg` directory
668 /// Return the `mtime` of a temporary file newly created in the `.hg` directory
668 /// of the given repository.
669 /// of the given repository.
669 ///
670 ///
670 /// This is similar to `SystemTime::now()`, with the result truncated to the
671 /// This is similar to `SystemTime::now()`, with the result truncated to the
671 /// same time resolution as other files’ modification times. Using `.hg`
672 /// same time resolution as other files’ modification times. Using `.hg`
672 /// instead of the system’s default temporary directory (such as `/tmp`) makes
673 /// instead of the system’s default temporary directory (such as `/tmp`) makes
673 /// it more likely the temporary file is in the same disk partition as contents
674 /// it more likely the temporary file is in the same disk partition as contents
674 /// of the working directory, which can matter since different filesystems may
675 /// of the working directory, which can matter since different filesystems may
675 /// store timestamps with different resolutions.
676 /// store timestamps with different resolutions.
676 ///
677 ///
677 /// This may fail, typically if we lack write permissions. In that case we
678 /// This may fail, typically if we lack write permissions. In that case we
678 /// should continue the `status()` algorithm anyway and consider the current
679 /// should continue the `status()` algorithm anyway and consider the current
679 /// date/time to be unknown.
680 /// date/time to be unknown.
680 fn filesystem_now(repo_root: &Path) -> Result<SystemTime, io::Error> {
681 fn filesystem_now(repo_root: &Path) -> Result<SystemTime, io::Error> {
681 tempfile::tempfile_in(repo_root.join(".hg"))?
682 tempfile::tempfile_in(repo_root.join(".hg"))?
682 .metadata()?
683 .metadata()?
683 .modified()
684 .modified()
684 }
685 }
@@ -1,302 +1,304 b''
// status.rs
//
// Copyright 2019, Raphaël Gomès <rgomes@octobus.net>
//
// This software may be used and distributed according to the terms of the
// GNU General Public License version 2 or any later version.

//! Bindings for the `hg::status` module provided by the
//! `hg-core` crate. From Python, this will be seen as
//! `rustext.dirstate.status`.

use crate::{dirstate::DirstateMap, exceptions::FallbackError};
use cpython::exc::OSError;
use cpython::{
    exc::ValueError, ObjectProtocol, PyBytes, PyErr, PyList, PyObject,
    PyResult, PyTuple, Python, PythonObject, ToPyObject,
};
use hg::{
    matchers::{AlwaysMatcher, FileMatcher, IncludeMatcher},
    parse_pattern_syntax,
    utils::{
        files::{get_bytes_from_path, get_path_from_bytes},
        hg_path::{HgPath, HgPathBuf},
    },
    BadMatch, DirstateStatus, IgnorePattern, PatternFileWarning, StatusError,
    StatusOptions,
};
use std::borrow::Borrow;

/// This will be useless once trait impls for collection are added to `PyBytes`
/// upstream.
fn collect_pybytes_list(
    py: Python,
    collection: &[impl AsRef<HgPath>],
) -> PyList {
    let list = PyList::new(py, &[]);

    for path in collection.iter() {
        list.append(
            py,
            PyBytes::new(py, path.as_ref().as_bytes()).into_object(),
        )
    }

    list
}

fn collect_bad_matches(
    py: Python,
    collection: &[(impl AsRef<HgPath>, BadMatch)],
) -> PyResult<PyList> {
    let list = PyList::new(py, &[]);

    let os = py.import("os")?;
    let get_error_message = |code: i32| -> PyResult<_> {
        os.call(
            py,
            "strerror",
            PyTuple::new(py, &[code.to_py_object(py).into_object()]),
            None,
        )
    };

    for (path, bad_match) in collection.iter() {
        let message = match bad_match {
            BadMatch::OsError(code) => get_error_message(*code)?,
            BadMatch::BadType(bad_type) => format!(
                "unsupported file type (type is {})",
                bad_type.to_string()
            )
            .to_py_object(py)
            .into_object(),
        };
        list.append(
            py,
            (PyBytes::new(py, path.as_ref().as_bytes()), message)
                .to_py_object(py)
                .into_object(),
        )
    }

    Ok(list)
}
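For illustration, here is how an entry flows through `collect_bad_matches`: an `OsError` code is rendered via Python's `os.strerror`, so errno 2 (assumed to be ENOENT on the target platform) would reach Python as the pair `(b"some/path", "No such file or directory")`. A hedged usage sketch, not part of the original file:

fn example_bad_matches(py: Python) -> PyResult<PyList> {
    // One entry carrying a raw OS error code; `collect_bad_matches` asks
    // Python's os.strerror for the human-readable message.
    let entries =
        vec![(HgPathBuf::from_bytes(b"some/path"), BadMatch::OsError(2))];
    collect_bad_matches(py, &entries)
}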
fn handle_fallback(py: Python, err: StatusError) -> PyErr {
    match err {
        StatusError::Pattern(e) => {
            let as_string = e.to_string();
            log::trace!("Rust status fallback: `{}`", &as_string);

            PyErr::new::<FallbackError, _>(py, &as_string)
        }
        StatusError::IO(e) => PyErr::new::<OSError, _>(py, e.to_string()),
        e => PyErr::new::<ValueError, _>(py, e.to_string()),
    }
}
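This mapping is what lets the Python layer tell "retry with the pure-Python status implementation" (pattern errors become `FallbackError`) apart from genuine failures: I/O errors surface as `OSError` and anything else as `ValueError`. A small sketch, assuming `StatusError::IO` wraps a `std::io::Error` as it does in `hg-core` at this point:

// Hypothetical illustration: an ENOENT-style I/O failure should reach
// Python as an OSError rather than triggering the fallback path.
fn example_io_failure(py: Python) -> PyErr {
    let io_err = std::io::Error::from_raw_os_error(2); // assumed ENOENT
    handle_fallback(py, StatusError::IO(io_err))
}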
pub fn status_wrapper(
    py: Python,
    dmap: DirstateMap,
    matcher: PyObject,
    root_dir: PyObject,
    ignore_files: PyList,
    check_exec: bool,
    last_normal_time: i64,
    list_clean: bool,
    list_ignored: bool,
    list_unknown: bool,
    collect_traversed_dirs: bool,
) -> PyResult<PyTuple> {
    let bytes = root_dir.extract::<PyBytes>(py)?;
    let root_dir = get_path_from_bytes(bytes.data(py));

    let dmap: DirstateMap = dmap.to_py_object(py);
    let mut dmap = dmap.get_inner_mut(py);

    let ignore_files: PyResult<Vec<_>> = ignore_files
        .iter(py)
        .map(|b| {
            let file = b.extract::<PyBytes>(py)?;
            Ok(get_path_from_bytes(file.data(py)).to_owned())
        })
        .collect();
    let ignore_files = ignore_files?;

    match matcher.get_type(py).name(py).borrow() {
        "alwaysmatcher" => {
            let matcher = AlwaysMatcher;
            let (status_res, warnings) = dmap
                .status(
                    &matcher,
                    root_dir.to_path_buf(),
                    ignore_files,
                    StatusOptions {
                        check_exec,
                        last_normal_time,
                        list_clean,
                        list_ignored,
                        list_unknown,
                        collect_traversed_dirs,
                    },
                )
                .map_err(|e| handle_fallback(py, e))?;
            build_response(py, status_res, warnings)
        }
        "exactmatcher" => {
            let files = matcher.call_method(
                py,
                "files",
                PyTuple::new(py, &[]),
                None,
            )?;
            let files: PyList = files.cast_into(py)?;
            let files: PyResult<Vec<HgPathBuf>> = files
                .iter(py)
                .map(|f| {
                    Ok(HgPathBuf::from_bytes(
                        f.extract::<PyBytes>(py)?.data(py),
                    ))
                })
                .collect();

            let files = files?;
            let matcher = FileMatcher::new(files.as_ref())
                .map_err(|e| PyErr::new::<ValueError, _>(py, e.to_string()))?;
            let (status_res, warnings) = dmap
                .status(
                    &matcher,
                    root_dir.to_path_buf(),
                    ignore_files,
                    StatusOptions {
                        check_exec,
                        last_normal_time,
                        list_clean,
                        list_ignored,
                        list_unknown,
                        collect_traversed_dirs,
                    },
                )
                .map_err(|e| handle_fallback(py, e))?;
            build_response(py, status_res, warnings)
        }
        "includematcher" => {
            // Get the patterns from Python even though most of them are
            // redundant with those we will parse later on, as they include
            // those passed from the command line.
            let ignore_patterns: PyResult<Vec<_>> = matcher
                .getattr(py, "_kindpats")?
                .iter(py)?
                .map(|k| {
                    let k = k?;
                    let syntax = parse_pattern_syntax(
                        &[
                            k.get_item(py, 0)?
                                .extract::<PyBytes>(py)?
                                .data(py),
                            &b":"[..],
                        ]
                        .concat(),
                    )
                    .map_err(|e| {
                        handle_fallback(py, StatusError::Pattern(e))
                    })?;
                    let pattern = k.get_item(py, 1)?.extract::<PyBytes>(py)?;
                    let pattern = pattern.data(py);
                    let source = k.get_item(py, 2)?.extract::<PyBytes>(py)?;
                    let source = get_path_from_bytes(source.data(py));
                    let new = IgnorePattern::new(syntax, pattern, source);
                    Ok(new)
                })
                .collect();

            let ignore_patterns = ignore_patterns?;
            let mut all_warnings = vec![];

            let (matcher, warnings) =
                IncludeMatcher::new(ignore_patterns, &root_dir)
                    .map_err(|e| handle_fallback(py, e.into()))?;
            all_warnings.extend(warnings);

            let (status_res, warnings) = dmap
                .status(
                    &matcher,
                    root_dir.to_path_buf(),
                    ignore_files,
                    StatusOptions {
                        check_exec,
                        last_normal_time,
                        list_clean,
                        list_ignored,
                        list_unknown,
                        collect_traversed_dirs,
                    },
                )
                .map_err(|e| handle_fallback(py, e))?;

            all_warnings.extend(warnings);

            build_response(py, status_res, all_warnings)
        }
        e => Err(PyErr::new::<ValueError, _>(
            py,
            format!("Unsupported matcher {}", e),
        )),
    }
}
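One observation on the dispatch above: all three matcher arms construct an identical `StatusOptions` from the wrapper's arguments, and only one arm ever runs, so the struct could be built once up front. The sketch below (a possible refactor, not part of this commit; field values are made up for illustration) shows the shared options in isolation:

use hg::StatusOptions;

// Hypothetical: the options every arm passes to `dmap.status(...)`,
// built once instead of three times.
fn example_options() -> StatusOptions {
    StatusOptions {
        check_exec: true,
        last_normal_time: 0,
        list_clean: false,
        list_ignored: false,
        list_unknown: true,
        collect_traversed_dirs: false,
    }
}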
fn build_response(
    py: Python,
    status_res: DirstateStatus,
    warnings: Vec<PatternFileWarning>,
) -> PyResult<PyTuple> {
    let modified = collect_pybytes_list(py, status_res.modified.as_ref());
    let added = collect_pybytes_list(py, status_res.added.as_ref());
    let removed = collect_pybytes_list(py, status_res.removed.as_ref());
    let deleted = collect_pybytes_list(py, status_res.deleted.as_ref());
    let clean = collect_pybytes_list(py, status_res.clean.as_ref());
    let ignored = collect_pybytes_list(py, status_res.ignored.as_ref());
    let unknown = collect_pybytes_list(py, status_res.unknown.as_ref());
    let unsure = collect_pybytes_list(py, status_res.unsure.as_ref());
    let bad = collect_bad_matches(py, status_res.bad.as_ref())?;
    let traversed = collect_pybytes_list(py, status_res.traversed.as_ref());
    let dirty = status_res.dirty.to_py_object(py);
    let py_warnings = PyList::new(py, &[]);
    for warning in warnings.iter() {
        // We use duck-typing on the Python side for dispatch, good enough for
        // now.
        match warning {
            PatternFileWarning::InvalidSyntax(file, syn) => {
                py_warnings.append(
                    py,
                    (
                        PyBytes::new(py, &get_bytes_from_path(&file)),
                        PyBytes::new(py, syn),
                    )
                        .to_py_object(py)
                        .into_object(),
                );
            }
            PatternFileWarning::NoSuchFile(file) => py_warnings.append(
                py,
                PyBytes::new(py, &get_bytes_from_path(&file)).into_object(),
            ),
        }
    }

    Ok(PyTuple::new(
        py,
        &[
            unsure.into_object(),
            modified.into_object(),
            added.into_object(),
            removed.into_object(),
            deleted.into_object(),
            clean.into_object(),
            ignored.into_object(),
            unknown.into_object(),
            py_warnings.into_object(),
            bad.into_object(),
            traversed.into_object(),
            dirty.into_object(),
        ][..],
    ))
}
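The substantive change in this hunk is the new `dirty` entry: `build_response` now converts `status_res.dirty` to a Python object and appends it after `traversed`, growing the response tuple from 11 to 12 elements, presumably so the Python side can learn that the dirstate was mutated during the walk. A hedged sketch of reading the flag back through the same `cpython` types (`read_dirty_flag` is illustrative, not part of the bindings):

use cpython::{PyResult, PyTuple, Python};

// Hypothetical consumer: `dirty` was appended after `traversed`, so it is
// the last of the 12 tuple entries (index 11).
fn read_dirty_flag(py: Python, response: &PyTuple) -> PyResult<bool> {
    response.get_item(py, 11).extract::<bool>(py)
}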