context: use `dirstate.set_tracked` in `context.add`...
marmoute - r48394:0cef28b1 default
@@ -1,3124 +1,3120 b''
# context.py - changeset and file context objects for mercurial
#
# Copyright 2006, 2007 Olivia Mackall <olivia@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno
import filecmp
import os
import stat

from .i18n import _
from .node import (
    hex,
    nullrev,
    short,
)
from .pycompat import (
    getattr,
    open,
)
from . import (
    dagop,
    encoding,
    error,
    fileset,
    match as matchmod,
    mergestate as mergestatemod,
    metadata,
    obsolete as obsmod,
    patch,
    pathutil,
    phases,
    pycompat,
    repoview,
    scmutil,
    sparse,
    subrepo,
    subrepoutil,
    util,
)
from .utils import (
    dateutil,
    stringutil,
)

propertycache = util.propertycache


class basectx(object):
    """A basectx object represents the common logic for its children:
    changectx: read-only context that is already present in the repo,
    workingctx: a context that represents the working directory and can
                be committed,
    memctx: a context that represents changes in-memory and can also
            be committed."""

    def __init__(self, repo):
        self._repo = repo

    def __bytes__(self):
        return short(self.node())

    __str__ = encoding.strmethod(__bytes__)

    def __repr__(self):
        return "<%s %s>" % (type(self).__name__, str(self))

    def __eq__(self, other):
        try:
            return type(self) == type(other) and self._rev == other._rev
        except AttributeError:
            return False

    def __ne__(self, other):
        return not (self == other)

    def __contains__(self, key):
        return key in self._manifest

    def __getitem__(self, key):
        return self.filectx(key)

    def __iter__(self):
        return iter(self._manifest)

    def _buildstatusmanifest(self, status):
        """Builds a manifest that includes the given status results, if this is
        a working copy context. For non-working copy contexts, it just returns
        the normal manifest."""
        return self.manifest()

    def _matchstatus(self, other, match):
        """This internal method provides a way for child objects to override the
        match operator.
        """
        return match

    def _buildstatus(
        self, other, s, match, listignored, listclean, listunknown
    ):
        """build a status with respect to another context"""
        # Load earliest manifest first for caching reasons. More specifically,
        # if you have revisions 1000 and 1001, 1001 is probably stored as a
        # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
        # 1000 and cache it so that when you read 1001, we just need to apply a
        # delta to what's in the cache. So that's one full reconstruction + one
        # delta application.
        mf2 = None
        if self.rev() is not None and self.rev() < other.rev():
            mf2 = self._buildstatusmanifest(s)
        mf1 = other._buildstatusmanifest(s)
        if mf2 is None:
            mf2 = self._buildstatusmanifest(s)

        modified, added = [], []
        removed = []
        clean = []
        deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
        deletedset = set(deleted)
        d = mf1.diff(mf2, match=match, clean=listclean)
        for fn, value in pycompat.iteritems(d):
            if fn in deletedset:
                continue
            if value is None:
                clean.append(fn)
                continue
            (node1, flag1), (node2, flag2) = value
            if node1 is None:
                added.append(fn)
            elif node2 is None:
                removed.append(fn)
            elif flag1 != flag2:
                modified.append(fn)
            elif node2 not in self._repo.nodeconstants.wdirfilenodeids:
                # When comparing files between two commits, we save time by
                # not comparing the file contents when the nodeids differ.
                # Note that this means we incorrectly report a reverted change
                # to a file as a modification.
                modified.append(fn)
            elif self[fn].cmp(other[fn]):
                modified.append(fn)
            else:
                clean.append(fn)

        if removed:
            # need to filter files if they are already reported as removed
            unknown = [
                fn
                for fn in unknown
                if fn not in mf1 and (not match or match(fn))
            ]
            ignored = [
                fn
                for fn in ignored
                if fn not in mf1 and (not match or match(fn))
            ]
            # if they're deleted, don't report them as removed
            removed = [fn for fn in removed if fn not in deletedset]

        return scmutil.status(
            modified, added, removed, deleted, unknown, ignored, clean
        )

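    # A note on the classification above (illustrative comment, not part of
    # the original file): ``mf1.diff(mf2)`` yields entries of the form
    # ``path -> ((node1, flag1), (node2, flag2))`` (or ``None`` for clean
    # paths when requested), and _buildstatus() maps them as follows:
    #
    #     node1 is None        -> added
    #     node2 is None        -> removed
    #     flag1 != flag2       -> modified
    #     nodeids differ       -> modified (contents not re-compared)
    #     otherwise            -> cmp() decides between modified and clean
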
    @propertycache
    def substate(self):
        return subrepoutil.state(self, self._repo.ui)

    def subrev(self, subpath):
        return self.substate[subpath][1]

    def rev(self):
        return self._rev

    def node(self):
        return self._node

    def hex(self):
        return hex(self.node())

    def manifest(self):
        return self._manifest

    def manifestctx(self):
        return self._manifestctx

    def repo(self):
        return self._repo

    def phasestr(self):
        return phases.phasenames[self.phase()]

    def mutable(self):
        return self.phase() > phases.public

    def matchfileset(self, cwd, expr, badfn=None):
        return fileset.match(self, cwd, expr, badfn=badfn)

    def obsolete(self):
        """True if the changeset is obsolete"""
        return self.rev() in obsmod.getrevs(self._repo, b'obsolete')

    def extinct(self):
        """True if the changeset is extinct"""
        return self.rev() in obsmod.getrevs(self._repo, b'extinct')

    def orphan(self):
        """True if the changeset is not obsolete, but its ancestor is"""
        return self.rev() in obsmod.getrevs(self._repo, b'orphan')

    def phasedivergent(self):
        """True if the changeset tries to be a successor of a public changeset

        Only non-public and non-obsolete changesets may be phase-divergent.
        """
        return self.rev() in obsmod.getrevs(self._repo, b'phasedivergent')

    def contentdivergent(self):
        """Is a successor of a changeset with multiple possible successor sets

        Only non-public and non-obsolete changesets may be content-divergent.
        """
        return self.rev() in obsmod.getrevs(self._repo, b'contentdivergent')

    def isunstable(self):
        """True if the changeset is either orphan, phase-divergent or
        content-divergent"""
        return self.orphan() or self.phasedivergent() or self.contentdivergent()

    def instabilities(self):
        """return the list of instabilities affecting this changeset.

        Instabilities are returned as strings. Possible values are:
        - orphan,
        - phase-divergent,
        - content-divergent.
        """
        instabilities = []
        if self.orphan():
            instabilities.append(b'orphan')
        if self.phasedivergent():
            instabilities.append(b'phase-divergent')
        if self.contentdivergent():
            instabilities.append(b'content-divergent')
        return instabilities

    def parents(self):
        """return contexts for each parent changeset"""
        return self._parents

    def p1(self):
        return self._parents[0]

    def p2(self):
        parents = self._parents
        if len(parents) == 2:
            return parents[1]
        return self._repo[nullrev]

    def _fileinfo(self, path):
        if '_manifest' in self.__dict__:
            try:
                return self._manifest.find(path)
            except KeyError:
                raise error.ManifestLookupError(
                    self._node or b'None', path, _(b'not found in manifest')
                )
        if '_manifestdelta' in self.__dict__ or path in self.files():
            if path in self._manifestdelta:
                return (
                    self._manifestdelta[path],
                    self._manifestdelta.flags(path),
                )
        mfl = self._repo.manifestlog
        try:
            node, flag = mfl[self._changeset.manifest].find(path)
        except KeyError:
            raise error.ManifestLookupError(
                self._node or b'None', path, _(b'not found in manifest')
            )

        return node, flag

    def filenode(self, path):
        return self._fileinfo(path)[0]

    def flags(self, path):
        try:
            return self._fileinfo(path)[1]
        except error.LookupError:
            return b''

    @propertycache
    def _copies(self):
        return metadata.computechangesetcopies(self)

    def p1copies(self):
        return self._copies[0]

    def p2copies(self):
        return self._copies[1]

    def sub(self, path, allowcreate=True):
        '''return a subrepo for the stored revision of path, never wdir()'''
        return subrepo.subrepo(self, path, allowcreate=allowcreate)

    def nullsub(self, path, pctx):
        return subrepo.nullsubrepo(self, path, pctx)

    def workingsub(self, path):
        """return a subrepo for the stored revision, or wdir if this is a wdir
        context.
        """
        return subrepo.subrepo(self, path, allowwdir=True)

    def match(
        self,
        pats=None,
        include=None,
        exclude=None,
        default=b'glob',
        listsubrepos=False,
        badfn=None,
        cwd=None,
    ):
        r = self._repo
        if not cwd:
            cwd = r.getcwd()
        return matchmod.match(
            r.root,
            cwd,
            pats,
            include,
            exclude,
            default,
            auditor=r.nofsauditor,
            ctx=self,
            listsubrepos=listsubrepos,
            badfn=badfn,
        )

    def diff(
        self,
        ctx2=None,
        match=None,
        changes=None,
        opts=None,
        losedatafn=None,
        pathfn=None,
        copy=None,
        copysourcematch=None,
        hunksfilterfn=None,
    ):
        """Returns a diff generator for the given contexts and matcher"""
        if ctx2 is None:
            ctx2 = self.p1()
        if ctx2 is not None:
            ctx2 = self._repo[ctx2]
        return patch.diff(
            self._repo,
            ctx2,
            self,
            match=match,
            changes=changes,
            opts=opts,
            losedatafn=losedatafn,
            pathfn=pathfn,
            copy=copy,
            copysourcematch=copysourcematch,
            hunksfilterfn=hunksfilterfn,
        )

    def dirs(self):
        return self._manifest.dirs()

    def hasdir(self, dir):
        return self._manifest.hasdir(dir)

    def status(
        self,
        other=None,
        match=None,
        listignored=False,
        listclean=False,
        listunknown=False,
        listsubrepos=False,
    ):
        """return status of files between two nodes or node and working
        directory.

        If other is None, compare this node with working directory.

        ctx1.status(ctx2) returns the status of change from ctx1 to ctx2

        Returns a mercurial.scmutil.status object.

        Data can be accessed using either tuple notation:

          (modified, added, removed, deleted, unknown, ignored, clean)

        or direct attribute access:

          s.modified, s.added, ...
        """

        ctx1 = self
        ctx2 = self._repo[other]

        # This next code block is, admittedly, fragile logic that tests for
        # reversing the contexts and wouldn't need to exist if it weren't for
        # the fast (and common) code path of comparing the working directory
        # with its first parent.
        #
        # What we're aiming for here is the ability to call:
        #
        # workingctx.status(parentctx)
        #
        # If we always built the manifest for each context and compared those,
        # then we'd be done. But the special case of the above call means we
        # just copy the manifest of the parent.
        reversed = False
        if not isinstance(ctx1, changectx) and isinstance(ctx2, changectx):
            reversed = True
            ctx1, ctx2 = ctx2, ctx1

        match = self._repo.narrowmatch(match)
        match = ctx2._matchstatus(ctx1, match)
        r = scmutil.status([], [], [], [], [], [], [])
        r = ctx2._buildstatus(
            ctx1, r, match, listignored, listclean, listunknown
        )

        if reversed:
            # Reverse added and removed. Clear deleted, unknown and ignored as
            # these make no sense to reverse.
            r = scmutil.status(
                r.modified, r.removed, r.added, [], [], [], r.clean
            )

        if listsubrepos:
            for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
                try:
                    rev2 = ctx2.subrev(subpath)
                except KeyError:
                    # A subrepo that existed in node1 was deleted between
                    # node1 and node2 (inclusive). Thus, ctx2's substate
                    # won't contain that subpath. The best we can do is
                    # ignore it.
                    rev2 = None
                submatch = matchmod.subdirmatcher(subpath, match)
                s = sub.status(
                    rev2,
                    match=submatch,
                    ignored=listignored,
                    clean=listclean,
                    unknown=listunknown,
                    listsubrepos=True,
                )
                for k in (
                    'modified',
                    'added',
                    'removed',
                    'deleted',
                    'unknown',
                    'ignored',
                    'clean',
                ):
                    rfiles, sfiles = getattr(r, k), getattr(s, k)
                    rfiles.extend(b"%s/%s" % (subpath, f) for f in sfiles)

        r.modified.sort()
        r.added.sort()
        r.removed.sort()
        r.deleted.sort()
        r.unknown.sort()
        r.ignored.sort()
        r.clean.sort()

        return r

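    # Usage sketch (illustrative comment, not part of the original file;
    # assumes a local ``repo`` object):
    #
    #     st = repo[b'.^'].status(repo[b'.'])  # changes from ``.^`` to ``.``
    #     st.modified, st.added, st.removed    # sorted lists of byte paths
    #     m, a, r, d, u, i, c = st             # equivalent tuple access
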
    def mergestate(self, clean=False):
        """Get a mergestate object for this context."""
        raise NotImplementedError(
            '%s does not implement mergestate()' % self.__class__
        )

    def isempty(self):
        return not (
            len(self.parents()) > 1
            or self.branch() != self.p1().branch()
            or self.closesbranch()
            or self.files()
        )


class changectx(basectx):
    """A changecontext object makes access to data related to a particular
    changeset convenient. It represents a read-only context already present in
    the repo."""

    def __init__(self, repo, rev, node, maybe_filtered=True):
        super(changectx, self).__init__(repo)
        self._rev = rev
        self._node = node
        # When maybe_filtered is True, the revision might be affected by
        # changelog filtering and operation through the filtered changelog must be used.
        #
        # When maybe_filtered is False, the revision has already been checked
        # against filtering and is not filtered. Operation through the
        # unfiltered changelog might be used in some cases.
        self._maybe_filtered = maybe_filtered

    def __hash__(self):
        try:
            return hash(self._rev)
        except AttributeError:
            return id(self)

    def __nonzero__(self):
        return self._rev != nullrev

    __bool__ = __nonzero__

    @propertycache
    def _changeset(self):
        if self._maybe_filtered:
            repo = self._repo
        else:
            repo = self._repo.unfiltered()
        return repo.changelog.changelogrevision(self.rev())

    @propertycache
    def _manifest(self):
        return self._manifestctx.read()

    @property
    def _manifestctx(self):
        return self._repo.manifestlog[self._changeset.manifest]

    @propertycache
    def _manifestdelta(self):
        return self._manifestctx.readdelta()

    @propertycache
    def _parents(self):
        repo = self._repo
        if self._maybe_filtered:
            cl = repo.changelog
        else:
            cl = repo.unfiltered().changelog

        p1, p2 = cl.parentrevs(self._rev)
        if p2 == nullrev:
            return [changectx(repo, p1, cl.node(p1), maybe_filtered=False)]
        return [
            changectx(repo, p1, cl.node(p1), maybe_filtered=False),
            changectx(repo, p2, cl.node(p2), maybe_filtered=False),
        ]

    def changeset(self):
        c = self._changeset
        return (
            c.manifest,
            c.user,
            c.date,
            c.files,
            c.description,
            c.extra,
        )

    def manifestnode(self):
        return self._changeset.manifest

    def user(self):
        return self._changeset.user

    def date(self):
        return self._changeset.date

    def files(self):
        return self._changeset.files

    def filesmodified(self):
        modified = set(self.files())
        modified.difference_update(self.filesadded())
        modified.difference_update(self.filesremoved())
        return sorted(modified)

    def filesadded(self):
        filesadded = self._changeset.filesadded
        compute_on_none = True
        if self._repo.filecopiesmode == b'changeset-sidedata':
            compute_on_none = False
        else:
            source = self._repo.ui.config(b'experimental', b'copies.read-from')
            if source == b'changeset-only':
                compute_on_none = False
            elif source != b'compatibility':
                # filelog mode, ignore any changelog content
                filesadded = None
        if filesadded is None:
            if compute_on_none:
                filesadded = metadata.computechangesetfilesadded(self)
            else:
                filesadded = []
        return filesadded

    def filesremoved(self):
        filesremoved = self._changeset.filesremoved
        compute_on_none = True
        if self._repo.filecopiesmode == b'changeset-sidedata':
            compute_on_none = False
        else:
            source = self._repo.ui.config(b'experimental', b'copies.read-from')
            if source == b'changeset-only':
                compute_on_none = False
            elif source != b'compatibility':
                # filelog mode, ignore any changelog content
                filesremoved = None
        if filesremoved is None:
            if compute_on_none:
                filesremoved = metadata.computechangesetfilesremoved(self)
            else:
                filesremoved = []
        return filesremoved

    @propertycache
    def _copies(self):
        p1copies = self._changeset.p1copies
        p2copies = self._changeset.p2copies
        compute_on_none = True
        if self._repo.filecopiesmode == b'changeset-sidedata':
            compute_on_none = False
        else:
            source = self._repo.ui.config(b'experimental', b'copies.read-from')
            # If config says to get copy metadata only from changeset, then
            # return that, defaulting to {} if there was no copy metadata. In
            # compatibility mode, we return copy data from the changeset if it
            # was recorded there, and otherwise we fall back to getting it from
            # the filelogs (below).
            #
            # If we are in compatibility mode and there is no data in the
            # changeset, we get the copy metadata from the filelogs.
            #
            # Otherwise, when config said to read only from filelog, we get the
            # copy metadata from the filelogs.
            if source == b'changeset-only':
                compute_on_none = False
            elif source != b'compatibility':
                # filelog mode, ignore any changelog content
                p1copies = p2copies = None
        if p1copies is None:
            if compute_on_none:
                p1copies, p2copies = super(changectx, self)._copies
            else:
                if p1copies is None:
                    p1copies = {}
                if p2copies is None:
                    p2copies = {}
        return p1copies, p2copies

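    # Summary of the copy-metadata sources handled above (illustrative
    # comment, not part of the original file):
    #
    #     copies.read-from = changeset-only  -> changelog data, default to {}
    #     copies.read-from = compatibility   -> changelog data, else filelogs
    #     any other value ("filelog" mode)   -> filelogs only
    #
    # Repositories in changeset-sidedata mode always behave like
    # changeset-only.
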
    def description(self):
        return self._changeset.description

    def branch(self):
        return encoding.tolocal(self._changeset.extra.get(b"branch"))

    def closesbranch(self):
        return b'close' in self._changeset.extra

    def extra(self):
        """Return a dict of extra information."""
        return self._changeset.extra

    def tags(self):
        """Return a list of byte tag names"""
        return self._repo.nodetags(self._node)

    def bookmarks(self):
        """Return a list of byte bookmark names."""
        return self._repo.nodebookmarks(self._node)

    def phase(self):
        return self._repo._phasecache.phase(self._repo, self._rev)

    def hidden(self):
        return self._rev in repoview.filterrevs(self._repo, b'visible')

    def isinmemory(self):
        return False

    def children(self):
        """return list of changectx contexts for each child changeset.

        This returns only the immediate child changesets. Use descendants() to
        recursively walk children.
        """
        c = self._repo.changelog.children(self._node)
        return [self._repo[x] for x in c]

    def ancestors(self):
        for a in self._repo.changelog.ancestors([self._rev]):
            yield self._repo[a]

    def descendants(self):
        """Recursively yield all children of the changeset.

        For just the immediate children, use children()
        """
        for d in self._repo.changelog.descendants([self._rev]):
            yield self._repo[d]

    def filectx(self, path, fileid=None, filelog=None):
        """get a file context from this changeset"""
        if fileid is None:
            fileid = self.filenode(path)
        return filectx(
            self._repo, path, fileid=fileid, changectx=self, filelog=filelog
        )

    def ancestor(self, c2, warn=False):
        """return the "best" ancestor context of self and c2

        If there are multiple candidates, it will show a message and check
        merge.preferancestor configuration before falling back to the
        revlog ancestor."""
        # deal with workingctxs
        n2 = c2._node
        if n2 is None:
            n2 = c2._parents[0]._node
        cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
        if not cahs:
            anc = self._repo.nodeconstants.nullid
        elif len(cahs) == 1:
            anc = cahs[0]
        else:
            # experimental config: merge.preferancestor
            for r in self._repo.ui.configlist(b'merge', b'preferancestor'):
                try:
                    ctx = scmutil.revsymbol(self._repo, r)
                except error.RepoLookupError:
                    continue
                anc = ctx.node()
                if anc in cahs:
                    break
            else:
                anc = self._repo.changelog.ancestor(self._node, n2)
            if warn:
                self._repo.ui.status(
                    (
                        _(b"note: using %s as ancestor of %s and %s\n")
                        % (short(anc), short(self._node), short(n2))
                    )
                    + b''.join(
                        _(
                            b" alternatively, use --config "
                            b"merge.preferancestor=%s\n"
                        )
                        % short(n)
                        for n in sorted(cahs)
                        if n != anc
                    )
                )
        return self._repo[anc]

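    # Illustrative note (not part of the original file): when several common
    # ancestor heads exist, the first entry of the experimental
    # ``merge.preferancestor`` configuration that names one of them wins,
    # e.g.:
    #
    #     hg merge --config merge.preferancestor=<rev>
    #
    # otherwise the revlog ancestor is used and, with warn=True, the
    # remaining candidates are listed.
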
    def isancestorof(self, other):
        """True if this changeset is an ancestor of other"""
        return self._repo.changelog.isancestorrev(self._rev, other._rev)

    def walk(self, match):
        '''Generates matching file names.'''

        # Wrap match.bad method to have message with nodeid
        def bad(fn, msg):
            # The manifest doesn't know about subrepos, so don't complain about
            # paths into valid subrepos.
            if any(fn == s or fn.startswith(s + b'/') for s in self.substate):
                return
            match.bad(fn, _(b'no such file in rev %s') % self)

        m = matchmod.badmatch(self._repo.narrowmatch(match), bad)
        return self._manifest.walk(m)

    def matches(self, match):
        return self.walk(match)


class basefilectx(object):
    """A filecontext object represents the common logic for its children:
    filectx: read-only access to a filerevision that is already present
             in the repo,
    workingfilectx: a filecontext that represents files from the working
                    directory,
    memfilectx: a filecontext that represents files in-memory,
    """

    @propertycache
    def _filelog(self):
        return self._repo.file(self._path)

    @propertycache
    def _changeid(self):
        if '_changectx' in self.__dict__:
            return self._changectx.rev()
        elif '_descendantrev' in self.__dict__:
            # this file context was created from a revision with a known
            # descendant, we can (lazily) correct for linkrev aliases
            return self._adjustlinkrev(self._descendantrev)
        else:
            return self._filelog.linkrev(self._filerev)

    @propertycache
    def _filenode(self):
        if '_fileid' in self.__dict__:
            return self._filelog.lookup(self._fileid)
        else:
            return self._changectx.filenode(self._path)

    @propertycache
    def _filerev(self):
        return self._filelog.rev(self._filenode)

    @propertycache
    def _repopath(self):
        return self._path

    def __nonzero__(self):
        try:
            self._filenode
            return True
        except error.LookupError:
            # file is missing
            return False

    __bool__ = __nonzero__

    def __bytes__(self):
        try:
            return b"%s@%s" % (self.path(), self._changectx)
        except error.LookupError:
            return b"%s@???" % self.path()

    __str__ = encoding.strmethod(__bytes__)

    def __repr__(self):
        return "<%s %s>" % (type(self).__name__, str(self))

    def __hash__(self):
        try:
            return hash((self._path, self._filenode))
        except AttributeError:
            return id(self)

    def __eq__(self, other):
        try:
            return (
                type(self) == type(other)
                and self._path == other._path
                and self._filenode == other._filenode
            )
        except AttributeError:
            return False

    def __ne__(self, other):
        return not (self == other)

    def filerev(self):
        return self._filerev

    def filenode(self):
        return self._filenode

    @propertycache
    def _flags(self):
        return self._changectx.flags(self._path)

    def flags(self):
        return self._flags

    def filelog(self):
        return self._filelog

    def rev(self):
        return self._changeid

    def linkrev(self):
        return self._filelog.linkrev(self._filerev)

    def node(self):
        return self._changectx.node()

    def hex(self):
        return self._changectx.hex()

    def user(self):
        return self._changectx.user()

    def date(self):
        return self._changectx.date()

    def files(self):
        return self._changectx.files()

    def description(self):
        return self._changectx.description()

    def branch(self):
        return self._changectx.branch()

    def extra(self):
        return self._changectx.extra()

    def phase(self):
        return self._changectx.phase()

    def phasestr(self):
        return self._changectx.phasestr()

    def obsolete(self):
        return self._changectx.obsolete()

    def instabilities(self):
        return self._changectx.instabilities()

    def manifest(self):
        return self._changectx.manifest()

    def changectx(self):
        return self._changectx

    def renamed(self):
        return self._copied

    def copysource(self):
        return self._copied and self._copied[0]

    def repo(self):
        return self._repo

    def size(self):
        return len(self.data())

    def path(self):
        return self._path

    def isbinary(self):
        try:
            return stringutil.binary(self.data())
        except IOError:
            return False

    def isexec(self):
        return b'x' in self.flags()

    def islink(self):
        return b'l' in self.flags()

    def isabsent(self):
        """whether this filectx represents a file not in self._changectx

        This is mainly for merge code to detect change/delete conflicts. This is
        expected to be True for all subclasses of basectx."""
        return False

    _customcmp = False

    def cmp(self, fctx):
        """compare with other file context

        returns True if different from fctx.
        """
        if fctx._customcmp:
            return fctx.cmp(self)

        if self._filenode is None:
            raise error.ProgrammingError(
                b'filectx.cmp() must be reimplemented if not backed by revlog'
            )

        if fctx._filenode is None:
            if self._repo._encodefilterpats:
                # can't rely on size() because wdir content may be decoded
                return self._filelog.cmp(self._filenode, fctx.data())
            if self.size() - 4 == fctx.size():
                # size() can match:
                # if file data starts with '\1\n', empty metadata block is
                # prepended, which adds 4 bytes to filelog.size().
                return self._filelog.cmp(self._filenode, fctx.data())
            if self.size() == fctx.size() or self.flags() == b'l':
                # size() matches: need to compare content
                # issue6456: Always compare symlinks because size can represent
                # encrypted string for EXT-4 encryption (fscrypt).
                return self._filelog.cmp(self._filenode, fctx.data())

        # size() differs
        return True

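    # Background for the ``self.size() - 4`` case above (illustrative
    # comment, not part of the original file): filelog copy metadata is
    # framed by b'\x01\n' markers. When the file content itself begins with
    # b'\x01\n', an empty metadata block (b'\x01\n\x01\n', 4 bytes) is
    # prepended in storage, so filelog.size() can exceed the real content
    # size by exactly four bytes even though the contents are equal.
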
1000 def _adjustlinkrev(self, srcrev, inclusive=False, stoprev=None):
1000 def _adjustlinkrev(self, srcrev, inclusive=False, stoprev=None):
1001 """return the first ancestor of <srcrev> introducing <fnode>
1001 """return the first ancestor of <srcrev> introducing <fnode>
1002
1002
1003 If the linkrev of the file revision does not point to an ancestor of
1003 If the linkrev of the file revision does not point to an ancestor of
1004 srcrev, we'll walk down the ancestors until we find one introducing
1004 srcrev, we'll walk down the ancestors until we find one introducing
1005 this file revision.
1005 this file revision.
1006
1006
1007 :srcrev: the changeset revision we search ancestors from
1007 :srcrev: the changeset revision we search ancestors from
1008 :inclusive: if true, the src revision will also be checked
1008 :inclusive: if true, the src revision will also be checked
1009 :stoprev: an optional revision to stop the walk at. If no introduction
1009 :stoprev: an optional revision to stop the walk at. If no introduction
1010 of this file content could be found before this floor
1010 of this file content could be found before this floor
1011 revision, the function will returns "None" and stops its
1011 revision, the function will returns "None" and stops its
1012 iteration.
1012 iteration.
1013 """
1013 """
1014 repo = self._repo
1014 repo = self._repo
1015 cl = repo.unfiltered().changelog
1015 cl = repo.unfiltered().changelog
1016 mfl = repo.manifestlog
1016 mfl = repo.manifestlog
1017 # fetch the linkrev
1017 # fetch the linkrev
1018 lkr = self.linkrev()
1018 lkr = self.linkrev()
1019 if srcrev == lkr:
1019 if srcrev == lkr:
1020 return lkr
1020 return lkr
1021 # hack to reuse ancestor computation when searching for renames
1021 # hack to reuse ancestor computation when searching for renames
1022 memberanc = getattr(self, '_ancestrycontext', None)
1022 memberanc = getattr(self, '_ancestrycontext', None)
1023 iteranc = None
1023 iteranc = None
1024 if srcrev is None:
1024 if srcrev is None:
1025 # wctx case, used by workingfilectx during mergecopy
1025 # wctx case, used by workingfilectx during mergecopy
1026 revs = [p.rev() for p in self._repo[None].parents()]
1026 revs = [p.rev() for p in self._repo[None].parents()]
1027 inclusive = True # we skipped the real (revless) source
1027 inclusive = True # we skipped the real (revless) source
1028 else:
1028 else:
1029 revs = [srcrev]
1029 revs = [srcrev]
1030 if memberanc is None:
1030 if memberanc is None:
1031 memberanc = iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
1031 memberanc = iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
1032 # check if this linkrev is an ancestor of srcrev
1032 # check if this linkrev is an ancestor of srcrev
1033 if lkr not in memberanc:
1033 if lkr not in memberanc:
1034 if iteranc is None:
1034 if iteranc is None:
1035 iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
1035 iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
1036 fnode = self._filenode
1036 fnode = self._filenode
1037 path = self._path
1037 path = self._path
1038 for a in iteranc:
1038 for a in iteranc:
1039 if stoprev is not None and a < stoprev:
1039 if stoprev is not None and a < stoprev:
1040 return None
1040 return None
1041 ac = cl.read(a) # get changeset data (we avoid object creation)
1041 ac = cl.read(a) # get changeset data (we avoid object creation)
1042 if path in ac[3]: # checking the 'files' field.
1042 if path in ac[3]: # checking the 'files' field.
1043 # The file has been touched, check if the content is
1043 # The file has been touched, check if the content is
1044 # similar to the one we search for.
1044 # similar to the one we search for.
1045 if fnode == mfl[ac[0]].readfast().get(path):
1045 if fnode == mfl[ac[0]].readfast().get(path):
1046 return a
1046 return a
1047 # In theory, we should never get out of that loop without a result.
1047 # In theory, we should never get out of that loop without a result.
1048 # But if a manifest uses a buggy file revision (not a child of the
1048 # But if a manifest uses a buggy file revision (not a child of the
1049 # one it replaces) we could. Such a buggy situation will likely
1049 # one it replaces) we could. Such a buggy situation will likely
1050 # result in a crash somewhere else at some point.
1050 # result in a crash somewhere else at some point.
1051 return lkr
1051 return lkr
1052
1052
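# A minimal usage sketch of the adjustment above, assuming an existing
# `repo` object and a revision `r` whose manifest contains `path`
# (hypothetical names, for illustration only):
#
#     fctx = repo[r][path]
#     fctx.linkrev()   # may name a changeset that is not an ancestor of r
#     fctx.introrev()  # adjusted: always an ancestor of r (or r itself)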
1053 def isintroducedafter(self, changelogrev):
1053 def isintroducedafter(self, changelogrev):
1054 """True if a filectx has been introduced after a given floor revision"""
1054 """True if a filectx has been introduced after a given floor revision"""
1055 if self.linkrev() >= changelogrev:
1055 if self.linkrev() >= changelogrev:
1056 return True
1056 return True
1057 introrev = self._introrev(stoprev=changelogrev)
1057 introrev = self._introrev(stoprev=changelogrev)
1058 if introrev is None:
1058 if introrev is None:
1059 return False
1059 return False
1060 return introrev >= changelogrev
1060 return introrev >= changelogrev
1061
1061
1062 def introrev(self):
1062 def introrev(self):
1063 """return the rev of the changeset which introduced this file revision
1063 """return the rev of the changeset which introduced this file revision
1064
1064
1065 This method is different from linkrev because it takes into account the
1065 This method is different from linkrev because it takes into account the
1066 changeset the filectx was created from. It ensures the returned
1066 changeset the filectx was created from. It ensures the returned
1067 revision is one of its ancestors. This prevents bugs from
1067 revision is one of its ancestors. This prevents bugs from
1068 'linkrev-shadowing' when a file revision is used by multiple
1068 'linkrev-shadowing' when a file revision is used by multiple
1069 changesets.
1069 changesets.
1070 """
1070 """
1071 return self._introrev()
1071 return self._introrev()
1072
1072
1073 def _introrev(self, stoprev=None):
1073 def _introrev(self, stoprev=None):
1074 """
1074 """
1075 Same as `introrev`, but with an extra argument to limit the changelog
1075 Same as `introrev`, but with an extra argument to limit the changelog
1076 iteration range in some internal use cases.
1076 iteration range in some internal use cases.
1077
1077
1078 If `stoprev` is set, the `introrev` will not be searched past that
1078 If `stoprev` is set, the `introrev` will not be searched past that
1079 `stoprev` revision and "None" might be returned. This is useful to
1079 `stoprev` revision and "None" might be returned. This is useful to
1080 limit the iteration range.
1080 limit the iteration range.
1081 """
1081 """
1082 toprev = None
1082 toprev = None
1083 attrs = vars(self)
1083 attrs = vars(self)
1084 if '_changeid' in attrs:
1084 if '_changeid' in attrs:
1085 # We have a cached value already
1085 # We have a cached value already
1086 toprev = self._changeid
1086 toprev = self._changeid
1087 elif '_changectx' in attrs:
1087 elif '_changectx' in attrs:
1088 # We know which changelog entry we are coming from
1088 # We know which changelog entry we are coming from
1089 toprev = self._changectx.rev()
1089 toprev = self._changectx.rev()
1090
1090
1091 if toprev is not None:
1091 if toprev is not None:
1092 return self._adjustlinkrev(toprev, inclusive=True, stoprev=stoprev)
1092 return self._adjustlinkrev(toprev, inclusive=True, stoprev=stoprev)
1093 elif '_descendantrev' in attrs:
1093 elif '_descendantrev' in attrs:
1094 introrev = self._adjustlinkrev(self._descendantrev, stoprev=stoprev)
1094 introrev = self._adjustlinkrev(self._descendantrev, stoprev=stoprev)
1095 # be nice and cache the result of the computation
1095 # be nice and cache the result of the computation
1096 if introrev is not None:
1096 if introrev is not None:
1097 self._changeid = introrev
1097 self._changeid = introrev
1098 return introrev
1098 return introrev
1099 else:
1099 else:
1100 return self.linkrev()
1100 return self.linkrev()
1101
1101
1102 def introfilectx(self):
1102 def introfilectx(self):
1103 """Return filectx having identical contents, but pointing to the
1103 """Return filectx having identical contents, but pointing to the
1104 changeset revision where this filectx was introduced"""
1104 changeset revision where this filectx was introduced"""
1105 introrev = self.introrev()
1105 introrev = self.introrev()
1106 if self.rev() == introrev:
1106 if self.rev() == introrev:
1107 return self
1107 return self
1108 return self.filectx(self.filenode(), changeid=introrev)
1108 return self.filectx(self.filenode(), changeid=introrev)
1109
1109
1110 def _parentfilectx(self, path, fileid, filelog):
1110 def _parentfilectx(self, path, fileid, filelog):
1111 """create parent filectx keeping ancestry info for _adjustlinkrev()"""
1111 """create parent filectx keeping ancestry info for _adjustlinkrev()"""
1112 fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
1112 fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
1113 if '_changeid' in vars(self) or '_changectx' in vars(self):
1113 if '_changeid' in vars(self) or '_changectx' in vars(self):
1114 # If self is associated with a changeset (probably explicitly
1114 # If self is associated with a changeset (probably explicitly
1115 # fed), ensure the created filectx is associated with a
1115 # fed), ensure the created filectx is associated with a
1116 # changeset that is an ancestor of self.changectx.
1116 # changeset that is an ancestor of self.changectx.
1117 # This lets us later use _adjustlinkrev to get a correct link.
1117 # This lets us later use _adjustlinkrev to get a correct link.
1118 fctx._descendantrev = self.rev()
1118 fctx._descendantrev = self.rev()
1119 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
1119 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
1120 elif '_descendantrev' in vars(self):
1120 elif '_descendantrev' in vars(self):
1121 # Otherwise propagate _descendantrev if we have one associated.
1121 # Otherwise propagate _descendantrev if we have one associated.
1122 fctx._descendantrev = self._descendantrev
1122 fctx._descendantrev = self._descendantrev
1123 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
1123 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
1124 return fctx
1124 return fctx
1125
1125
1126 def parents(self):
1126 def parents(self):
1127 _path = self._path
1127 _path = self._path
1128 fl = self._filelog
1128 fl = self._filelog
1129 parents = self._filelog.parents(self._filenode)
1129 parents = self._filelog.parents(self._filenode)
1130 pl = [
1130 pl = [
1131 (_path, node, fl)
1131 (_path, node, fl)
1132 for node in parents
1132 for node in parents
1133 if node != self._repo.nodeconstants.nullid
1133 if node != self._repo.nodeconstants.nullid
1134 ]
1134 ]
1135
1135
1136 r = fl.renamed(self._filenode)
1136 r = fl.renamed(self._filenode)
1137 if r:
1137 if r:
1138 # - In the simple rename case, both parents are nullid, pl is empty.
1138 # - In the simple rename case, both parents are nullid, pl is empty.
1139 # - In case of a merge, only one of the parents is nullid and should
1139 # - In case of a merge, only one of the parents is nullid and should
1140 # be replaced with the rename information. This parent is -always-
1140 # be replaced with the rename information. This parent is -always-
1141 # the first one.
1141 # the first one.
1142 #
1142 #
1143 # As nullid parents have always been filtered out by the previous list
1143 # As nullid parents have always been filtered out by the previous list
1144 # comprehension, inserting at index 0 will always result in replacing
1144 # comprehension, inserting at index 0 will always result in replacing
1145 # the first nullid parent with the rename information.
1145 # the first nullid parent with the rename information.
1146 pl.insert(0, (r[0], r[1], self._repo.file(r[0])))
1146 pl.insert(0, (r[0], r[1], self._repo.file(r[0])))
1147
1147
1148 return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]
1148 return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]
1149
1149
1150 def p1(self):
1150 def p1(self):
1151 return self.parents()[0]
1151 return self.parents()[0]
1152
1152
1153 def p2(self):
1153 def p2(self):
1154 p = self.parents()
1154 p = self.parents()
1155 if len(p) == 2:
1155 if len(p) == 2:
1156 return p[1]
1156 return p[1]
1157 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
1157 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
1158
1158
1159 def annotate(self, follow=False, skiprevs=None, diffopts=None):
1159 def annotate(self, follow=False, skiprevs=None, diffopts=None):
1160 """Returns a list of annotateline objects for each line in the file
1160 """Returns a list of annotateline objects for each line in the file
1161
1161
1162 - line.fctx is the filectx of the node where that line was last changed
1162 - line.fctx is the filectx of the node where that line was last changed
1163 - line.lineno is the line number at the first appearance in the managed
1163 - line.lineno is the line number at the first appearance in the managed
1164 file
1164 file
1165 - line.text is the data on that line (including newline character)
1165 - line.text is the data on that line (including newline character)
1166 """
1166 """
1167 getlog = util.lrucachefunc(lambda x: self._repo.file(x))
1167 getlog = util.lrucachefunc(lambda x: self._repo.file(x))
1168
1168
1169 def parents(f):
1169 def parents(f):
1170 # Cut _descendantrev here to mitigate the penalty of lazy linkrev
1170 # Cut _descendantrev here to mitigate the penalty of lazy linkrev
1171 # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
1171 # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
1172 # from the topmost introrev (= srcrev) down to p.linkrev() if it
1172 # from the topmost introrev (= srcrev) down to p.linkrev() if it
1173 # isn't an ancestor of the srcrev.
1173 # isn't an ancestor of the srcrev.
1174 f._changeid
1174 f._changeid
1175 pl = f.parents()
1175 pl = f.parents()
1176
1176
1177 # Don't return renamed parents if we aren't following.
1177 # Don't return renamed parents if we aren't following.
1178 if not follow:
1178 if not follow:
1179 pl = [p for p in pl if p.path() == f.path()]
1179 pl = [p for p in pl if p.path() == f.path()]
1180
1180
1181 # renamed filectx won't have a filelog yet, so set it
1181 # renamed filectx won't have a filelog yet, so set it
1182 # from the cache to save time
1182 # from the cache to save time
1183 for p in pl:
1183 for p in pl:
1184 if '_filelog' not in p.__dict__:
1184 if '_filelog' not in p.__dict__:
1185 p._filelog = getlog(p.path())
1185 p._filelog = getlog(p.path())
1186
1186
1187 return pl
1187 return pl
1188
1188
1189 # use linkrev to find the first changeset where self appeared
1189 # use linkrev to find the first changeset where self appeared
1190 base = self.introfilectx()
1190 base = self.introfilectx()
1191 if getattr(base, '_ancestrycontext', None) is None:
1191 if getattr(base, '_ancestrycontext', None) is None:
1192 # it is safe to use an unfiltered repository here because we are
1192 # it is safe to use an unfiltered repository here because we are
1193 # walking ancestors only.
1193 # walking ancestors only.
1194 cl = self._repo.unfiltered().changelog
1194 cl = self._repo.unfiltered().changelog
1195 if base.rev() is None:
1195 if base.rev() is None:
1196 # wctx is not inclusive, but works because _ancestrycontext
1196 # wctx is not inclusive, but works because _ancestrycontext
1197 # is used to test filelog revisions
1197 # is used to test filelog revisions
1198 ac = cl.ancestors(
1198 ac = cl.ancestors(
1199 [p.rev() for p in base.parents()], inclusive=True
1199 [p.rev() for p in base.parents()], inclusive=True
1200 )
1200 )
1201 else:
1201 else:
1202 ac = cl.ancestors([base.rev()], inclusive=True)
1202 ac = cl.ancestors([base.rev()], inclusive=True)
1203 base._ancestrycontext = ac
1203 base._ancestrycontext = ac
1204
1204
1205 return dagop.annotate(
1205 return dagop.annotate(
1206 base, parents, skiprevs=skiprevs, diffopts=diffopts
1206 base, parents, skiprevs=skiprevs, diffopts=diffopts
1207 )
1207 )
1208
1208
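# A hedged usage sketch, assuming an existing file context `fctx`; the
# attribute names follow the docstring above:
#
#     for line in fctx.annotate(follow=True):
#         line.fctx.rev(), line.lineno, line.text  # changeset, number, content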
1209 def ancestors(self, followfirst=False):
1209 def ancestors(self, followfirst=False):
1210 visit = {}
1210 visit = {}
1211 c = self
1211 c = self
1212 if followfirst:
1212 if followfirst:
1213 cut = 1
1213 cut = 1
1214 else:
1214 else:
1215 cut = None
1215 cut = None
1216
1216
1217 while True:
1217 while True:
1218 for parent in c.parents()[:cut]:
1218 for parent in c.parents()[:cut]:
1219 visit[(parent.linkrev(), parent.filenode())] = parent
1219 visit[(parent.linkrev(), parent.filenode())] = parent
1220 if not visit:
1220 if not visit:
1221 break
1221 break
1222 c = visit.pop(max(visit))
1222 c = visit.pop(max(visit))
1223 yield c
1223 yield c
1224
1224
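# The generator above always pops the pending entry with the largest
# (linkrev, filenode) key, so ancestors stream out in roughly
# newest-first order. A hedged usage sketch, assuming a file context
# `fctx`:
#
#     for anc in fctx.ancestors(followfirst=True):
#         anc.rev(), anc.path()  # first-parent history of the file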
1225 def decodeddata(self):
1225 def decodeddata(self):
1226 """Returns `data()` after running repository decoding filters.
1226 """Returns `data()` after running repository decoding filters.
1227
1227
1228 This is often equivalent to how the data would be expressed on disk.
1228 This is often equivalent to how the data would be expressed on disk.
1229 """
1229 """
1230 return self._repo.wwritedata(self.path(), self.data())
1230 return self._repo.wwritedata(self.path(), self.data())
1231
1231
1232
1232
1233 class filectx(basefilectx):
1233 class filectx(basefilectx):
1234 """A filecontext object makes access to data related to a particular
1234 """A filecontext object makes access to data related to a particular
1235 filerevision convenient."""
1235 filerevision convenient."""
1236
1236
1237 def __init__(
1237 def __init__(
1238 self,
1238 self,
1239 repo,
1239 repo,
1240 path,
1240 path,
1241 changeid=None,
1241 changeid=None,
1242 fileid=None,
1242 fileid=None,
1243 filelog=None,
1243 filelog=None,
1244 changectx=None,
1244 changectx=None,
1245 ):
1245 ):
1246 """changeid must be a revision number, if specified.
1246 """changeid must be a revision number, if specified.
1247 fileid can be a file revision or node."""
1247 fileid can be a file revision or node."""
1248 self._repo = repo
1248 self._repo = repo
1249 self._path = path
1249 self._path = path
1250
1250
1251 assert (
1251 assert (
1252 changeid is not None or fileid is not None or changectx is not None
1252 changeid is not None or fileid is not None or changectx is not None
1253 ), b"bad args: changeid=%r, fileid=%r, changectx=%r" % (
1253 ), b"bad args: changeid=%r, fileid=%r, changectx=%r" % (
1254 changeid,
1254 changeid,
1255 fileid,
1255 fileid,
1256 changectx,
1256 changectx,
1257 )
1257 )
1258
1258
1259 if filelog is not None:
1259 if filelog is not None:
1260 self._filelog = filelog
1260 self._filelog = filelog
1261
1261
1262 if changeid is not None:
1262 if changeid is not None:
1263 self._changeid = changeid
1263 self._changeid = changeid
1264 if changectx is not None:
1264 if changectx is not None:
1265 self._changectx = changectx
1265 self._changectx = changectx
1266 if fileid is not None:
1266 if fileid is not None:
1267 self._fileid = fileid
1267 self._fileid = fileid
1268
1268
1269 @propertycache
1269 @propertycache
1270 def _changectx(self):
1270 def _changectx(self):
1271 try:
1271 try:
1272 return self._repo[self._changeid]
1272 return self._repo[self._changeid]
1273 except error.FilteredRepoLookupError:
1273 except error.FilteredRepoLookupError:
1274 # Linkrev may point to any revision in the repository. When the
1274 # Linkrev may point to any revision in the repository. When the
1275 # repository is filtered this may lead to `filectx` trying to build
1275 # repository is filtered this may lead to `filectx` trying to build
1276 # `changectx` for filtered revision. In such case we fallback to
1276 # `changectx` for filtered revision. In such case we fallback to
1277 # creating `changectx` on the unfiltered version of the reposition.
1277 # creating `changectx` on the unfiltered version of the reposition.
1278 # This fallback should not be an issue because `changectx` from
1278 # This fallback should not be an issue because `changectx` from
1279 # `filectx` are not used in complex operations that care about
1279 # `filectx` are not used in complex operations that care about
1280 # filtering.
1280 # filtering.
1281 #
1281 #
1282 # This fallback is a cheap and dirty fix that prevents several
1282 # This fallback is a cheap and dirty fix that prevents several
1283 # crashes. It does not ensure the behavior is correct. However the
1283 # crashes. It does not ensure the behavior is correct. However the
1284 # behavior was not correct before filtering either, and "incorrect
1284 # behavior was not correct before filtering either, and "incorrect
1285 # behavior" is seen as better than "crash".
1285 # behavior" is seen as better than "crash".
1286 #
1286 #
1287 # Linkrevs have several serious problems with filtering that are
1287 # Linkrevs have several serious problems with filtering that are
1288 # complicated to solve. Proper handling of the issue here should be
1288 # complicated to solve. Proper handling of the issue here should be
1289 # considered when solving the linkrev issues is on the table.
1289 # considered when solving the linkrev issues is on the table.
1290 return self._repo.unfiltered()[self._changeid]
1290 return self._repo.unfiltered()[self._changeid]
1291
1291
1292 def filectx(self, fileid, changeid=None):
1292 def filectx(self, fileid, changeid=None):
1293 """opens an arbitrary revision of the file without
1293 """opens an arbitrary revision of the file without
1294 opening a new filelog"""
1294 opening a new filelog"""
1295 return filectx(
1295 return filectx(
1296 self._repo,
1296 self._repo,
1297 self._path,
1297 self._path,
1298 fileid=fileid,
1298 fileid=fileid,
1299 filelog=self._filelog,
1299 filelog=self._filelog,
1300 changeid=changeid,
1300 changeid=changeid,
1301 )
1301 )
1302
1302
1303 def rawdata(self):
1303 def rawdata(self):
1304 return self._filelog.rawdata(self._filenode)
1304 return self._filelog.rawdata(self._filenode)
1305
1305
1306 def rawflags(self):
1306 def rawflags(self):
1307 """low-level revlog flags"""
1307 """low-level revlog flags"""
1308 return self._filelog.flags(self._filerev)
1308 return self._filelog.flags(self._filerev)
1309
1309
1310 def data(self):
1310 def data(self):
1311 try:
1311 try:
1312 return self._filelog.read(self._filenode)
1312 return self._filelog.read(self._filenode)
1313 except error.CensoredNodeError:
1313 except error.CensoredNodeError:
1314 if self._repo.ui.config(b"censor", b"policy") == b"ignore":
1314 if self._repo.ui.config(b"censor", b"policy") == b"ignore":
1315 return b""
1315 return b""
1316 raise error.Abort(
1316 raise error.Abort(
1317 _(b"censored node: %s") % short(self._filenode),
1317 _(b"censored node: %s") % short(self._filenode),
1318 hint=_(b"set censor.policy to ignore errors"),
1318 hint=_(b"set censor.policy to ignore errors"),
1319 )
1319 )
1320
1320
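# The censor fallback above is driven by user configuration; an hgrc
# sketch of the setting it reads:
#
#     [censor]
#     policy = ignore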
1321 def size(self):
1321 def size(self):
1322 return self._filelog.size(self._filerev)
1322 return self._filelog.size(self._filerev)
1323
1323
1324 @propertycache
1324 @propertycache
1325 def _copied(self):
1325 def _copied(self):
1326 """check if file was actually renamed in this changeset revision
1326 """check if file was actually renamed in this changeset revision
1327
1327
1328 If rename logged in file revision, we report copy for changeset only
1328 If rename logged in file revision, we report copy for changeset only
1329 if file revisions linkrev points back to the changeset in question
1329 if file revisions linkrev points back to the changeset in question
1330 or both changeset parents contain different file revisions.
1330 or both changeset parents contain different file revisions.
1331 """
1331 """
1332
1332
1333 renamed = self._filelog.renamed(self._filenode)
1333 renamed = self._filelog.renamed(self._filenode)
1334 if not renamed:
1334 if not renamed:
1335 return None
1335 return None
1336
1336
1337 if self.rev() == self.linkrev():
1337 if self.rev() == self.linkrev():
1338 return renamed
1338 return renamed
1339
1339
1340 name = self.path()
1340 name = self.path()
1341 fnode = self._filenode
1341 fnode = self._filenode
1342 for p in self._changectx.parents():
1342 for p in self._changectx.parents():
1343 try:
1343 try:
1344 if fnode == p.filenode(name):
1344 if fnode == p.filenode(name):
1345 return None
1345 return None
1346 except error.LookupError:
1346 except error.LookupError:
1347 pass
1347 pass
1348 return renamed
1348 return renamed
1349
1349
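# `_copied` backs the public copy accessors. A hedged sketch, assuming
# a file context `fctx` for a file recorded as copied:
#
#     fctx.copysource()  # source path of the copy, or None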
1350 def children(self):
1350 def children(self):
1351 # hard for renames
1351 # hard for renames
1352 c = self._filelog.children(self._filenode)
1352 c = self._filelog.children(self._filenode)
1353 return [
1353 return [
1354 filectx(self._repo, self._path, fileid=x, filelog=self._filelog)
1354 filectx(self._repo, self._path, fileid=x, filelog=self._filelog)
1355 for x in c
1355 for x in c
1356 ]
1356 ]
1357
1357
1358
1358
1359 class committablectx(basectx):
1359 class committablectx(basectx):
1360 """A committablectx object provides common functionality for a context that
1360 """A committablectx object provides common functionality for a context that
1361 wants the ability to commit, e.g. workingctx or memctx."""
1361 wants the ability to commit, e.g. workingctx or memctx."""
1362
1362
1363 def __init__(
1363 def __init__(
1364 self,
1364 self,
1365 repo,
1365 repo,
1366 text=b"",
1366 text=b"",
1367 user=None,
1367 user=None,
1368 date=None,
1368 date=None,
1369 extra=None,
1369 extra=None,
1370 changes=None,
1370 changes=None,
1371 branch=None,
1371 branch=None,
1372 ):
1372 ):
1373 super(committablectx, self).__init__(repo)
1373 super(committablectx, self).__init__(repo)
1374 self._rev = None
1374 self._rev = None
1375 self._node = None
1375 self._node = None
1376 self._text = text
1376 self._text = text
1377 if date:
1377 if date:
1378 self._date = dateutil.parsedate(date)
1378 self._date = dateutil.parsedate(date)
1379 if user:
1379 if user:
1380 self._user = user
1380 self._user = user
1381 if changes:
1381 if changes:
1382 self._status = changes
1382 self._status = changes
1383
1383
1384 self._extra = {}
1384 self._extra = {}
1385 if extra:
1385 if extra:
1386 self._extra = extra.copy()
1386 self._extra = extra.copy()
1387 if branch is not None:
1387 if branch is not None:
1388 self._extra[b'branch'] = encoding.fromlocal(branch)
1388 self._extra[b'branch'] = encoding.fromlocal(branch)
1389 if not self._extra.get(b'branch'):
1389 if not self._extra.get(b'branch'):
1390 self._extra[b'branch'] = b'default'
1390 self._extra[b'branch'] = b'default'
1391
1391
1392 def __bytes__(self):
1392 def __bytes__(self):
1393 return bytes(self._parents[0]) + b"+"
1393 return bytes(self._parents[0]) + b"+"
1394
1394
1395 def hex(self):
1395 def hex(self):
1396 return self._repo.nodeconstants.wdirhex
1396 return self._repo.nodeconstants.wdirhex
1397
1397
1398 __str__ = encoding.strmethod(__bytes__)
1398 __str__ = encoding.strmethod(__bytes__)
1399
1399
1400 def __nonzero__(self):
1400 def __nonzero__(self):
1401 return True
1401 return True
1402
1402
1403 __bool__ = __nonzero__
1403 __bool__ = __nonzero__
1404
1404
1405 @propertycache
1405 @propertycache
1406 def _status(self):
1406 def _status(self):
1407 return self._repo.status()
1407 return self._repo.status()
1408
1408
1409 @propertycache
1409 @propertycache
1410 def _user(self):
1410 def _user(self):
1411 return self._repo.ui.username()
1411 return self._repo.ui.username()
1412
1412
1413 @propertycache
1413 @propertycache
1414 def _date(self):
1414 def _date(self):
1415 ui = self._repo.ui
1415 ui = self._repo.ui
1416 date = ui.configdate(b'devel', b'default-date')
1416 date = ui.configdate(b'devel', b'default-date')
1417 if date is None:
1417 if date is None:
1418 date = dateutil.makedate()
1418 date = dateutil.makedate()
1419 return date
1419 return date
1420
1420
1421 def subrev(self, subpath):
1421 def subrev(self, subpath):
1422 return None
1422 return None
1423
1423
1424 def manifestnode(self):
1424 def manifestnode(self):
1425 return None
1425 return None
1426
1426
1427 def user(self):
1427 def user(self):
1428 return self._user or self._repo.ui.username()
1428 return self._user or self._repo.ui.username()
1429
1429
1430 def date(self):
1430 def date(self):
1431 return self._date
1431 return self._date
1432
1432
1433 def description(self):
1433 def description(self):
1434 return self._text
1434 return self._text
1435
1435
1436 def files(self):
1436 def files(self):
1437 return sorted(
1437 return sorted(
1438 self._status.modified + self._status.added + self._status.removed
1438 self._status.modified + self._status.added + self._status.removed
1439 )
1439 )
1440
1440
1441 def modified(self):
1441 def modified(self):
1442 return self._status.modified
1442 return self._status.modified
1443
1443
1444 def added(self):
1444 def added(self):
1445 return self._status.added
1445 return self._status.added
1446
1446
1447 def removed(self):
1447 def removed(self):
1448 return self._status.removed
1448 return self._status.removed
1449
1449
1450 def deleted(self):
1450 def deleted(self):
1451 return self._status.deleted
1451 return self._status.deleted
1452
1452
1453 filesmodified = modified
1453 filesmodified = modified
1454 filesadded = added
1454 filesadded = added
1455 filesremoved = removed
1455 filesremoved = removed
1456
1456
1457 def branch(self):
1457 def branch(self):
1458 return encoding.tolocal(self._extra[b'branch'])
1458 return encoding.tolocal(self._extra[b'branch'])
1459
1459
1460 def closesbranch(self):
1460 def closesbranch(self):
1461 return b'close' in self._extra
1461 return b'close' in self._extra
1462
1462
1463 def extra(self):
1463 def extra(self):
1464 return self._extra
1464 return self._extra
1465
1465
1466 def isinmemory(self):
1466 def isinmemory(self):
1467 return False
1467 return False
1468
1468
1469 def tags(self):
1469 def tags(self):
1470 return []
1470 return []
1471
1471
1472 def bookmarks(self):
1472 def bookmarks(self):
1473 b = []
1473 b = []
1474 for p in self.parents():
1474 for p in self.parents():
1475 b.extend(p.bookmarks())
1475 b.extend(p.bookmarks())
1476 return b
1476 return b
1477
1477
1478 def phase(self):
1478 def phase(self):
1479 phase = phases.newcommitphase(self._repo.ui)
1479 phase = phases.newcommitphase(self._repo.ui)
1480 for p in self.parents():
1480 for p in self.parents():
1481 phase = max(phase, p.phase())
1481 phase = max(phase, p.phase())
1482 return phase
1482 return phase
1483
1483
1484 def hidden(self):
1484 def hidden(self):
1485 return False
1485 return False
1486
1486
1487 def children(self):
1487 def children(self):
1488 return []
1488 return []
1489
1489
1490 def flags(self, path):
1490 def flags(self, path):
1491 if '_manifest' in self.__dict__:
1491 if '_manifest' in self.__dict__:
1492 try:
1492 try:
1493 return self._manifest.flags(path)
1493 return self._manifest.flags(path)
1494 except KeyError:
1494 except KeyError:
1495 return b''
1495 return b''
1496
1496
1497 try:
1497 try:
1498 return self._flagfunc(path)
1498 return self._flagfunc(path)
1499 except OSError:
1499 except OSError:
1500 return b''
1500 return b''
1501
1501
1502 def ancestor(self, c2):
1502 def ancestor(self, c2):
1503 """return the "best" ancestor context of self and c2"""
1503 """return the "best" ancestor context of self and c2"""
1504 return self._parents[0].ancestor(c2) # punt on two parents for now
1504 return self._parents[0].ancestor(c2) # punt on two parents for now
1505
1505
1506 def ancestors(self):
1506 def ancestors(self):
1507 for p in self._parents:
1507 for p in self._parents:
1508 yield p
1508 yield p
1509 for a in self._repo.changelog.ancestors(
1509 for a in self._repo.changelog.ancestors(
1510 [p.rev() for p in self._parents]
1510 [p.rev() for p in self._parents]
1511 ):
1511 ):
1512 yield self._repo[a]
1512 yield self._repo[a]
1513
1513
1514 def markcommitted(self, node):
1514 def markcommitted(self, node):
1515 """Perform post-commit cleanup necessary after committing this ctx
1515 """Perform post-commit cleanup necessary after committing this ctx
1516
1516
1517 Specifically, this updates backing stores this working context
1517 Specifically, this updates backing stores this working context
1518 wraps to reflect the fact that the changes reflected by this
1518 wraps to reflect the fact that the changes reflected by this
1519 workingctx have been committed. For example, it marks
1519 workingctx have been committed. For example, it marks
1520 modified and added files as normal in the dirstate.
1520 modified and added files as normal in the dirstate.
1521
1521
1522 """
1522 """
1523
1523
1524 def dirty(self, missing=False, merge=True, branch=True):
1524 def dirty(self, missing=False, merge=True, branch=True):
1525 return False
1525 return False
1526
1526
1527
1527
1528 class workingctx(committablectx):
1528 class workingctx(committablectx):
1529 """A workingctx object makes access to data related to
1529 """A workingctx object makes access to data related to
1530 the current working directory convenient.
1530 the current working directory convenient.
1531 date - any valid date string or (unixtime, offset), or None.
1531 date - any valid date string or (unixtime, offset), or None.
1532 user - username string, or None.
1532 user - username string, or None.
1533 extra - a dictionary of extra values, or None.
1533 extra - a dictionary of extra values, or None.
1534 changes - a list of file lists as returned by localrepo.status()
1534 changes - a list of file lists as returned by localrepo.status()
1535 or None to use the repository status.
1535 or None to use the repository status.
1536 """
1536 """
1537
1537
1538 def __init__(
1538 def __init__(
1539 self, repo, text=b"", user=None, date=None, extra=None, changes=None
1539 self, repo, text=b"", user=None, date=None, extra=None, changes=None
1540 ):
1540 ):
1541 branch = None
1541 branch = None
1542 if not extra or b'branch' not in extra:
1542 if not extra or b'branch' not in extra:
1543 try:
1543 try:
1544 branch = repo.dirstate.branch()
1544 branch = repo.dirstate.branch()
1545 except UnicodeDecodeError:
1545 except UnicodeDecodeError:
1546 raise error.Abort(_(b'branch name not in UTF-8!'))
1546 raise error.Abort(_(b'branch name not in UTF-8!'))
1547 super(workingctx, self).__init__(
1547 super(workingctx, self).__init__(
1548 repo, text, user, date, extra, changes, branch=branch
1548 repo, text, user, date, extra, changes, branch=branch
1549 )
1549 )
1550
1550
1551 def __iter__(self):
1551 def __iter__(self):
1552 d = self._repo.dirstate
1552 d = self._repo.dirstate
1553 for f in d:
1553 for f in d:
1554 if d[f] != b'r':
1554 if d[f] != b'r':
1555 yield f
1555 yield f
1556
1556
1557 def __contains__(self, key):
1557 def __contains__(self, key):
1558 return self._repo.dirstate[key] not in b"?r"
1558 return self._repo.dirstate[key] not in b"?r"
1559
1559
1560 def hex(self):
1560 def hex(self):
1561 return self._repo.nodeconstants.wdirhex
1561 return self._repo.nodeconstants.wdirhex
1562
1562
1563 @propertycache
1563 @propertycache
1564 def _parents(self):
1564 def _parents(self):
1565 p = self._repo.dirstate.parents()
1565 p = self._repo.dirstate.parents()
1566 if p[1] == self._repo.nodeconstants.nullid:
1566 if p[1] == self._repo.nodeconstants.nullid:
1567 p = p[:-1]
1567 p = p[:-1]
1568 # use unfiltered repo to delay/avoid loading obsmarkers
1568 # use unfiltered repo to delay/avoid loading obsmarkers
1569 unfi = self._repo.unfiltered()
1569 unfi = self._repo.unfiltered()
1570 return [
1570 return [
1571 changectx(
1571 changectx(
1572 self._repo, unfi.changelog.rev(n), n, maybe_filtered=False
1572 self._repo, unfi.changelog.rev(n), n, maybe_filtered=False
1573 )
1573 )
1574 for n in p
1574 for n in p
1575 ]
1575 ]
1576
1576
1577 def setparents(self, p1node, p2node=None):
1577 def setparents(self, p1node, p2node=None):
1578 if p2node is None:
1578 if p2node is None:
1579 p2node = self._repo.nodeconstants.nullid
1579 p2node = self._repo.nodeconstants.nullid
1580 dirstate = self._repo.dirstate
1580 dirstate = self._repo.dirstate
1581 with dirstate.parentchange():
1581 with dirstate.parentchange():
1582 copies = dirstate.setparents(p1node, p2node)
1582 copies = dirstate.setparents(p1node, p2node)
1583 pctx = self._repo[p1node]
1583 pctx = self._repo[p1node]
1584 if copies:
1584 if copies:
1585 # Adjust copy records; the dirstate cannot do it, as it
1585 # Adjust copy records; the dirstate cannot do it, as it
1586 # requires access to the parents' manifests. Preserve them
1586 # requires access to the parents' manifests. Preserve them
1587 # only for entries added to the first parent.
1587 # only for entries added to the first parent.
1588 for f in copies:
1588 for f in copies:
1589 if f not in pctx and copies[f] in pctx:
1589 if f not in pctx and copies[f] in pctx:
1590 dirstate.copy(copies[f], f)
1590 dirstate.copy(copies[f], f)
1591 if p2node == self._repo.nodeconstants.nullid:
1591 if p2node == self._repo.nodeconstants.nullid:
1592 for f, s in sorted(dirstate.copies().items()):
1592 for f, s in sorted(dirstate.copies().items()):
1593 if f not in pctx and s not in pctx:
1593 if f not in pctx and s not in pctx:
1594 dirstate.copy(None, f)
1594 dirstate.copy(None, f)
1595
1595
1596 def _fileinfo(self, path):
1596 def _fileinfo(self, path):
1597 # populate __dict__['_manifest'] as workingctx has no _manifestdelta
1597 # populate __dict__['_manifest'] as workingctx has no _manifestdelta
1598 self._manifest
1598 self._manifest
1599 return super(workingctx, self)._fileinfo(path)
1599 return super(workingctx, self)._fileinfo(path)
1600
1600
1601 def _buildflagfunc(self):
1601 def _buildflagfunc(self):
1602 # Create a fallback function for getting file flags when the
1602 # Create a fallback function for getting file flags when the
1603 # filesystem doesn't support them
1603 # filesystem doesn't support them
1604
1604
1605 copiesget = self._repo.dirstate.copies().get
1605 copiesget = self._repo.dirstate.copies().get
1606 parents = self.parents()
1606 parents = self.parents()
1607 if len(parents) < 2:
1607 if len(parents) < 2:
1608 # when we have one parent, it's easy: copy from parent
1608 # when we have one parent, it's easy: copy from parent
1609 man = parents[0].manifest()
1609 man = parents[0].manifest()
1610
1610
1611 def func(f):
1611 def func(f):
1612 f = copiesget(f, f)
1612 f = copiesget(f, f)
1613 return man.flags(f)
1613 return man.flags(f)
1614
1614
1615 else:
1615 else:
1616 # merges are tricky: we try to reconstruct the unstored
1616 # merges are tricky: we try to reconstruct the unstored
1617 # result from the merge (issue1802)
1617 # result from the merge (issue1802)
1618 p1, p2 = parents
1618 p1, p2 = parents
1619 pa = p1.ancestor(p2)
1619 pa = p1.ancestor(p2)
1620 m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()
1620 m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()
1621
1621
1622 def func(f):
1622 def func(f):
1623 f = copiesget(f, f) # may be wrong for merges with copies
1623 f = copiesget(f, f) # may be wrong for merges with copies
1624 fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
1624 fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
1625 if fl1 == fl2:
1625 if fl1 == fl2:
1626 return fl1
1626 return fl1
1627 if fl1 == fla:
1627 if fl1 == fla:
1628 return fl2
1628 return fl2
1629 if fl2 == fla:
1629 if fl2 == fla:
1630 return fl1
1630 return fl1
1631 return b'' # punt for conflicts
1631 return b'' # punt for conflicts
1632
1632
1633 return func
1633 return func
1634
1634
1635 @propertycache
1635 @propertycache
1636 def _flagfunc(self):
1636 def _flagfunc(self):
1637 return self._repo.dirstate.flagfunc(self._buildflagfunc)
1637 return self._repo.dirstate.flagfunc(self._buildflagfunc)
1638
1638
1639 def flags(self, path):
1639 def flags(self, path):
1640 try:
1640 try:
1641 return self._flagfunc(path)
1641 return self._flagfunc(path)
1642 except OSError:
1642 except OSError:
1643 return b''
1643 return b''
1644
1644
1645 def filectx(self, path, filelog=None):
1645 def filectx(self, path, filelog=None):
1646 """get a file context from the working directory"""
1646 """get a file context from the working directory"""
1647 return workingfilectx(
1647 return workingfilectx(
1648 self._repo, path, workingctx=self, filelog=filelog
1648 self._repo, path, workingctx=self, filelog=filelog
1649 )
1649 )
1650
1650
1651 def dirty(self, missing=False, merge=True, branch=True):
1651 def dirty(self, missing=False, merge=True, branch=True):
1652 """check whether a working directory is modified"""
1652 """check whether a working directory is modified"""
1653 # check subrepos first
1653 # check subrepos first
1654 for s in sorted(self.substate):
1654 for s in sorted(self.substate):
1655 if self.sub(s).dirty(missing=missing):
1655 if self.sub(s).dirty(missing=missing):
1656 return True
1656 return True
1657 # check current working dir
1657 # check current working dir
1658 return (
1658 return (
1659 (merge and self.p2())
1659 (merge and self.p2())
1660 or (branch and self.branch() != self.p1().branch())
1660 or (branch and self.branch() != self.p1().branch())
1661 or self.modified()
1661 or self.modified()
1662 or self.added()
1662 or self.added()
1663 or self.removed()
1663 or self.removed()
1664 or (missing and self.deleted())
1664 or (missing and self.deleted())
1665 )
1665 )
1666
1666
1667 def add(self, list, prefix=b""):
1667 def add(self, list, prefix=b""):
1668 with self._repo.wlock():
1668 with self._repo.wlock():
1669 ui, ds = self._repo.ui, self._repo.dirstate
1669 ui, ds = self._repo.ui, self._repo.dirstate
1670 uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
1670 uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
1671 rejected = []
1671 rejected = []
1672 lstat = self._repo.wvfs.lstat
1672 lstat = self._repo.wvfs.lstat
1673 for f in list:
1673 for f in list:
1674 # ds.pathto() returns an absolute file when this is invoked from
1674 # ds.pathto() returns an absolute file when this is invoked from
1675 # the keyword extension. That gets flagged as non-portable on
1675 # the keyword extension. That gets flagged as non-portable on
1676 # Windows, since it contains the drive letter and colon.
1676 # Windows, since it contains the drive letter and colon.
1677 scmutil.checkportable(ui, os.path.join(prefix, f))
1677 scmutil.checkportable(ui, os.path.join(prefix, f))
1678 try:
1678 try:
1679 st = lstat(f)
1679 st = lstat(f)
1680 except OSError:
1680 except OSError:
1681 ui.warn(_(b"%s does not exist!\n") % uipath(f))
1681 ui.warn(_(b"%s does not exist!\n") % uipath(f))
1682 rejected.append(f)
1682 rejected.append(f)
1683 continue
1683 continue
1684 limit = ui.configbytes(b'ui', b'large-file-limit')
1684 limit = ui.configbytes(b'ui', b'large-file-limit')
1685 if limit != 0 and st.st_size > limit:
1685 if limit != 0 and st.st_size > limit:
1686 ui.warn(
1686 ui.warn(
1687 _(
1687 _(
1688 b"%s: up to %d MB of RAM may be required "
1688 b"%s: up to %d MB of RAM may be required "
1689 b"to manage this file\n"
1689 b"to manage this file\n"
1690 b"(use 'hg revert %s' to cancel the "
1690 b"(use 'hg revert %s' to cancel the "
1691 b"pending addition)\n"
1691 b"pending addition)\n"
1692 )
1692 )
1693 % (f, 3 * st.st_size // 1000000, uipath(f))
1693 % (f, 3 * st.st_size // 1000000, uipath(f))
1694 )
1694 )
1695 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1695 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1696 ui.warn(
1696 ui.warn(
1697 _(
1697 _(
1698 b"%s not added: only files and symlinks "
1698 b"%s not added: only files and symlinks "
1699 b"supported currently\n"
1699 b"supported currently\n"
1700 )
1700 )
1701 % uipath(f)
1701 % uipath(f)
1702 )
1702 )
1703 rejected.append(f)
1703 rejected.append(f)
1704 elif ds[f] in b'amn':
1704 elif not ds.set_tracked(f):
1705 ui.warn(_(b"%s already tracked!\n") % uipath(f))
1705 ui.warn(_(b"%s already tracked!\n") % uipath(f))
1706 elif ds[f] == b'r':
1707 ds.normallookup(f)
1708 else:
1709 ds.add(f)
1710 return rejected
1706 return rejected
1711
1707
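# The hunk above folds the old dirstate state checks ('a'/'m'/'n' and
# 'r') into a single dirstate.set_tracked() call, which returns False
# when the file was already tracked. A hedged sketch of the new call
# pattern, assuming a `repo` object:
#
#     with repo.wlock():
#         if not repo.dirstate.set_tracked(b'somefile'):
#             repo.ui.warn(b'somefile already tracked!\n')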
1712 def forget(self, files, prefix=b""):
1708 def forget(self, files, prefix=b""):
1713 with self._repo.wlock():
1709 with self._repo.wlock():
1714 ds = self._repo.dirstate
1710 ds = self._repo.dirstate
1715 uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
1711 uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
1716 rejected = []
1712 rejected = []
1717 for f in files:
1713 for f in files:
1718 if f not in ds:
1714 if f not in ds:
1719 self._repo.ui.warn(_(b"%s not tracked!\n") % uipath(f))
1715 self._repo.ui.warn(_(b"%s not tracked!\n") % uipath(f))
1720 rejected.append(f)
1716 rejected.append(f)
1721 elif ds[f] != b'a':
1717 elif ds[f] != b'a':
1722 ds.remove(f)
1718 ds.remove(f)
1723 else:
1719 else:
1724 ds.drop(f)
1720 ds.drop(f)
1725 return rejected
1721 return rejected
1726
1722
1727 def copy(self, source, dest):
1723 def copy(self, source, dest):
1728 try:
1724 try:
1729 st = self._repo.wvfs.lstat(dest)
1725 st = self._repo.wvfs.lstat(dest)
1730 except OSError as err:
1726 except OSError as err:
1731 if err.errno != errno.ENOENT:
1727 if err.errno != errno.ENOENT:
1732 raise
1728 raise
1733 self._repo.ui.warn(
1729 self._repo.ui.warn(
1734 _(b"%s does not exist!\n") % self._repo.dirstate.pathto(dest)
1730 _(b"%s does not exist!\n") % self._repo.dirstate.pathto(dest)
1735 )
1731 )
1736 return
1732 return
1737 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1733 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1738 self._repo.ui.warn(
1734 self._repo.ui.warn(
1739 _(b"copy failed: %s is not a file or a symbolic link\n")
1735 _(b"copy failed: %s is not a file or a symbolic link\n")
1740 % self._repo.dirstate.pathto(dest)
1736 % self._repo.dirstate.pathto(dest)
1741 )
1737 )
1742 else:
1738 else:
1743 with self._repo.wlock():
1739 with self._repo.wlock():
1744 ds = self._repo.dirstate
1740 ds = self._repo.dirstate
1745 if ds[dest] in b'?':
1741 if ds[dest] in b'?':
1746 ds.add(dest)
1742 ds.add(dest)
1747 elif ds[dest] in b'r':
1743 elif ds[dest] in b'r':
1748 ds.normallookup(dest)
1744 ds.normallookup(dest)
1749 ds.copy(source, dest)
1745 ds.copy(source, dest)
1750
1746
1751 def match(
1747 def match(
1752 self,
1748 self,
1753 pats=None,
1749 pats=None,
1754 include=None,
1750 include=None,
1755 exclude=None,
1751 exclude=None,
1756 default=b'glob',
1752 default=b'glob',
1757 listsubrepos=False,
1753 listsubrepos=False,
1758 badfn=None,
1754 badfn=None,
1759 cwd=None,
1755 cwd=None,
1760 ):
1756 ):
1761 r = self._repo
1757 r = self._repo
1762 if not cwd:
1758 if not cwd:
1763 cwd = r.getcwd()
1759 cwd = r.getcwd()
1764
1760
1765 # Only a case insensitive filesystem needs magic to translate user input
1761 # Only a case insensitive filesystem needs magic to translate user input
1766 # to actual case in the filesystem.
1762 # to actual case in the filesystem.
1767 icasefs = not util.fscasesensitive(r.root)
1763 icasefs = not util.fscasesensitive(r.root)
1768 return matchmod.match(
1764 return matchmod.match(
1769 r.root,
1765 r.root,
1770 cwd,
1766 cwd,
1771 pats,
1767 pats,
1772 include,
1768 include,
1773 exclude,
1769 exclude,
1774 default,
1770 default,
1775 auditor=r.auditor,
1771 auditor=r.auditor,
1776 ctx=self,
1772 ctx=self,
1777 listsubrepos=listsubrepos,
1773 listsubrepos=listsubrepos,
1778 badfn=badfn,
1774 badfn=badfn,
1779 icasefs=icasefs,
1775 icasefs=icasefs,
1780 )
1776 )
1781
1777
1782 def _filtersuspectsymlink(self, files):
1778 def _filtersuspectsymlink(self, files):
1783 if not files or self._repo.dirstate._checklink:
1779 if not files or self._repo.dirstate._checklink:
1784 return files
1780 return files
1785
1781
1786 # Symlink placeholders may get non-symlink-like contents
1782 # Symlink placeholders may get non-symlink-like contents
1787 # via user error or dereferencing by NFS or Samba servers,
1783 # via user error or dereferencing by NFS or Samba servers,
1788 # so we filter out any placeholders that don't look like a
1784 # so we filter out any placeholders that don't look like a
1789 # symlink
1785 # symlink
1790 sane = []
1786 sane = []
1791 for f in files:
1787 for f in files:
1792 if self.flags(f) == b'l':
1788 if self.flags(f) == b'l':
1793 d = self[f].data()
1789 d = self[f].data()
1794 if (
1790 if (
1795 d == b''
1791 d == b''
1796 or len(d) >= 1024
1792 or len(d) >= 1024
1797 or b'\n' in d
1793 or b'\n' in d
1798 or stringutil.binary(d)
1794 or stringutil.binary(d)
1799 ):
1795 ):
1800 self._repo.ui.debug(
1796 self._repo.ui.debug(
1801 b'ignoring suspect symlink placeholder "%s"\n' % f
1797 b'ignoring suspect symlink placeholder "%s"\n' % f
1802 )
1798 )
1803 continue
1799 continue
1804 sane.append(f)
1800 sane.append(f)
1805 return sane
1801 return sane
1806
1802
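# The same heuristic, inverted, as a standalone sketch (hypothetical
# helper name; stringutil is imported at the top of this module): data
# plausibly is a symlink placeholder only if it is non-empty, short,
# single-line, and non-binary.
#
#     def _looks_like_symlink_placeholder(d):
#         return (
#             d != b''
#             and len(d) < 1024
#             and b'\n' not in d
#             and not stringutil.binary(d)
#         )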
1807 def _checklookup(self, files):
1803 def _checklookup(self, files):
1808 # check for any possibly clean files
1804 # check for any possibly clean files
1809 if not files:
1805 if not files:
1810 return [], [], []
1806 return [], [], []
1811
1807
1812 modified = []
1808 modified = []
1813 deleted = []
1809 deleted = []
1814 fixup = []
1810 fixup = []
1815 pctx = self._parents[0]
1811 pctx = self._parents[0]
1816 # do a full compare of any files that might have changed
1812 # do a full compare of any files that might have changed
1817 for f in sorted(files):
1813 for f in sorted(files):
1818 try:
1814 try:
1819 # This will return True for a file that got replaced by a
1815 # This will return True for a file that got replaced by a
1820 # directory in the interim, but fixing that is pretty hard.
1816 # directory in the interim, but fixing that is pretty hard.
1821 if (
1817 if (
1822 f not in pctx
1818 f not in pctx
1823 or self.flags(f) != pctx.flags(f)
1819 or self.flags(f) != pctx.flags(f)
1824 or pctx[f].cmp(self[f])
1820 or pctx[f].cmp(self[f])
1825 ):
1821 ):
1826 modified.append(f)
1822 modified.append(f)
1827 else:
1823 else:
1828 fixup.append(f)
1824 fixup.append(f)
1829 except (IOError, OSError):
1825 except (IOError, OSError):
1830 # A file became inaccessible in between? Mark it as deleted,
1826 # A file became inaccessible in between? Mark it as deleted,
1831 # matching dirstate behavior (issue5584).
1827 # matching dirstate behavior (issue5584).
1832 # The dirstate has more complex behavior around whether a
1828 # The dirstate has more complex behavior around whether a
1833 # missing file matches a directory, etc, but we don't need to
1829 # missing file matches a directory, etc, but we don't need to
1834 # bother with that: if f has made it to this point, we're sure
1830 # bother with that: if f has made it to this point, we're sure
1835 # it's in the dirstate.
1831 # it's in the dirstate.
1836 deleted.append(f)
1832 deleted.append(f)
1837
1833
1838 return modified, deleted, fixup
1834 return modified, deleted, fixup
1839
1835
1840 def _poststatusfixup(self, status, fixup):
1836 def _poststatusfixup(self, status, fixup):
1841 """update dirstate for files that are actually clean"""
1837 """update dirstate for files that are actually clean"""
1842 poststatus = self._repo.postdsstatus()
1838 poststatus = self._repo.postdsstatus()
1843 if fixup or poststatus or self._repo.dirstate._dirty:
1839 if fixup or poststatus or self._repo.dirstate._dirty:
1844 try:
1840 try:
1845 oldid = self._repo.dirstate.identity()
1841 oldid = self._repo.dirstate.identity()
1846
1842
1847 # updating the dirstate is optional
1843 # updating the dirstate is optional
1848 # so we don't wait on the lock
1844 # so we don't wait on the lock
1849 # wlock can invalidate the dirstate, so cache normal _after_
1845 # wlock can invalidate the dirstate, so cache normal _after_
1850 # taking the lock
1846 # taking the lock
1851 with self._repo.wlock(False):
1847 with self._repo.wlock(False):
1852 if self._repo.dirstate.identity() == oldid:
1848 if self._repo.dirstate.identity() == oldid:
1853 if fixup:
1849 if fixup:
1854 normal = self._repo.dirstate.normal
1850 normal = self._repo.dirstate.normal
1855 for f in fixup:
1851 for f in fixup:
1856 normal(f)
1852 normal(f)
1857 # write changes out explicitly, because nesting
1853 # write changes out explicitly, because nesting
1858 # wlock at runtime may prevent 'wlock.release()'
1854 # wlock at runtime may prevent 'wlock.release()'
1859 # after this block from doing so for subsequent
1855 # after this block from doing so for subsequent
1860 # changing files
1856 # changing files
1861 tr = self._repo.currenttransaction()
1857 tr = self._repo.currenttransaction()
1862 self._repo.dirstate.write(tr)
1858 self._repo.dirstate.write(tr)
1863
1859
1864 if poststatus:
1860 if poststatus:
1865 for ps in poststatus:
1861 for ps in poststatus:
1866 ps(self, status)
1862 ps(self, status)
1867 else:
1863 else:
1868 # in this case, writing changes out breaks
1864 # in this case, writing changes out breaks
1869 # consistency, because .hg/dirstate was
1865 # consistency, because .hg/dirstate was
1870 # already changed simultaneously after the last
1866 # already changed simultaneously after the last
1871 # caching (see also issue5584 for details)
1867 # caching (see also issue5584 for details)
1872 self._repo.ui.debug(
1868 self._repo.ui.debug(
1873 b'skip updating dirstate: identity mismatch\n'
1869 b'skip updating dirstate: identity mismatch\n'
1874 )
1870 )
1875 except error.LockError:
1871 except error.LockError:
1876 pass
1872 pass
1877 finally:
1873 finally:
1878 # Even if the wlock couldn't be grabbed, clear out the list.
1874 # Even if the wlock couldn't be grabbed, clear out the list.
1879 self._repo.clearpostdsstatus()
1875 self._repo.clearpostdsstatus()
1880
1876
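# The identity check above, reduced to its reusable core (a sketch under
# the same assumptions as the method; `error` is imported at the top of
# this module):
#
#     oldid = repo.dirstate.identity()
#     try:
#         with repo.wlock(False):  # non-blocking; may raise LockError
#             if repo.dirstate.identity() == oldid:
#                 repo.dirstate.write(repo.currenttransaction())
#             # else: .hg/dirstate changed under us; skip the write
#     except error.LockError:
#         pass  # the dirstate update is optional here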
1881 def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
1877 def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
1882 '''Gets the status from the dirstate -- internal use only.'''
1878 '''Gets the status from the dirstate -- internal use only.'''
1883 subrepos = []
1879 subrepos = []
1884 if b'.hgsub' in self:
1880 if b'.hgsub' in self:
1885 subrepos = sorted(self.substate)
1881 subrepos = sorted(self.substate)
1886 cmp, s = self._repo.dirstate.status(
1882 cmp, s = self._repo.dirstate.status(
1887 match, subrepos, ignored=ignored, clean=clean, unknown=unknown
1883 match, subrepos, ignored=ignored, clean=clean, unknown=unknown
1888 )
1884 )
1889
1885
1890 # check for any possibly clean files
1886 # check for any possibly clean files
1891 fixup = []
1887 fixup = []
1892 if cmp:
1888 if cmp:
1893 modified2, deleted2, fixup = self._checklookup(cmp)
1889 modified2, deleted2, fixup = self._checklookup(cmp)
1894 s.modified.extend(modified2)
1890 s.modified.extend(modified2)
1895 s.deleted.extend(deleted2)
1891 s.deleted.extend(deleted2)
1896
1892
1897 if fixup and clean:
1893 if fixup and clean:
1898 s.clean.extend(fixup)
1894 s.clean.extend(fixup)
1899
1895
1900 self._poststatusfixup(s, fixup)
1896 self._poststatusfixup(s, fixup)
1901
1897
1902 if match.always():
1898 if match.always():
1903 # cache for performance
1899 # cache for performance
1904 if s.unknown or s.ignored or s.clean:
1900 if s.unknown or s.ignored or s.clean:
1905 # "_status" is cached with list*=False in the normal route
1901 # "_status" is cached with list*=False in the normal route
1906 self._status = scmutil.status(
1902 self._status = scmutil.status(
1907 s.modified, s.added, s.removed, s.deleted, [], [], []
1903 s.modified, s.added, s.removed, s.deleted, [], [], []
1908 )
1904 )
1909 else:
1905 else:
1910 self._status = s
1906 self._status = s
1911
1907
1912 return s
1908 return s
1913
1909
1914 @propertycache
1910 @propertycache
1915 def _copies(self):
1911 def _copies(self):
1916 p1copies = {}
1912 p1copies = {}
1917 p2copies = {}
1913 p2copies = {}
1918 parents = self._repo.dirstate.parents()
1914 parents = self._repo.dirstate.parents()
1919 p1manifest = self._repo[parents[0]].manifest()
1915 p1manifest = self._repo[parents[0]].manifest()
1920 p2manifest = self._repo[parents[1]].manifest()
1916 p2manifest = self._repo[parents[1]].manifest()
1921 changedset = set(self.added()) | set(self.modified())
1917 changedset = set(self.added()) | set(self.modified())
1922 narrowmatch = self._repo.narrowmatch()
1918 narrowmatch = self._repo.narrowmatch()
1923 for dst, src in self._repo.dirstate.copies().items():
1919 for dst, src in self._repo.dirstate.copies().items():
1924 if dst not in changedset or not narrowmatch(dst):
1920 if dst not in changedset or not narrowmatch(dst):
1925 continue
1921 continue
1926 if src in p1manifest:
1922 if src in p1manifest:
1927 p1copies[dst] = src
1923 p1copies[dst] = src
1928 elif src in p2manifest:
1924 elif src in p2manifest:
1929 p2copies[dst] = src
1925 p2copies[dst] = src
1930 return p1copies, p2copies
1926 return p1copies, p2copies
1931
1927
1932 @propertycache
1928 @propertycache
1933 def _manifest(self):
1929 def _manifest(self):
1934 """generate a manifest corresponding to the values in self._status
1930 """generate a manifest corresponding to the values in self._status
1935
1931
1936 This reuses the file nodeid from the parent, but we use special node
1932 This reuses the file nodeid from the parent, but we use special node
1937 identifiers for added and modified files. This is used by manifest
1933 identifiers for added and modified files. This is used by manifest
1938 merge to see that files are different and by update logic to avoid
1934 merge to see that files are different and by update logic to avoid
1939 deleting newly added files.
1935 deleting newly added files.
1940 """
1936 """
1941 return self._buildstatusmanifest(self._status)
1937 return self._buildstatusmanifest(self._status)
1942
1938
1943 def _buildstatusmanifest(self, status):
1939 def _buildstatusmanifest(self, status):
1944 """Builds a manifest that includes the given status results."""
1940 """Builds a manifest that includes the given status results."""
1945 parents = self.parents()
1941 parents = self.parents()
1946
1942
1947 man = parents[0].manifest().copy()
1943 man = parents[0].manifest().copy()
1948
1944
1949 ff = self._flagfunc
1945 ff = self._flagfunc
1950 for i, l in (
1946 for i, l in (
1951 (self._repo.nodeconstants.addednodeid, status.added),
1947 (self._repo.nodeconstants.addednodeid, status.added),
1952 (self._repo.nodeconstants.modifiednodeid, status.modified),
1948 (self._repo.nodeconstants.modifiednodeid, status.modified),
1953 ):
1949 ):
1954 for f in l:
1950 for f in l:
1955 man[f] = i
1951 man[f] = i
1956 try:
1952 try:
1957 man.setflag(f, ff(f))
1953 man.setflag(f, ff(f))
1958 except OSError:
1954 except OSError:
1959 pass
1955 pass
1960
1956
1961 for f in status.deleted + status.removed:
1957 for f in status.deleted + status.removed:
1962 if f in man:
1958 if f in man:
1963 del man[f]
1959 del man[f]
1964
1960
1965 return man
1961 return man
1966
1962
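# A hedged sketch of the synthetic manifest produced above (internal
# helpers shown for illustration only), assuming a working context
# `wctx` with one added and one modified file:
#
#     man = wctx._buildstatusmanifest(wctx._status)
#     nc = wctx._repo.nodeconstants
#     man[b'added-file'] == nc.addednodeid         # placeholder node
#     man[b'modified-file'] == nc.modifiednodeid   # placeholder node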
1967 def _buildstatus(
1963 def _buildstatus(
1968 self, other, s, match, listignored, listclean, listunknown
1964 self, other, s, match, listignored, listclean, listunknown
1969 ):
1965 ):
1970 """build a status with respect to another context
1966 """build a status with respect to another context
1971
1967
1972 This includes logic for maintaining the fast path of status when
1968 This includes logic for maintaining the fast path of status when
1973 comparing the working directory against its parent: a new manifest
1969 comparing the working directory against its parent: a new manifest
1974 is only built when self (the working directory) is compared against
1970 is only built when self (the working directory) is compared against
1975 something other than its parent (repo['.']).
1971 something other than its parent (repo['.']).
1976 """
1972 """
1977 s = self._dirstatestatus(match, listignored, listclean, listunknown)
1973 s = self._dirstatestatus(match, listignored, listclean, listunknown)
1978 # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
1974 # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
1979 # might have accidentally ended up with the entire contents of the file
1975 # might have accidentally ended up with the entire contents of the file
1980 # they are supposed to be linking to.
1976 # they are supposed to be linking to.
1981 s.modified[:] = self._filtersuspectsymlink(s.modified)
1977 s.modified[:] = self._filtersuspectsymlink(s.modified)
1982 if other != self._repo[b'.']:
1978 if other != self._repo[b'.']:
1983 s = super(workingctx, self)._buildstatus(
1979 s = super(workingctx, self)._buildstatus(
1984 other, s, match, listignored, listclean, listunknown
1980 other, s, match, listignored, listclean, listunknown
1985 )
1981 )
1986 return s
1982 return s

    def _matchstatus(self, other, match):
        """override the match method with a filter for directory patterns

        We use inheritance to customize the match.bad method only in the case
        of workingctx, since it applies only to the working directory when
        comparing against the parent changeset.

        If we aren't comparing against the working directory's parent, then we
        just use the default match object sent to us.
        """
        if other != self._repo[b'.']:

            def bad(f, msg):
                # 'f' may be a directory pattern from 'match.files()',
                # so 'f not in ctx1' is not enough
                if f not in other and not other.hasdir(f):
                    self._repo.ui.warn(
                        b'%s: %s\n' % (self._repo.dirstate.pathto(f), msg)
                    )

            match.bad = bad
        return match

    def walk(self, match):
        '''Generates matching file names.'''
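        # Example (sketch): wctx.walk(matchmod.always()) yields every
        # tracked and unknown (but not ignored) file name, sorted.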
        return sorted(
            self._repo.dirstate.walk(
                self._repo.narrowmatch(match),
                subrepos=sorted(self.substate),
                unknown=True,
                ignored=False,
            )
        )

    def matches(self, match):
        match = self._repo.narrowmatch(match)
        ds = self._repo.dirstate
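        # dirstate state b'r' means "marked for removal"; such files are
        # filtered out so matches() only reports files expected to exist.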
        return sorted(f for f in ds.matches(match) if ds[f] != b'r')

    def markcommitted(self, node):
        with self._repo.dirstate.parentchange():
            for f in self.modified() + self.added():
                self._repo.dirstate.normal(f)
            for f in self.removed():
                self._repo.dirstate.drop(f)
            self._repo.dirstate.setparents(node)
            self._repo._quick_access_changeid_invalidate()

            # write changes out explicitly, because nesting wlock at
            # runtime may prevent 'wlock.release()' in 'repo.commit()'
            # from immediately doing so for subsequent changing files
            self._repo.dirstate.write(self._repo.currenttransaction())

        sparse.aftercommit(self._repo, node)

    def mergestate(self, clean=False):
        if clean:
            return mergestatemod.mergestate.clean(self._repo)
        return mergestatemod.mergestate.read(self._repo)


class committablefilectx(basefilectx):
    """A committablefilectx provides common functionality for a file context
    that wants the ability to commit, e.g. workingfilectx or memfilectx."""

    def __init__(self, repo, path, filelog=None, ctx=None):
        self._repo = repo
        self._path = path
        self._changeid = None
        self._filerev = self._filenode = None

        if filelog is not None:
            self._filelog = filelog
        if ctx:
            self._changectx = ctx

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def linkrev(self):
        # linked to self._changectx no matter if file is modified or not
        return self.rev()

    def renamed(self):
        path = self.copysource()
        if not path:
            return None
        return (
            path,
            self._changectx._parents[0]._manifest.get(
                path, self._repo.nodeconstants.nullid
            ),
        )

    def parents(self):
        '''return parent filectxs, following copies if necessary'''

        def filenode(ctx, path):
            return ctx._manifest.get(path, self._repo.nodeconstants.nullid)

        path = self._path
        fl = self._filelog
        pcl = self._changectx._parents
        renamed = self.renamed()

        if renamed:
            pl = [renamed + (None,)]
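            # renamed is (source path, source filenode); the appended None
            # leaves the filelog to be looked up from the source path later.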
        else:
            pl = [(path, filenode(pcl[0], path), fl)]

        for pc in pcl[1:]:
            pl.append((path, filenode(pc, path), fl))

        return [
            self._parentfilectx(p, fileid=n, filelog=l)
            for p, n, l in pl
            if n != self._repo.nodeconstants.nullid
        ]

    def children(self):
        return []


class workingfilectx(committablefilectx):
    """A workingfilectx object makes access to data related to a particular
    file in the working directory convenient."""

    def __init__(self, repo, path, filelog=None, workingctx=None):
        super(workingfilectx, self).__init__(repo, path, filelog, workingctx)

    @propertycache
    def _changectx(self):
        return workingctx(self._repo)

    def data(self):
        return self._repo.wread(self._path)

    def copysource(self):
        return self._repo.dirstate.copied(self._path)

    def size(self):
        return self._repo.wvfs.lstat(self._path).st_size

    def lstat(self):
        return self._repo.wvfs.lstat(self._path)

    def date(self):
        t, tz = self._changectx.date()
        try:
            return (self._repo.wvfs.lstat(self._path)[stat.ST_MTIME], tz)
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
            return (t, tz)

    def exists(self):
        return self._repo.wvfs.exists(self._path)

    def lexists(self):
        return self._repo.wvfs.lexists(self._path)

    def audit(self):
        return self._repo.wvfs.audit(self._path)

    def cmp(self, fctx):
        """compare with other file context

        returns True if different from fctx.
        """
        # fctx should be a filectx (not a workingfilectx)
        # invert comparison to reuse the same code path
        return fctx.cmp(self)

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        rmdir = self._repo.ui.configbool(b'experimental', b'removeemptydirs')
        self._repo.wvfs.unlinkpath(
            self._path, ignoremissing=ignoremissing, rmdir=rmdir
        )

    def write(self, data, flags, backgroundclose=False, **kwargs):
        """wraps repo.wwrite"""
        return self._repo.wwrite(
            self._path, data, flags, backgroundclose=backgroundclose, **kwargs
        )

    def markcopied(self, src):
        """marks this file a copy of `src`"""
        self._repo.dirstate.copy(src, self._path)

    def clearunknown(self):
        """Removes conflicting items in the working directory so that
        ``write()`` can be called successfully.
        """
        wvfs = self._repo.wvfs
        f = self._path
        wvfs.audit(f)
        if self._repo.ui.configbool(
            b'experimental', b'merge.checkpathconflicts'
        ):
            # remove files under the directory as they should already be
            # warned and backed up
            if wvfs.isdir(f) and not wvfs.islink(f):
                wvfs.rmtree(f, forcibly=True)
            for p in reversed(list(pathutil.finddirs(f))):
                if wvfs.isfileorlink(p):
                    wvfs.unlink(p)
                    break
        else:
            # don't remove files if path conflicts are not processed
            if wvfs.isdir(f) and not wvfs.islink(f):
                wvfs.removedirs(f)

    def setflags(self, l, x):
        self._repo.wvfs.setflags(self._path, l, x)


class overlayworkingctx(committablectx):
    """Wraps another mutable context with a write-back cache that can be
    converted into a commit context.

    self._cache[path] maps to a dict with keys: {
        'exists': bool?
        'date': date?
        'data': str?
        'flags': str?
        'copied': str? (path or None)
    }
    If `exists` is True, `flags` and 'date' must be non-None. If `exists` is
    False, the file was deleted.
    """

    def __init__(self, repo):
        super(overlayworkingctx, self).__init__(repo)
        self.clean()

    def setbase(self, wrappedctx):
        self._wrappedctx = wrappedctx
        self._parents = [wrappedctx]
        # Drop old manifest cache as it is now out of date.
        # This is necessary when, e.g., rebasing several nodes with one
        # ``overlayworkingctx`` (e.g. with --collapse).
        util.clearcachedproperty(self, b'_manifest')

    def setparents(self, p1node, p2node=None):
        if p2node is None:
            p2node = self._repo.nodeconstants.nullid
        assert p1node == self._wrappedctx.node()
        self._parents = [self._wrappedctx, self._repo.unfiltered()[p2node]]

    def data(self, path):
        if self.isdirty(path):
            if self._cache[path][b'exists']:
                if self._cache[path][b'data'] is not None:
                    return self._cache[path][b'data']
                else:
                    # Must fallback here, too, because we only set flags.
                    return self._wrappedctx[path].data()
            else:
                raise error.ProgrammingError(
                    b"No such file or directory: %s" % path
                )
        else:
            return self._wrappedctx[path].data()

    @propertycache
    def _manifest(self):
        parents = self.parents()
        man = parents[0].manifest().copy()

        flag = self._flagfunc
        for path in self.added():
            man[path] = self._repo.nodeconstants.addednodeid
            man.setflag(path, flag(path))
        for path in self.modified():
            man[path] = self._repo.nodeconstants.modifiednodeid
            man.setflag(path, flag(path))
        for path in self.removed():
            del man[path]
        return man

    @propertycache
    def _flagfunc(self):
        def f(path):
            return self._cache[path][b'flags']

        return f

    def files(self):
        return sorted(self.added() + self.modified() + self.removed())

    def modified(self):
        return [
            f
            for f in self._cache.keys()
            if self._cache[f][b'exists'] and self._existsinparent(f)
        ]

    def added(self):
        return [
            f
            for f in self._cache.keys()
            if self._cache[f][b'exists'] and not self._existsinparent(f)
        ]

    def removed(self):
        return [
            f
            for f in self._cache.keys()
            if not self._cache[f][b'exists'] and self._existsinparent(f)
        ]

    def p1copies(self):
        copies = {}
        narrowmatch = self._repo.narrowmatch()
        for f in self._cache.keys():
            if not narrowmatch(f):
                continue
            copies.pop(f, None)  # delete if it exists
            source = self._cache[f][b'copied']
            if source:
                copies[f] = source
        return copies

    def p2copies(self):
        copies = {}
        narrowmatch = self._repo.narrowmatch()
        for f in self._cache.keys():
            if not narrowmatch(f):
                continue
            copies.pop(f, None)  # delete if it exists
            source = self._cache[f][b'copied']
            if source:
                copies[f] = source
        return copies

    def isinmemory(self):
        return True

    def filedate(self, path):
        if self.isdirty(path):
            return self._cache[path][b'date']
        else:
            return self._wrappedctx[path].date()

    def markcopied(self, path, origin):
        self._markdirty(
            path,
            exists=True,
            date=self.filedate(path),
            flags=self.flags(path),
            copied=origin,
        )

    def copydata(self, path):
        if self.isdirty(path):
            return self._cache[path][b'copied']
        else:
            return None

    def flags(self, path):
        if self.isdirty(path):
            if self._cache[path][b'exists']:
                return self._cache[path][b'flags']
            else:
                raise error.ProgrammingError(
                    b"No such file or directory: %s" % path
                )
        else:
            return self._wrappedctx[path].flags()

    def __contains__(self, key):
        if key in self._cache:
            return self._cache[key][b'exists']
        return key in self.p1()

    def _existsinparent(self, path):
        try:
            # ``commitctx`` raises a ``ManifestLookupError`` if a path does
            # not exist, unlike ``workingctx``, which returns a
            # ``workingfilectx`` with an ``exists()`` function.
            self._wrappedctx[path]
            return True
        except error.ManifestLookupError:
            return False

    def _auditconflicts(self, path):
        """Replicates conflict checks done by wvfs.write().

        Since we never write to the filesystem and never call `applyupdates` in
        IMM, we'll never check that a path is actually writable -- e.g., because
        it adds `a/foo`, but `a` is actually a file in the other commit.
        """

        def fail(path, component):
            # p1() is the base and we're receiving "writes" for p2()'s
            # files.
            if b'l' in self.p1()[component].flags():
                raise error.Abort(
                    b"error: %s conflicts with symlink %s "
                    b"in %d." % (path, component, self.p1().rev())
                )
            else:
                raise error.Abort(
                    b"error: '%s' conflicts with file '%s' in "
                    b"%d." % (path, component, self.p1().rev())
                )

        # Test that each new directory to be created to write this path from p2
        # is not a file in p1.
        components = path.split(b'/')
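        # e.g. for b'a/b/c' this checks the prefixes b'a' and b'a/b'; the
        # i == 0 iteration yields the empty prefix, which does not match in
        # practice.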
        for i in pycompat.xrange(len(components)):
            component = b"/".join(components[0:i])
            if component in self:
                fail(path, component)

        # Test the other direction -- that this path from p2 isn't a directory
        # in p1 (test that p1 doesn't have any paths matching `path/*`).
        match = self.match([path], default=b'path')
        mfiles = list(self.p1().manifest().walk(match))
        if len(mfiles) > 0:
            if len(mfiles) == 1 and mfiles[0] == path:
                return
            # omit the files which are deleted in current IMM wctx
            mfiles = [m for m in mfiles if m in self]
            if not mfiles:
                return
            raise error.Abort(
                b"error: file '%s' cannot be written because "
                b" '%s/' is a directory in %s (containing %d "
                b"entries: %s)"
                % (path, path, self.p1(), len(mfiles), b', '.join(mfiles))
            )

    def write(self, path, data, flags=b'', **kwargs):
        if data is None:
            raise error.ProgrammingError(b"data must be non-None")
        self._auditconflicts(path)
        self._markdirty(
            path, exists=True, data=data, date=dateutil.makedate(), flags=flags
        )

    def setflags(self, path, l, x):
        flag = b''
        if l:
            flag = b'l'
        elif x:
            flag = b'x'
        self._markdirty(path, exists=True, date=dateutil.makedate(), flags=flag)

    def remove(self, path):
        self._markdirty(path, exists=False)

    def exists(self, path):
        """exists behaves like `lexists`, but needs to follow symlinks and
        return False if they are broken.
        """
        if self.isdirty(path):
            # If this path exists and is a symlink, "follow" it by calling
            # exists on the destination path.
            if (
                self._cache[path][b'exists']
                and b'l' in self._cache[path][b'flags']
            ):
                return self.exists(self._cache[path][b'data'].strip())
            else:
                return self._cache[path][b'exists']

        return self._existsinparent(path)

    def lexists(self, path):
        """lexists returns True if the path exists"""
        if self.isdirty(path):
            return self._cache[path][b'exists']

        return self._existsinparent(path)

    def size(self, path):
        if self.isdirty(path):
            if self._cache[path][b'exists']:
                return len(self._cache[path][b'data'])
            else:
                raise error.ProgrammingError(
                    b"No such file or directory: %s" % path
                )
        return self._wrappedctx[path].size()

    def tomemctx(
        self,
        text,
        branch=None,
        extra=None,
        date=None,
        parents=None,
        user=None,
        editor=None,
    ):
        """Converts this ``overlayworkingctx`` into a ``memctx`` ready to be
        committed.

        ``text`` is the commit message.
        ``parents`` (optional) are rev numbers.
        """
        # Default parents to the wrapped context if not passed.
        if parents is None:
            parents = self.parents()
            if len(parents) == 1:
                parents = (parents[0], None)

        # ``parents`` is passed as rev numbers; convert to ``commitctxs``.
        if parents[1] is None:
            parents = (self._repo[parents[0]], None)
        else:
            parents = (self._repo[parents[0]], self._repo[parents[1]])

        files = self.files()

        def getfile(repo, memctx, path):
            if self._cache[path][b'exists']:
                return memfilectx(
                    repo,
                    memctx,
                    path,
                    self._cache[path][b'data'],
                    b'l' in self._cache[path][b'flags'],
                    b'x' in self._cache[path][b'flags'],
                    self._cache[path][b'copied'],
                )
            else:
                # Returning None, but including the path in `files`, is
                # necessary for memctx to register a deletion.
                return None

        if branch is None:
            branch = self._wrappedctx.branch()

        return memctx(
            self._repo,
            parents,
            text,
            files,
            getfile,
            date=date,
            extra=extra,
            user=user,
            branch=branch,
            editor=editor,
        )

    def tomemctx_for_amend(self, precursor):
        extra = precursor.extra().copy()
        extra[b'amend_source'] = precursor.hex()
        return self.tomemctx(
            text=precursor.description(),
            branch=precursor.branch(),
            extra=extra,
            date=precursor.date(),
            user=precursor.user(),
        )

    def isdirty(self, path):
        return path in self._cache

    def clean(self):
        self._mergestate = None
        self._cache = {}

    def _compact(self):
        """Removes keys from the cache that are actually clean, by comparing
        them with the underlying context.

        This can occur during the merge process, e.g. by passing --tool :local
        to resolve a conflict.
        """
        keys = []
        # This won't be perfect, but can help performance significantly when
        # using things like remotefilelog.
        scmutil.prefetchfiles(
            self.repo(),
            [
                (
                    self.p1().rev(),
                    scmutil.matchfiles(self.repo(), self._cache.keys()),
                )
            ],
        )

        for path in self._cache.keys():
            cache = self._cache[path]
            try:
                underlying = self._wrappedctx[path]
                if (
                    underlying.data() == cache[b'data']
                    and underlying.flags() == cache[b'flags']
                ):
                    keys.append(path)
            except error.ManifestLookupError:
                # Path not in the underlying manifest (created).
                continue

        for path in keys:
            del self._cache[path]
        return keys

    def _markdirty(
        self, path, exists, data=None, date=None, flags=b'', copied=None
    ):
        # data not provided, let's see if we already have some; if not, let's
        # grab it from our underlying context, so that we always have data if
        # the file is marked as existing.
        if exists and data is None:
            oldentry = self._cache.get(path) or {}
            data = oldentry.get(b'data')
            if data is None:
                data = self._wrappedctx[path].data()

        self._cache[path] = {
            b'exists': exists,
            b'data': data,
            b'date': date,
            b'flags': flags,
            b'copied': copied,
        }
        util.clearcachedproperty(self, b'_manifest')

    def filectx(self, path, filelog=None):
        return overlayworkingfilectx(
            self._repo, path, parent=self, filelog=filelog
        )

    def mergestate(self, clean=False):
        if clean or self._mergestate is None:
            self._mergestate = mergestatemod.memmergestate(self._repo)
        return self._mergestate


class overlayworkingfilectx(committablefilectx):
2627 """Wrap a ``workingfilectx`` but intercepts all writes into an in-memory
2623 """Wrap a ``workingfilectx`` but intercepts all writes into an in-memory
2628 cache, which can be flushed through later by calling ``flush()``."""
2624 cache, which can be flushed through later by calling ``flush()``."""

    def __init__(self, repo, path, filelog=None, parent=None):
        super(overlayworkingfilectx, self).__init__(repo, path, filelog, parent)
        self._repo = repo
        self._parent = parent
        self._path = path

    def cmp(self, fctx):
        return self.data() != fctx.data()

    def changectx(self):
        return self._parent

    def data(self):
        return self._parent.data(self._path)

    def date(self):
        return self._parent.filedate(self._path)

    def exists(self):
        return self.lexists()

    def lexists(self):
        return self._parent.exists(self._path)

    def copysource(self):
        return self._parent.copydata(self._path)

    def size(self):
        return self._parent.size(self._path)

    def markcopied(self, origin):
        self._parent.markcopied(self._path, origin)

    def audit(self):
        pass

    def flags(self):
        return self._parent.flags(self._path)

    def setflags(self, islink, isexec):
        return self._parent.setflags(self._path, islink, isexec)

    def write(self, data, flags, backgroundclose=False, **kwargs):
        return self._parent.write(self._path, data, flags, **kwargs)

    def remove(self, ignoremissing=False):
        return self._parent.remove(self._path)

    def clearunknown(self):
        pass


class workingcommitctx(workingctx):
    """A workingcommitctx object makes access to data related to
    the revision being committed convenient.

    This hides changes in the working directory if they aren't
    committed in this context.
    """

    def __init__(
        self, repo, changes, text=b"", user=None, date=None, extra=None
    ):
        super(workingcommitctx, self).__init__(
            repo, text, user, date, extra, changes
        )

    def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
        """Return matched files only in ``self._status``

        Uncommitted files appear "clean" via this context, even if
        they aren't actually so in the working directory.
        """
        if clean:
            clean = [f for f in self._manifest if f not in self._changedset]
        else:
            clean = []
        return scmutil.status(
            [f for f in self._status.modified if match(f)],
            [f for f in self._status.added if match(f)],
            [f for f in self._status.removed if match(f)],
            [],
            [],
            [],
            clean,
        )

    @propertycache
    def _changedset(self):
        """Return the set of files changed in this context"""
        changed = set(self._status.modified)
        changed.update(self._status.added)
        changed.update(self._status.removed)
        return changed


def makecachingfilectxfn(func):
    """Create a filectxfn that caches based on the path.

    We can't use util.cachefunc because it uses all arguments as the cache
    key and this creates a cycle since the arguments include the repo and
    memctx.
    """
    cache = {}

    def getfilectx(repo, memctx, path):
        if path not in cache:
            cache[path] = func(repo, memctx, path)
        return cache[path]

    return getfilectx


def memfilefromctx(ctx):
    """Given a context return a memfilectx for ctx[path]

    This is a convenience method for building a memctx based on another
    context.
    """

    def getfilectx(repo, memctx, path):
        fctx = ctx[path]
        copysource = fctx.copysource()
        return memfilectx(
            repo,
            memctx,
            path,
            fctx.data(),
            islink=fctx.islink(),
            isexec=fctx.isexec(),
            copysource=copysource,
        )

    return getfilectx


def memfilefrompatch(patchstore):
    """Given a patch (e.g. patchstore object) return a memfilectx

    This is a convenience method for building a memctx based on a patchstore.
    """

    def getfilectx(repo, memctx, path):
        data, mode, copysource = patchstore.getfile(path)
        if data is None:
            return None
        islink, isexec = mode
        return memfilectx(
            repo,
            memctx,
            path,
            data,
            islink=islink,
            isexec=isexec,
            copysource=copysource,
        )

    return getfilectx


class memctx(committablectx):
2791 """Use memctx to perform in-memory commits via localrepo.commitctx().
2787 """Use memctx to perform in-memory commits via localrepo.commitctx().
2792
2788
2793 Revision information is supplied at initialization time while
2789 Revision information is supplied at initialization time while
2794 related files data and is made available through a callback
2790 related files data and is made available through a callback
2795 mechanism. 'repo' is the current localrepo, 'parents' is a
2791 mechanism. 'repo' is the current localrepo, 'parents' is a
2796 sequence of two parent revisions identifiers (pass None for every
2792 sequence of two parent revisions identifiers (pass None for every
2797 missing parent), 'text' is the commit message and 'files' lists
2793 missing parent), 'text' is the commit message and 'files' lists
2798 names of files touched by the revision (normalized and relative to
2794 names of files touched by the revision (normalized and relative to
2799 repository root).
2795 repository root).
2800
2796
2801 filectxfn(repo, memctx, path) is a callable receiving the
2797 filectxfn(repo, memctx, path) is a callable receiving the
2802 repository, the current memctx object and the normalized path of
2798 repository, the current memctx object and the normalized path of
2803 requested file, relative to repository root. It is fired by the
2799 requested file, relative to repository root. It is fired by the
2804 commit function for every file in 'files', but calls order is
2800 commit function for every file in 'files', but calls order is
2805 undefined. If the file is available in the revision being
2801 undefined. If the file is available in the revision being
2806 committed (updated or added), filectxfn returns a memfilectx
2802 committed (updated or added), filectxfn returns a memfilectx
2807 object. If the file was removed, filectxfn return None for recent
2803 object. If the file was removed, filectxfn return None for recent
2808 Mercurial. Moved files are represented by marking the source file
2804 Mercurial. Moved files are represented by marking the source file
2809 removed and the new file added with copy information (see
2805 removed and the new file added with copy information (see
2810 memfilectx).
2806 memfilectx).
2811
2807
2812 user receives the committer name and defaults to current
2808 user receives the committer name and defaults to current
2813 repository username, date is the commit date in any format
2809 repository username, date is the commit date in any format
2814 supported by dateutil.parsedate() and defaults to current date, extra
2810 supported by dateutil.parsedate() and defaults to current date, extra
2815 is a dictionary of metadata or is left empty.
2811 is a dictionary of metadata or is left empty.
2816 """
2812 """

    # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
    # Extensions that need to retain compatibility across Mercurial 3.1 can use
    # this field to determine what to do in filectxfn.
    _returnnoneformissingfiles = True

    def __init__(
        self,
        repo,
        parents,
        text,
        files,
        filectxfn,
        user=None,
        date=None,
        extra=None,
        branch=None,
        editor=None,
    ):
        super(memctx, self).__init__(
            repo, text, user, date, extra, branch=branch
        )
        self._rev = None
        self._node = None
        parents = [(p or self._repo.nodeconstants.nullid) for p in parents]
        p1, p2 = parents
        self._parents = [self._repo[p] for p in (p1, p2)]
        files = sorted(set(files))
        self._files = files
        self.substate = {}

        if isinstance(filectxfn, patch.filestore):
            filectxfn = memfilefrompatch(filectxfn)
        elif not callable(filectxfn):
            # if store is not callable, wrap it in a function
            filectxfn = memfilefromctx(filectxfn)

        # memoizing increases performance for e.g. vcs convert scenarios.
        self._filectxfn = makecachingfilectxfn(filectxfn)

        if editor:
            self._text = editor(self._repo, self, [])
            self._repo.savecommitmessage(self._text)

    def filectx(self, path, filelog=None):
        """get a file context from the working directory

        Returns None if file doesn't exist and should be removed."""
        return self._filectxfn(self._repo, self, path)

    def commit(self):
        """commit context to the repo"""
        return self._repo.commitctx(self)

    @propertycache
    def _manifest(self):
        """generate a manifest based on the return values of filectxfn"""

        # keep this simple for now; just worry about p1
        pctx = self._parents[0]
        man = pctx.manifest().copy()

        for f in self._status.modified:
            man[f] = self._repo.nodeconstants.modifiednodeid

        for f in self._status.added:
            man[f] = self._repo.nodeconstants.addednodeid

        for f in self._status.removed:
            if f in man:
                del man[f]

        return man

    @propertycache
    def _status(self):
        """Calculate exact status from ``files`` specified at construction"""
        man1 = self.p1().manifest()
        p2 = self._parents[1]
        # "1 < len(self._parents)" can't be used for checking
        # existence of the 2nd parent, because "memctx._parents" is
        # explicitly initialized by the list, of which length is 2.
        if p2.rev() != nullrev:
            man2 = p2.manifest()
            managing = lambda f: f in man1 or f in man2
        else:
            managing = lambda f: f in man1

        modified, added, removed = [], [], []
        for f in self._files:
            if not managing(f):
                added.append(f)
            elif self[f]:
                modified.append(f)
            else:
                removed.append(f)

        return scmutil.status(modified, added, removed, [], [], [], [])

    def parents(self):
        if self._parents[1].rev() == nullrev:
            return [self._parents[0]]
        return self._parents


class memfilectx(committablefilectx):
    """memfilectx represents an in-memory file to commit.

    See memctx and committablefilectx for more details.
    """

    def __init__(
        self,
        repo,
        changectx,
        path,
        data,
        islink=False,
        isexec=False,
        copysource=None,
    ):
2938 """
2934 """
2939 path is the normalized file path relative to repository root.
2935 path is the normalized file path relative to repository root.
2940 data is the file content as a string.
2936 data is the file content as a string.
2941 islink is True if the file is a symbolic link.
2937 islink is True if the file is a symbolic link.
2942 isexec is True if the file is executable.
2938 isexec is True if the file is executable.
2943 copied is the source file path if current file was copied in the
2939 copied is the source file path if current file was copied in the
2944 revision being committed, or None."""
2940 revision being committed, or None."""
2945 super(memfilectx, self).__init__(repo, path, None, changectx)
2941 super(memfilectx, self).__init__(repo, path, None, changectx)
2946 self._data = data
2942 self._data = data
2947 if islink:
2943 if islink:
2948 self._flags = b'l'
2944 self._flags = b'l'
2949 elif isexec:
2945 elif isexec:
2950 self._flags = b'x'
2946 self._flags = b'x'
2951 else:
2947 else:
2952 self._flags = b''
2948 self._flags = b''
2953 self._copysource = copysource
2949 self._copysource = copysource
2954
2950
2955 def copysource(self):
2951 def copysource(self):
2956 return self._copysource
2952 return self._copysource
2957
2953
2958 def cmp(self, fctx):
2954 def cmp(self, fctx):
2959 return self.data() != fctx.data()
2955 return self.data() != fctx.data()
2960
2956
2961 def data(self):
2957 def data(self):
2962 return self._data
2958 return self._data
2963
2959
2964 def remove(self, ignoremissing=False):
2960 def remove(self, ignoremissing=False):
2965 """wraps unlink for a repo's working directory"""
2961 """wraps unlink for a repo's working directory"""
2966 # need to figure out what to do here
2962 # need to figure out what to do here
2967 del self._changectx[self._path]
2963 del self._changectx[self._path]
2968
2964
2969 def write(self, data, flags, **kwargs):
2965 def write(self, data, flags, **kwargs):
2970 """wraps repo.wwrite"""
2966 """wraps repo.wwrite"""
2971 self._data = data
2967 self._data = data
2972
2968
2973
2969
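For context, memfilectx instances are what a memctx's per-file callback typically hands back. A hedged usage sketch, assuming the usual `filectxfn(repo, memctx, path)` callback contract; the path and contents are hypothetical:

def filectxfn(repo, memctx, path):
    if path == b'hello.txt':  # hypothetical file
        return memfilectx(
            repo,
            memctx,
            path,
            data=b'hello world\n',
            copysource=None,
        )
    return None  # returning None treats the path as removed
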
class metadataonlyctx(committablectx):
    """Like memctx, but reusing the manifest of a different commit.
    Intended to be used by lightweight operations that are creating
    metadata-only changes.

    Revision information is supplied at initialization time. 'repo' is the
    current localrepo, 'ctx' is the original revision whose manifest we're
    reusing, 'parents' is a sequence of two parent revision identifiers (pass
    None for every missing parent), 'text' is the commit message.

    user receives the committer name and defaults to the current repository
    username, date is the commit date in any format supported by
    dateutil.parsedate() and defaults to the current date, extra is a
    dictionary of metadata or is left empty.
    """

    def __init__(
        self,
        repo,
        originalctx,
        parents=None,
        text=None,
        user=None,
        date=None,
        extra=None,
        editor=None,
    ):
        if text is None:
            text = originalctx.description()
        super(metadataonlyctx, self).__init__(repo, text, user, date, extra)
        self._rev = None
        self._node = None
        self._originalctx = originalctx
        self._manifestnode = originalctx.manifestnode()
        if parents is None:
            parents = originalctx.parents()
        else:
            parents = [repo[p] for p in parents if p is not None]
        parents = parents[:]
        while len(parents) < 2:
            parents.append(repo[nullrev])
        p1, p2 = self._parents = parents

        # sanity check to ensure that the reused manifest parents are
        # manifests of our commit parents
        mp1, mp2 = self.manifestctx().parents
        if p1 != self._repo.nodeconstants.nullid and p1.manifestnode() != mp1:
            raise RuntimeError(
                r"can't reuse the manifest: its p1 "
                r"doesn't match the new ctx p1"
            )
        if p2 != self._repo.nodeconstants.nullid and p2.manifestnode() != mp2:
            raise RuntimeError(
                r"can't reuse the manifest: "
                r"its p2 doesn't match the new ctx p2"
            )

        self._files = originalctx.files()
        self.substate = {}

        if editor:
            self._text = editor(self._repo, self, [])
            self._repo.savecommitmessage(self._text)

    def manifestnode(self):
        return self._manifestnode

    @property
    def _manifestctx(self):
        return self._repo.manifestlog[self._manifestnode]

    def filectx(self, path, filelog=None):
        return self._originalctx.filectx(path, filelog=filelog)

    def commit(self):
        """commit context to the repo"""
        return self._repo.commitctx(self)

    @property
    def _manifest(self):
        return self._originalctx.manifest()

    @propertycache
    def _status(self):
        """Calculate exact status from ``files`` specified in the ``origctx``
        and parents manifests.
        """
        man1 = self.p1().manifest()
        p2 = self._parents[1]
        # "1 < len(self._parents)" can't be used for checking
        # existence of the 2nd parent, because "metadataonlyctx._parents" is
        # explicitly initialized by a list whose length is always 2.
        if p2.rev() != nullrev:
            man2 = p2.manifest()
            managing = lambda f: f in man1 or f in man2
        else:
            managing = lambda f: f in man1

        modified, added, removed = [], [], []
        for f in self._files:
            if not managing(f):
                added.append(f)
            elif f in self:
                modified.append(f)
            else:
                removed.append(f)

        return scmutil.status(modified, added, removed, [], [], [], [])

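A hedged sketch of the intended use: rewriting only a changeset's metadata (here the user field) while leaving its manifest untouched. `repo` is assumed to be a locked localrepo; locking and error handling are omitted:

old = repo[b'.']
new = metadataonlyctx(
    repo,
    old,
    parents=[p.node() for p in old.parents()],
    text=old.description(),
    user=b'new-author@example.com',  # hypothetical new committer
    date=old.date(),
    extra=old.extra(),
)
newnode = new.commit()  # reuses old's manifest, writes only new metadata
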
class arbitraryfilectx(object):
    """Allows you to use filectx-like functions on a file in an arbitrary
    location on disk, possibly not in the working directory.
    """

    def __init__(self, path, repo=None):
        # Repo is optional because contrib/simplemerge uses this class.
        self._repo = repo
        self._path = path

    def cmp(self, fctx):
        # filecmp follows symlinks whereas `cmp` should not, so skip the fast
        # path if either side is a symlink.
        symlinks = b'l' in self.flags() or b'l' in fctx.flags()
        if not symlinks and isinstance(fctx, workingfilectx) and self._repo:
            # Add a fast-path for merge if both sides are disk-backed.
            # Note that filecmp uses the opposite return values (True if same)
            # from our cmp functions (True if different).
            return not filecmp.cmp(self.path(), self._repo.wjoin(fctx.path()))
        return self.data() != fctx.data()

    def path(self):
        return self._path

    def flags(self):
        return b''

    def data(self):
        return util.readfile(self._path)

    def decodeddata(self):
        with open(self._path, b"rb") as f:
            return f.read()

    def remove(self):
        util.unlink(self._path)

    def write(self, data, flags, **kwargs):
        assert not flags
        with open(self._path, b"wb") as f:
            f.write(data)
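A short hedged sketch: this class lets merge helpers treat a loose on-disk file (say, the output of an external merge tool) through the filectx interface. The path is hypothetical and `other` is assumed to be another filectx-like object:

fctx = arbitraryfilectx(b'/tmp/merge-result')
raw = fctx.data()          # raw bytes read from disk
differs = fctx.cmp(other)  # True if contents differ from `other`
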
@@ -1,1526 +1,1526 b''
# dirstate.py - working directory tracking for mercurial
#
# Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import collections
import contextlib
import errno
import os
import stat

from .i18n import _
from .pycompat import delattr

from hgdemandimport import tracing

from . import (
    dirstatemap,
    encoding,
    error,
    match as matchmod,
    pathutil,
    policy,
    pycompat,
    scmutil,
    sparse,
    util,
)

from .interfaces import (
    dirstate as intdirstate,
    util as interfaceutil,
)

parsers = policy.importmod('parsers')
rustmod = policy.importrust('dirstate')

SUPPORTS_DIRSTATE_V2 = rustmod is not None

propertycache = util.propertycache
filecache = scmutil.filecache
_rangemask = dirstatemap.rangemask

DirstateItem = parsers.DirstateItem


class repocache(filecache):
    """filecache for files in .hg/"""

    def join(self, obj, fname):
        return obj._opener.join(fname)


class rootcache(filecache):
    """filecache for files in the repository root"""

    def join(self, obj, fname):
        return obj._join(fname)


def _getfsnow(vfs):
    '''Get "now" timestamp on filesystem'''
    tmpfd, tmpname = vfs.mkstemp()
    try:
        return os.fstat(tmpfd)[stat.ST_MTIME]
    finally:
        os.close(tmpfd)
        vfs.unlink(tmpname)

def requires_parents_change(func):
    def wrap(self, *args, **kwargs):
        if not self.pendingparentchange():
            msg = 'calling `%s` outside of a parentchange context'
            msg %= func.__name__
            raise error.ProgrammingError(msg)
        return func(self, *args, **kwargs)

    return wrap


def requires_no_parents_change(func):
    def wrap(self, *args, **kwargs):
-        if not self.pendingparentchange():
+        if self.pendingparentchange():
            msg = 'calling `%s` inside of a parentchange context'
            msg %= func.__name__
            raise error.ProgrammingError(msg)
        return func(self, *args, **kwargs)

    return wrap

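The one-character inversion above is the only code change in this file: previously the guard raised whenever a parent change was *not* pending, which is exactly the situation `requires_no_parents_change` is meant to allow. A hedged illustration of the corrected behavior, assuming `ds` is a dirstate instance:

ds.set_tracked(b'foo')      # allowed: no parent change pending
with ds.parentchange():
    ds.set_tracked(b'foo')  # raises ProgrammingError:
                            # calling `set_tracked` inside of a
                            # parentchange context
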
@interfaceutil.implementer(intdirstate.idirstate)
class dirstate(object):
    def __init__(
        self,
        opener,
        ui,
        root,
        validate,
        sparsematchfn,
        nodeconstants,
        use_dirstate_v2,
    ):
        """Create a new dirstate object.

        opener is an open()-like callable that can be used to open the
        dirstate file; root is the root of the directory tracked by
        the dirstate.
        """
        self._use_dirstate_v2 = use_dirstate_v2
        self._nodeconstants = nodeconstants
        self._opener = opener
        self._validate = validate
        self._root = root
        self._sparsematchfn = sparsematchfn
        # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is
        # UNC path pointing to root share (issue4557)
        self._rootdir = pathutil.normasprefix(root)
        self._dirty = False
        self._lastnormaltime = 0
        self._ui = ui
        self._filecache = {}
        self._parentwriters = 0
        self._filename = b'dirstate'
        self._pendingfilename = b'%s.pending' % self._filename
        self._plchangecallbacks = {}
        self._origpl = None
        self._updatedfiles = set()
        self._mapcls = dirstatemap.dirstatemap
        # Access and cache cwd early, so we don't access it for the first time
        # after a working-copy update caused it to not exist (accessing it then
        # raises an exception).
        self._cwd

    def prefetch_parents(self):
        """make sure the parents are loaded

        Used to avoid a race condition.
        """
        self._pl

    @contextlib.contextmanager
    def parentchange(self):
        """Context manager for handling dirstate parents.

        If an exception occurs in the scope of the context manager,
        the incoherent dirstate won't be written when wlock is
        released.
        """
        self._parentwriters += 1
        yield
        # Typically we want the "undo" step of a context manager in a
        # finally block so it happens even when an exception
        # occurs. In this case, however, we only want to decrement
        # parentwriters if the code in the with statement exits
        # normally, so we don't have a try/finally here on purpose.
        self._parentwriters -= 1

    def pendingparentchange(self):
        """Returns true if the dirstate is in the middle of a set of changes
        that modify the dirstate parent.
        """
        return self._parentwriters > 0

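A hedged sketch of the expected calling pattern: operations that move the working-copy parents wrap both the setparents call and the per-file adjustments in this context manager, so a crash leaves the dirstate unwritten rather than half-updated (`repo` and `newnode` are assumed to exist):

with repo.dirstate.parentchange():
    repo.dirstate.setparents(newnode)
    # ... per-file dirstate adjustments go here ...
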
    @propertycache
    def _map(self):
        """Return the dirstate contents (see documentation for dirstatemap)."""
        self._map = self._mapcls(
            self._ui,
            self._opener,
            self._root,
            self._nodeconstants,
            self._use_dirstate_v2,
        )
        return self._map

    @property
    def _sparsematcher(self):
        """The matcher for the sparse checkout.

        The working directory may not include every file from a manifest. The
        matcher obtained by this property will match a path if it is to be
        included in the working directory.
        """
        # TODO there is potential to cache this property. For now, the matcher
        # is resolved on every access. (But the called function does use a
        # cache to keep the lookup fast.)
        return self._sparsematchfn()

    @repocache(b'branch')
    def _branch(self):
        try:
            return self._opener.read(b"branch").strip() or b"default"
        except IOError as inst:
            if inst.errno != errno.ENOENT:
                raise
            return b"default"

    @property
    def _pl(self):
        return self._map.parents()

    def hasdir(self, d):
        return self._map.hastrackeddir(d)

    @rootcache(b'.hgignore')
    def _ignore(self):
        files = self._ignorefiles()
        if not files:
            return matchmod.never()

        pats = [b'include:%s' % f for f in files]
        return matchmod.match(self._root, b'', [], pats, warn=self._ui.warn)

    @propertycache
    def _slash(self):
        return self._ui.configbool(b'ui', b'slash') and pycompat.ossep != b'/'

    @propertycache
    def _checklink(self):
        return util.checklink(self._root)

    @propertycache
    def _checkexec(self):
        return bool(util.checkexec(self._root))

    @propertycache
    def _checkcase(self):
        return not util.fscasesensitive(self._join(b'.hg'))

    def _join(self, f):
        # much faster than os.path.join()
        # it's safe because f is always a relative path
        return self._rootdir + f

    def flagfunc(self, buildfallback):
        if self._checklink and self._checkexec:

            def f(x):
                try:
                    st = os.lstat(self._join(x))
                    if util.statislink(st):
                        return b'l'
                    if util.statisexec(st):
                        return b'x'
                except OSError:
                    pass
                return b''

            return f

        fallback = buildfallback()
        if self._checklink:

            def f(x):
                if os.path.islink(self._join(x)):
                    return b'l'
                if b'x' in fallback(x):
                    return b'x'
                return b''

            return f
        if self._checkexec:

            def f(x):
                if b'l' in fallback(x):
                    return b'l'
                if util.isexec(self._join(x)):
                    return b'x'
                return b''

            return f
        else:
            return fallback

    @propertycache
    def _cwd(self):
        # internal config: ui.forcecwd
        forcecwd = self._ui.config(b'ui', b'forcecwd')
        if forcecwd:
            return forcecwd
        return encoding.getcwd()

    def getcwd(self):
        """Return the path from which a canonical path is calculated.

        This path should be used to resolve file patterns or to convert
        canonical paths back to file paths for display. It shouldn't be
        used to get real file paths. Use vfs functions instead.
        """
        cwd = self._cwd
        if cwd == self._root:
            return b''
        # self._root ends with a path separator if self._root is '/' or 'C:\'
        rootsep = self._root
        if not util.endswithsep(rootsep):
            rootsep += pycompat.ossep
        if cwd.startswith(rootsep):
            return cwd[len(rootsep) :]
        else:
            # we're outside the repo. return an absolute path.
            return cwd

    def pathto(self, f, cwd=None):
        if cwd is None:
            cwd = self.getcwd()
        path = util.pathto(self._root, cwd, f)
        if self._slash:
            return util.pconvert(path)
        return path

    def __getitem__(self, key):
        """Return the current state of key (a filename) in the dirstate.

        States are:
          n  normal
          m  needs merging
          r  marked for removal
          a  marked for addition
          ?  not tracked

        XXX The "state" is a bit obscure to be in the "public" API. We should
        consider migrating all users of this to go through the dirstate entry
        instead.
        """
        entry = self._map.get(key)
        if entry is not None:
            return entry.state
        return b'?'

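A hedged sketch of querying the state codes listed above (`repo` assumed):

state = repo.dirstate[b'path/to/file']  # hypothetical path
if state == b'?':
    print('untracked')
elif state == b'a':
    print('marked for addition')
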
    def __contains__(self, key):
        return key in self._map

    def __iter__(self):
        return iter(sorted(self._map))

    def items(self):
        return pycompat.iteritems(self._map)

    iteritems = items

    def directories(self):
        return self._map.directories()

    def parents(self):
        return [self._validate(p) for p in self._pl]

    def p1(self):
        return self._validate(self._pl[0])

    def p2(self):
        return self._validate(self._pl[1])

    @property
    def in_merge(self):
        """True if a merge is in progress"""
        return self._pl[1] != self._nodeconstants.nullid

    def branch(self):
        return encoding.tolocal(self._branch)

    def setparents(self, p1, p2=None):
        """Set dirstate parents to p1 and p2.

        When moving from two parents to one, "merged" entries are
        adjusted to normal, and previous copy records are discarded and
        returned by the call.

        See localrepo.setparents()
        """
        if p2 is None:
            p2 = self._nodeconstants.nullid
        if self._parentwriters == 0:
            raise ValueError(
                b"cannot set dirstate parent outside of "
                b"dirstate.parentchange context manager"
            )

        self._dirty = True
        oldp2 = self._pl[1]
        if self._origpl is None:
            self._origpl = self._pl
        self._map.setparents(p1, p2)
        copies = {}
        if (
            oldp2 != self._nodeconstants.nullid
            and p2 == self._nodeconstants.nullid
        ):
            candidatefiles = self._map.non_normal_or_other_parent_paths()

            for f in candidatefiles:
                s = self._map.get(f)
                if s is None:
                    continue

                # Discard "merged" markers when moving away from a merge state
                if s.merged:
                    source = self._map.copymap.get(f)
                    if source:
                        copies[f] = source
                    self.normallookup(f)
                # Also fix up otherparent markers
                elif s.from_p2:
                    source = self._map.copymap.get(f)
                    if source:
                        copies[f] = source
                    self._add(f)
        return copies

    def setbranch(self, branch):
        self.__class__._branch.set(self, encoding.fromlocal(branch))
        f = self._opener(b'branch', b'w', atomictemp=True, checkambig=True)
        try:
            f.write(self._branch + b'\n')
            f.close()

            # make sure filecache has the correct stat info for _branch after
            # replacing the underlying file
            ce = self._filecache[b'_branch']
            if ce:
                ce.refresh()
        except:  # re-raises
            f.discard()
            raise

    def invalidate(self):
        """Causes the next access to reread the dirstate.

        This is different from localrepo.invalidatedirstate() because it always
        rereads the dirstate. Use localrepo.invalidatedirstate() if you want to
        check whether the dirstate has changed before rereading it."""

        for a in ("_map", "_branch", "_ignore"):
            if a in self.__dict__:
                delattr(self, a)
        self._lastnormaltime = 0
        self._dirty = False
        self._updatedfiles.clear()
        self._parentwriters = 0
        self._origpl = None

    def copy(self, source, dest):
        """Mark dest as a copy of source. Unmark dest if source is None."""
        if source == dest:
            return
        self._dirty = True
        if source is not None:
            self._map.copymap[dest] = source
            self._updatedfiles.add(source)
            self._updatedfiles.add(dest)
        elif self._map.copymap.pop(dest, None):
            self._updatedfiles.add(dest)

    def copied(self, file):
        return self._map.copymap.get(file, None)

    def copies(self):
        return self._map.copymap

    @requires_no_parents_change
    def set_tracked(self, filename):
        """a "public" method for generic code to mark a file as tracked

        This function is to be called outside of the "update/merge" case,
        for example by a command like `hg add X`.

        Returns True if the file was previously untracked, False otherwise.
        """
        entry = self._map.get(filename)
        if entry is None:
            self._add(filename)
            return True
        elif not entry.tracked:
            self.normallookup(filename)
            return True
        return False

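This is the entry point the changeset description refers to: `context.add` can now call `set_tracked` per file and use the returned boolean to warn about already-tracked files. A hedged sketch of that calling convention (paths hypothetical, `repo` assumed):

for f in [b'a.txt', b'b.txt']:
    if not repo.dirstate.set_tracked(f):
        repo.ui.warn(b'%s already tracked!\n' % f)
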
    @requires_parents_change
    def update_file_reference(
        self,
        filename,
        p1_tracked,
    ):
        """Set a file as tracked in the parent (or not)

        This is to be called when adjusting the dirstate to a new parent
        after a history rewriting operation.

        It should not be called during a merge (p2 != nullid), and only
        within a `with dirstate.parentchange():` context.
        """
        if self.in_merge:
            msg = b'update_file_reference should not be called when merging'
            raise error.ProgrammingError(msg)
        entry = self._map.get(filename)
        if entry is None:
            wc_tracked = False
        else:
            wc_tracked = entry.tracked
        if p1_tracked and wc_tracked:
            # the underlying reference might have changed, we will have to
            # check it.
            self.normallookup(filename)
        elif not (p1_tracked or wc_tracked):
            # the file is no longer relevant to anyone
            self._drop(filename)
        elif (not p1_tracked) and wc_tracked:
            if not entry.added:
                self._add(filename)
        elif p1_tracked and not wc_tracked:
            if entry is None or not entry.removed:
                self._remove(filename)
        else:
            assert False, 'unreachable'

    def _addpath(
        self,
        f,
        mode=0,
        size=None,
        mtime=None,
        added=False,
        merged=False,
        from_p2=False,
        possibly_dirty=False,
    ):
        entry = self._map.get(f)
        if added or entry is not None and entry.removed:
            scmutil.checkfilename(f)
            if self._map.hastrackeddir(f):
                msg = _(b'directory %r already in dirstate')
                msg %= pycompat.bytestr(f)
                raise error.Abort(msg)
            # shadows
            for d in pathutil.finddirs(f):
                if self._map.hastrackeddir(d):
                    break
                entry = self._map.get(d)
                if entry is not None and not entry.removed:
                    msg = _(b'file %r in dirstate clashes with %r')
                    msg %= (pycompat.bytestr(d), pycompat.bytestr(f))
                    raise error.Abort(msg)
        self._dirty = True
        self._updatedfiles.add(f)
        self._map.addfile(
            f,
            mode=mode,
            size=size,
            mtime=mtime,
            added=added,
            merged=merged,
            from_p2=from_p2,
            possibly_dirty=possibly_dirty,
        )

    def normal(self, f, parentfiledata=None):
        """Mark a file normal and clean.

        parentfiledata: (mode, size, mtime) of the clean file

        parentfiledata should be computed from memory (for mode and size),
        as close as possible to the point where we determined the file was
        clean, to limit the risk of the file having been changed by an
        external process between the moment the file was determined to be
        clean and now."""
        if parentfiledata:
            (mode, size, mtime) = parentfiledata
        else:
            s = os.lstat(self._join(f))
            mode = s.st_mode
            size = s.st_size
            mtime = s[stat.ST_MTIME]
        self._addpath(f, mode=mode, size=size, mtime=mtime)
        self._map.copymap.pop(f, None)
        if f in self._map.nonnormalset:
            self._map.nonnormalset.remove(f)
        if mtime > self._lastnormaltime:
            # Remember the most recent modification timeslot for status(),
            # to make sure we won't miss future size-preserving file content
            # modifications that happen within the same timeslot.
            self._lastnormaltime = mtime

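A hedged sketch of the fast path the docstring describes: a caller that already stat'ed the file while deciding it was clean passes that data along instead of letting `normal()` re-lstat, narrowing the race window (`repo` assumed, path hypothetical):

st = os.lstat(repo.wjoin(b'foo'))
repo.dirstate.normal(
    b'foo',
    parentfiledata=(st.st_mode, st.st_size, st[stat.ST_MTIME]),
)
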
588 def normallookup(self, f):
588 def normallookup(self, f):
589 '''Mark a file normal, but possibly dirty.'''
589 '''Mark a file normal, but possibly dirty.'''
590 if self.in_merge:
590 if self.in_merge:
591 # if there is a merge going on and the file was either
591 # if there is a merge going on and the file was either
592 # "merged" or coming from other parent (-2) before
592 # "merged" or coming from other parent (-2) before
593 # being removed, restore that state.
593 # being removed, restore that state.
594 entry = self._map.get(f)
594 entry = self._map.get(f)
595 if entry is not None:
595 if entry is not None:
596 # XXX this should probably be dealt with a a lower level
596 # XXX this should probably be dealt with a a lower level
597 # (see `merged_removed` and `from_p2_removed`)
597 # (see `merged_removed` and `from_p2_removed`)
598 if entry.merged_removed or entry.from_p2_removed:
598 if entry.merged_removed or entry.from_p2_removed:
599 source = self._map.copymap.get(f)
599 source = self._map.copymap.get(f)
600 if entry.merged_removed:
600 if entry.merged_removed:
601 self.merge(f)
601 self.merge(f)
602 elif entry.from_p2_removed:
602 elif entry.from_p2_removed:
603 self.otherparent(f)
603 self.otherparent(f)
604 if source is not None:
604 if source is not None:
605 self.copy(source, f)
605 self.copy(source, f)
606 return
606 return
607 elif entry.merged or entry.from_p2:
607 elif entry.merged or entry.from_p2:
608 return
608 return
609 self._addpath(f, possibly_dirty=True)
609 self._addpath(f, possibly_dirty=True)
610 self._map.copymap.pop(f, None)
610 self._map.copymap.pop(f, None)
611
611
612 def otherparent(self, f):
612 def otherparent(self, f):
613 '''Mark as coming from the other parent, always dirty.'''
613 '''Mark as coming from the other parent, always dirty.'''
614 if not self.in_merge:
614 if not self.in_merge:
615 msg = _(b"setting %r to other parent only allowed in merges") % f
615 msg = _(b"setting %r to other parent only allowed in merges") % f
616 raise error.Abort(msg)
616 raise error.Abort(msg)
617 entry = self._map.get(f)
617 entry = self._map.get(f)
618 if entry is not None and entry.tracked:
618 if entry is not None and entry.tracked:
619 # merge-like
619 # merge-like
620 self._addpath(f, merged=True)
620 self._addpath(f, merged=True)
621 else:
621 else:
622 # add-like
622 # add-like
623 self._addpath(f, from_p2=True)
623 self._addpath(f, from_p2=True)
624 self._map.copymap.pop(f, None)
624 self._map.copymap.pop(f, None)
625
625
626 def add(self, f):
626 def add(self, f):
627 '''Mark a file added.'''
627 '''Mark a file added.'''
628 self._add(f)
628 self._add(f)
629
629
630 def _add(self, filename):
630 def _add(self, filename):
631 """internal function to mark a file as added"""
631 """internal function to mark a file as added"""
632 self._addpath(filename, added=True)
632 self._addpath(filename, added=True)
633 self._map.copymap.pop(filename, None)
633 self._map.copymap.pop(filename, None)
634
634
635 def remove(self, f):
635 def remove(self, f):
636 '''Mark a file removed'''
636 '''Mark a file removed'''
637 self._remove(f)
637 self._remove(f)
638
638
639 def _remove(self, filename):
639 def _remove(self, filename):
640 """internal function to mark a file removed"""
640 """internal function to mark a file removed"""
641 self._dirty = True
641 self._dirty = True
642 self._updatedfiles.add(filename)
642 self._updatedfiles.add(filename)
643 self._map.removefile(filename, in_merge=self.in_merge)
643 self._map.removefile(filename, in_merge=self.in_merge)
644
644
645 def merge(self, f):
645 def merge(self, f):
646 '''Mark a file merged.'''
646 '''Mark a file merged.'''
647 if not self.in_merge:
647 if not self.in_merge:
648 return self.normallookup(f)
648 return self.normallookup(f)
649 return self.otherparent(f)
649 return self.otherparent(f)
650
650
651 def drop(self, f):
651 def drop(self, f):
652 '''Drop a file from the dirstate'''
652 '''Drop a file from the dirstate'''
653 self._drop(f)
653 self._drop(f)
654
654
655 def _drop(self, filename):
655 def _drop(self, filename):
656 """internal function to drop a file from the dirstate"""
656 """internal function to drop a file from the dirstate"""
657 if self._map.dropfile(filename):
657 if self._map.dropfile(filename):
658 self._dirty = True
658 self._dirty = True
659 self._updatedfiles.add(filename)
659 self._updatedfiles.add(filename)
660 self._map.copymap.pop(filename, None)
660 self._map.copymap.pop(filename, None)
661
661
662 def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
662 def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
663 if exists is None:
663 if exists is None:
664 exists = os.path.lexists(os.path.join(self._root, path))
664 exists = os.path.lexists(os.path.join(self._root, path))
665 if not exists:
665 if not exists:
666 # Maybe a path component exists
666 # Maybe a path component exists
667 if not ignoremissing and b'/' in path:
667 if not ignoremissing and b'/' in path:
668 d, f = path.rsplit(b'/', 1)
668 d, f = path.rsplit(b'/', 1)
669 d = self._normalize(d, False, ignoremissing, None)
669 d = self._normalize(d, False, ignoremissing, None)
670 folded = d + b"/" + f
670 folded = d + b"/" + f
671 else:
671 else:
672 # No path components, preserve original case
672 # No path components, preserve original case
673 folded = path
673 folded = path
674 else:
674 else:
675 # recursively normalize leading directory components
675 # recursively normalize leading directory components
676 # against dirstate
676 # against dirstate
677 if b'/' in normed:
677 if b'/' in normed:
678 d, f = normed.rsplit(b'/', 1)
678 d, f = normed.rsplit(b'/', 1)
679 d = self._normalize(d, False, ignoremissing, True)
679 d = self._normalize(d, False, ignoremissing, True)
680 r = self._root + b"/" + d
680 r = self._root + b"/" + d
681 folded = d + b"/" + util.fspath(f, r)
681 folded = d + b"/" + util.fspath(f, r)
682 else:
682 else:
683 folded = util.fspath(normed, self._root)
683 folded = util.fspath(normed, self._root)
684 storemap[normed] = folded
684 storemap[normed] = folded
685
685
686 return folded
686 return folded
687
687
688 def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
688 def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
689 normed = util.normcase(path)
689 normed = util.normcase(path)
690 folded = self._map.filefoldmap.get(normed, None)
690 folded = self._map.filefoldmap.get(normed, None)
691 if folded is None:
691 if folded is None:
692 if isknown:
692 if isknown:
693 folded = path
693 folded = path
694 else:
694 else:
695 folded = self._discoverpath(
695 folded = self._discoverpath(
696 path, normed, ignoremissing, exists, self._map.filefoldmap
696 path, normed, ignoremissing, exists, self._map.filefoldmap
697 )
697 )
698 return folded
698 return folded
699
699
700 def _normalize(self, path, isknown, ignoremissing=False, exists=None):
700 def _normalize(self, path, isknown, ignoremissing=False, exists=None):
701 normed = util.normcase(path)
701 normed = util.normcase(path)
702 folded = self._map.filefoldmap.get(normed, None)
702 folded = self._map.filefoldmap.get(normed, None)
703 if folded is None:
703 if folded is None:
704 folded = self._map.dirfoldmap.get(normed, None)
704 folded = self._map.dirfoldmap.get(normed, None)
705 if folded is None:
705 if folded is None:
706 if isknown:
706 if isknown:
707 folded = path
707 folded = path
708 else:
708 else:
709 # store discovered result in dirfoldmap so that future
709 # store discovered result in dirfoldmap so that future
710 # normalizefile calls don't start matching directories
710 # normalizefile calls don't start matching directories
711 folded = self._discoverpath(
711 folded = self._discoverpath(
712 path, normed, ignoremissing, exists, self._map.dirfoldmap
712 path, normed, ignoremissing, exists, self._map.dirfoldmap
713 )
713 )
714 return folded
714 return folded
715
715
716 def normalize(self, path, isknown=False, ignoremissing=False):
716 def normalize(self, path, isknown=False, ignoremissing=False):
717 """
717 """
718 normalize the case of a pathname when on a casefolding filesystem
718 normalize the case of a pathname when on a casefolding filesystem
719
719
720 isknown specifies whether the filename came from walking the
720 isknown specifies whether the filename came from walking the
721 disk, to avoid extra filesystem access.
721 disk, to avoid extra filesystem access.
722
722
723 If ignoremissing is True, missing path are returned
723 If ignoremissing is True, missing path are returned
724 unchanged. Otherwise, we try harder to normalize possibly
724 unchanged. Otherwise, we try harder to normalize possibly
725 existing path components.
725 existing path components.
726
726
727 The normalized case is determined based on the following precedence:
727 The normalized case is determined based on the following precedence:
728
728
729 - version of name already stored in the dirstate
729 - version of name already stored in the dirstate
730 - version of name stored on disk
730 - version of name stored on disk
731 - version provided via command arguments
731 - version provided via command arguments
732 """
732 """
733
733
734 if self._checkcase:
734 if self._checkcase:
735 return self._normalize(path, isknown, ignoremissing)
735 return self._normalize(path, isknown, ignoremissing)
736 return path
736 return path
737
737
738 def clear(self):
738 def clear(self):
739 self._map.clear()
739 self._map.clear()
740 self._lastnormaltime = 0
740 self._lastnormaltime = 0
741 self._updatedfiles.clear()
741 self._updatedfiles.clear()
742 self._dirty = True
742 self._dirty = True
743
743
744 def rebuild(self, parent, allfiles, changedfiles=None):
744 def rebuild(self, parent, allfiles, changedfiles=None):
745 if changedfiles is None:
745 if changedfiles is None:
746 # Rebuild entire dirstate
746 # Rebuild entire dirstate
747 to_lookup = allfiles
747 to_lookup = allfiles
748 to_drop = []
748 to_drop = []
749 lastnormaltime = self._lastnormaltime
749 lastnormaltime = self._lastnormaltime
750 self.clear()
750 self.clear()
751 self._lastnormaltime = lastnormaltime
751 self._lastnormaltime = lastnormaltime
752 elif len(changedfiles) < 10:
752 elif len(changedfiles) < 10:
753 # Avoid turning allfiles into a set, which can be expensive if it's
753 # Avoid turning allfiles into a set, which can be expensive if it's
754 # large.
754 # large.
755 to_lookup = []
755 to_lookup = []
756 to_drop = []
756 to_drop = []
757 for f in changedfiles:
757 for f in changedfiles:
758 if f in allfiles:
758 if f in allfiles:
759 to_lookup.append(f)
759 to_lookup.append(f)
760 else:
760 else:
761 to_drop.append(f)
761 to_drop.append(f)
762 else:
762 else:
763 changedfilesset = set(changedfiles)
763 changedfilesset = set(changedfiles)
764 to_lookup = changedfilesset & set(allfiles)
764 to_lookup = changedfilesset & set(allfiles)
765 to_drop = changedfilesset - to_lookup
765 to_drop = changedfilesset - to_lookup
766
766
767 if self._origpl is None:
767 if self._origpl is None:
768 self._origpl = self._pl
768 self._origpl = self._pl
769 self._map.setparents(parent, self._nodeconstants.nullid)
769 self._map.setparents(parent, self._nodeconstants.nullid)
770
770
771 for f in to_lookup:
771 for f in to_lookup:
772 self.normallookup(f)
772 self.normallookup(f)
773 for f in to_drop:
773 for f in to_drop:
774 self._drop(f)
774 self._drop(f)
775
775
776 self._dirty = True
776 self._dirty = True
777
777
778 def identity(self):
778 def identity(self):
779 """Return identity of dirstate itself to detect changing in storage
779 """Return identity of dirstate itself to detect changing in storage
780
780
781 If identity of previous dirstate is equal to this, writing
781 If identity of previous dirstate is equal to this, writing
782 changes based on the former dirstate out can keep consistency.
782 changes based on the former dirstate out can keep consistency.
783 """
783 """
784 return self._map.identity
784 return self._map.identity
785
785
786 def write(self, tr):
786 def write(self, tr):
787 if not self._dirty:
787 if not self._dirty:
788 return
788 return
789
789
790 filename = self._filename
790 filename = self._filename
791 if tr:
791 if tr:
792 # 'dirstate.write()' is not only for writing in-memory
792 # 'dirstate.write()' is not only for writing in-memory
793 # changes out, but also for dropping ambiguous timestamp.
793 # changes out, but also for dropping ambiguous timestamp.
794 # delayed writing re-raise "ambiguous timestamp issue".
794 # delayed writing re-raise "ambiguous timestamp issue".
795 # See also the wiki page below for detail:
795 # See also the wiki page below for detail:
796 # https://www.mercurial-scm.org/wiki/DirstateTransactionPlan
796 # https://www.mercurial-scm.org/wiki/DirstateTransactionPlan
797
797
798 # emulate dropping timestamp in 'parsers.pack_dirstate'
798 # emulate dropping timestamp in 'parsers.pack_dirstate'
799 now = _getfsnow(self._opener)
799 now = _getfsnow(self._opener)
800 self._map.clearambiguoustimes(self._updatedfiles, now)
800 self._map.clearambiguoustimes(self._updatedfiles, now)
801
801
802 # emulate that all 'dirstate.normal' results are written out
802 # emulate that all 'dirstate.normal' results are written out
803 self._lastnormaltime = 0
803 self._lastnormaltime = 0
804 self._updatedfiles.clear()
804 self._updatedfiles.clear()
805
805
806 # delay writing in-memory changes out
806 # delay writing in-memory changes out
807 tr.addfilegenerator(
807 tr.addfilegenerator(
808 b'dirstate',
808 b'dirstate',
809 (self._filename,),
809 (self._filename,),
810 self._writedirstate,
810 self._writedirstate,
811 location=b'plain',
811 location=b'plain',
812 )
812 )
813 return
813 return
814
814
815 st = self._opener(filename, b"w", atomictemp=True, checkambig=True)
815 st = self._opener(filename, b"w", atomictemp=True, checkambig=True)
816 self._writedirstate(st)
816 self._writedirstate(st)
817
817
    def addparentchangecallback(self, category, callback):
        """add a callback to be called when the wd parents are changed

        Callback will be called with the following arguments:
            dirstate, (oldp1, oldp2), (newp1, newp2)

        Category is a unique identifier to allow overwriting an old callback
        with a newer callback.
        """
        self._plchangecallbacks[category] = callback

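    # Hedged usage sketch (hypothetical extension code; the category name
    # b'mylogger' is illustrative, not an existing consumer): log every
    # working-directory parent move.
    #
    #     def _logparents(dirstate, old, new):
    #         dirstate._ui.debug(b'parents: %r -> %r\n' % (old, new))
    #
    #     repo.dirstate.addparentchangecallback(b'mylogger', _logparents)
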
    def _writedirstate(self, st):
        # notify callbacks about parents change
        if self._origpl is not None and self._origpl != self._pl:
            for c, callback in sorted(
                pycompat.iteritems(self._plchangecallbacks)
            ):
                callback(self, self._origpl, self._pl)
            self._origpl = None
        # use the modification time of the newly created temporary file as the
        # filesystem's notion of 'now'
        now = util.fstat(st)[stat.ST_MTIME] & _rangemask

        # a large enough 'delaywrite' prevents 'pack_dirstate' from dropping
        # the timestamp of each entry in the dirstate because of 'now > mtime'
        delaywrite = self._ui.configint(b'debug', b'dirstate.delaywrite')
        if delaywrite > 0:
            # do we have any files to delay for?
            for f, e in pycompat.iteritems(self._map):
                if e.need_delay(now):
                    import time  # imported here to avoid a useless import

                    # rather than sleep n seconds, sleep until the next
                    # multiple of n seconds
                    clock = time.time()
                    start = int(clock) - (int(clock) % delaywrite)
                    end = start + delaywrite
                    time.sleep(end - clock)
                    now = end  # trust our estimate that the end is near now
                    break

        self._map.write(st, now)
        self._lastnormaltime = 0
        self._dirty = False

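    # Worked example of the delay arithmetic above (illustrative numbers):
    # with delaywrite = 2 and clock = 103.4, start = 103 - (103 % 2) = 102
    # and end = 104, so the write sleeps 0.6s and records 104 as 'now'; per
    # the surrounding comments, the intent is that entry mtimes end up
    # strictly older than the recorded 'now'.
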
    def _dirignore(self, f):
        if self._ignore(f):
            return True
        for p in pathutil.finddirs(f):
            if self._ignore(p):
                return True
        return False

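    # Illustrative behaviour (hypothetical ignore rule): with a pattern
    # ignoring b'build', _dirignore(b'build/obj/a.o') returns True because
    # the ancestor directory b'build' matches, even though the file path
    # itself does not.
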
    def _ignorefiles(self):
        files = []
        if os.path.exists(self._join(b'.hgignore')):
            files.append(self._join(b'.hgignore'))
        for name, path in self._ui.configitems(b"ui"):
            if name == b'ignore' or name.startswith(b'ignore.'):
                # we need to use os.path.join here rather than self._join
                # because path is arbitrary and user-specified
                files.append(os.path.join(self._rootdir, util.expandpath(path)))
        return files

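    # Illustrative configuration picked up by the loop above (the paths are
    # hypothetical): an hgrc containing
    #
    #     [ui]
    #     ignore = ~/.hgignore-global
    #     ignore.work = ~/work/.hgignore
    #
    # contributes both expanded paths in addition to the repository's own
    # '.hgignore'.
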
    def _ignorefileandline(self, f):
        files = collections.deque(self._ignorefiles())
        visited = set()
        while files:
            i = files.popleft()
            patterns = matchmod.readpatternfile(
                i, self._ui.warn, sourceinfo=True
            )
            for pattern, lineno, line in patterns:
                kind, p = matchmod._patsplit(pattern, b'glob')
                if kind == b"subinclude":
                    if p not in visited:
                        files.append(p)
                    continue
                m = matchmod.match(
                    self._root, b'', [], [pattern], warn=self._ui.warn
                )
                if m(f):
                    return (i, lineno, line)
            visited.add(i)
        return (None, -1, b"")

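    # Note: this is a breadth-first scan over the ignore files and their
    # 'subinclude:' children; the first pattern matching 'f' wins, and the
    # returned (file, line number, line) triple is the kind of information
    # that 'hg debugignore FILE' reports.
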
    def _walkexplicit(self, match, subrepos):
        """Get stat data about the files explicitly specified by match.

        Return a triple (results, dirsfound, dirsnotfound).
        - results is a mapping from filename to stat result. It also contains
          listings mapping subrepos and .hg to None.
        - dirsfound is a list of files found to be directories.
        - dirsnotfound is a list of files that the dirstate thinks are
          directories and that were not found."""

        def badtype(mode):
            kind = _(b'unknown')
            if stat.S_ISCHR(mode):
                kind = _(b'character device')
            elif stat.S_ISBLK(mode):
                kind = _(b'block device')
            elif stat.S_ISFIFO(mode):
                kind = _(b'fifo')
            elif stat.S_ISSOCK(mode):
                kind = _(b'socket')
            elif stat.S_ISDIR(mode):
                kind = _(b'directory')
            return _(b'unsupported file type (type is %s)') % kind

        badfn = match.bad
        dmap = self._map
        lstat = os.lstat
        getkind = stat.S_IFMT
        dirkind = stat.S_IFDIR
        regkind = stat.S_IFREG
        lnkkind = stat.S_IFLNK
        join = self._join
        dirsfound = []
        foundadd = dirsfound.append
        dirsnotfound = []
        notfoundadd = dirsnotfound.append

        if not match.isexact() and self._checkcase:
            normalize = self._normalize
        else:
            normalize = None

        files = sorted(match.files())
        subrepos.sort()
        i, j = 0, 0
        while i < len(files) and j < len(subrepos):
            subpath = subrepos[j] + b"/"
            if files[i] < subpath:
                i += 1
                continue
            while i < len(files) and files[i].startswith(subpath):
                del files[i]
            j += 1

        if not files or b'' in files:
            files = [b'']
            # constructing the foldmap is expensive, so don't do it for the
            # common case where files is ['']
            normalize = None
        results = dict.fromkeys(subrepos)
        results[b'.hg'] = None

        for ff in files:
            if normalize:
                nf = normalize(ff, False, True)
            else:
                nf = ff
            if nf in results:
                continue

            try:
                st = lstat(join(nf))
                kind = getkind(st.st_mode)
                if kind == dirkind:
                    if nf in dmap:
                        # file replaced by dir on disk but still in dirstate
                        results[nf] = None
                    foundadd((nf, ff))
                elif kind == regkind or kind == lnkkind:
                    results[nf] = st
                else:
                    badfn(ff, badtype(kind))
                    if nf in dmap:
                        results[nf] = None
            except OSError as inst:  # nf not found on disk - it is dirstate only
                if nf in dmap:  # does it exactly match a missing file?
                    results[nf] = None
                else:  # does it match a missing directory?
                    if self._map.hasdir(nf):
                        notfoundadd(nf)
                    else:
                        badfn(ff, encoding.strtolocal(inst.strerror))

        # match.files() may contain explicitly-specified paths that shouldn't
        # be taken; drop them from the list of files found. dirsfound/notfound
        # aren't filtered here because they will be tested later.
        if match.anypats():
            for f in list(results):
                if f == b'.hg' or f in subrepos:
                    # keep sentinel to disable further out-of-repo walks
                    continue
                if not match(f):
                    del results[f]

        # Case-insensitive filesystems cannot rely on lstat() failing to detect
        # a case-only rename. Prune the stat object for any file that does not
        # match the case in the filesystem, if there are multiple files that
        # normalize to the same path.
        if match.isexact() and self._checkcase:
            normed = {}

            for f, st in pycompat.iteritems(results):
                if st is None:
                    continue

                nc = util.normcase(f)
                paths = normed.get(nc)

                if paths is None:
                    paths = set()
                    normed[nc] = paths

                paths.add(f)

            for norm, paths in pycompat.iteritems(normed):
                if len(paths) > 1:
                    for path in paths:
                        folded = self._discoverpath(
                            path, norm, True, None, self._map.dirfoldmap
                        )
                        if path != folded:
                            results[path] = None

        return results, dirsfound, dirsnotfound

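    # Worked example of the subrepo pruning loop above (illustrative values):
    # with files = [b'a', b'sub/x', b'sub/y', b'z'] and subrepos = [b'sub'],
    # both paths under b'sub/' are deleted from 'files', leaving
    # [b'a', b'z']; the subrepo walks its own contents separately.
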
    def walk(self, match, subrepos, unknown, ignored, full=True):
        """
        Walk recursively through the directory tree, finding all files
        matched by match.

        If full is False, maybe skip some known-clean files.

        Return a dict mapping filename to stat-like object (either
        mercurial.osutil.stat instance or return value of os.stat()).

        """
        # full is a flag that extensions that hook into walk can use -- this
        # implementation doesn't use it at all. This satisfies the contract
        # because we only guarantee a "maybe".

        if ignored:
            ignore = util.never
            dirignore = util.never
        elif unknown:
            ignore = self._ignore
            dirignore = self._dirignore
        else:
            # if not unknown and not ignored, drop dir recursion and step 2
            ignore = util.always
            dirignore = util.always

        matchfn = match.matchfn
        matchalways = match.always()
        matchtdir = match.traversedir
        dmap = self._map
        listdir = util.listdir
        lstat = os.lstat
        dirkind = stat.S_IFDIR
        regkind = stat.S_IFREG
        lnkkind = stat.S_IFLNK
        join = self._join

        exact = skipstep3 = False
        if match.isexact():  # match.exact
            exact = True
            dirignore = util.always  # skip step 2
        elif match.prefix():  # match.match, no patterns
            skipstep3 = True

        if not exact and self._checkcase:
            normalize = self._normalize
            normalizefile = self._normalizefile
            skipstep3 = False
        else:
            normalize = self._normalize
            normalizefile = None

        # step 1: find all explicit files
        results, work, dirsnotfound = self._walkexplicit(match, subrepos)
        if matchtdir:
            for d in work:
                matchtdir(d[0])
            for d in dirsnotfound:
                matchtdir(d)

        skipstep3 = skipstep3 and not (work or dirsnotfound)
        work = [d for d in work if not dirignore(d[0])]

        # step 2: visit subdirectories
        def traverse(work, alreadynormed):
            wadd = work.append
            while work:
                tracing.counter('dirstate.walk work', len(work))
                nd = work.pop()
                visitentries = match.visitchildrenset(nd)
                if not visitentries:
                    continue
                if visitentries == b'this' or visitentries == b'all':
                    visitentries = None
                skip = None
                if nd != b'':
                    skip = b'.hg'
                try:
                    with tracing.log('dirstate.walk.traverse listdir %s', nd):
                        entries = listdir(join(nd), stat=True, skip=skip)
                except OSError as inst:
                    if inst.errno in (errno.EACCES, errno.ENOENT):
                        match.bad(
                            self.pathto(nd), encoding.strtolocal(inst.strerror)
                        )
                        continue
                    raise
                for f, kind, st in entries:
                    # Some matchers may return files in the visitentries set,
                    # instead of 'this', if the matcher explicitly mentions them
                    # and is not an exactmatcher. This is acceptable; we do not
                    # make any hard assumptions about file-or-directory below
                    # based on the presence of `f` in visitentries. If
                    # visitchildrenset returned a set, we can always skip the
                    # entries *not* in the set it provided regardless of whether
                    # they're actually a file or a directory.
                    if visitentries and f not in visitentries:
                        continue
                    if normalizefile:
                        # even though f might be a directory, we're only
                        # interested in comparing it to files currently in the
                        # dmap -- therefore normalizefile is enough
                        nf = normalizefile(
                            nd and (nd + b"/" + f) or f, True, True
                        )
                    else:
                        nf = nd and (nd + b"/" + f) or f
                    if nf not in results:
                        if kind == dirkind:
                            if not ignore(nf):
                                if matchtdir:
                                    matchtdir(nf)
                                wadd(nf)
                            if nf in dmap and (matchalways or matchfn(nf)):
                                results[nf] = None
                        elif kind == regkind or kind == lnkkind:
                            if nf in dmap:
                                if matchalways or matchfn(nf):
                                    results[nf] = st
                            elif (matchalways or matchfn(nf)) and not ignore(nf):
                                # unknown file -- normalize if necessary
                                if not alreadynormed:
                                    nf = normalize(nf, False, True)
                                results[nf] = st
                        elif nf in dmap and (matchalways or matchfn(nf)):
                            results[nf] = None

        for nd, d in work:
            # alreadynormed means that processwork doesn't have to do any
            # expensive directory normalization
            alreadynormed = not normalize or nd == d
            traverse([d], alreadynormed)

        for s in subrepos:
            del results[s]
        del results[b'.hg']

        # step 3: visit remaining files from dmap
        if not skipstep3 and not exact:
            # If a dmap file is not in results yet, it was either
            # a) not matching matchfn, b) ignored, c) missing, or d) under a
            # symlink directory.
            if not results and matchalways:
                visit = [f for f in dmap]
            else:
                visit = [f for f in dmap if f not in results and matchfn(f)]
            visit.sort()

            if unknown:
                # unknown == True means we walked all dirs under the roots
                # that weren't ignored, and everything that matched was stat'ed
                # and is already in results.
                # The rest must thus be ignored or under a symlink.
                audit_path = pathutil.pathauditor(self._root, cached=True)

                for nf in iter(visit):
                    # If a stat for the same file was already added with a
                    # different case, don't add one for this, since that would
                    # make it appear as if the file exists under both names
                    # on disk.
                    if (
                        normalizefile
                        and normalizefile(nf, True, True) in results
                    ):
                        results[nf] = None
                    # Report ignored items in the dmap as long as they are not
                    # under a symlink directory.
                    elif audit_path.check(nf):
                        try:
                            results[nf] = lstat(join(nf))
                            # file was just ignored, no links, and exists
                        except OSError:
                            # file doesn't exist
                            results[nf] = None
                    else:
                        # It's either missing or under a symlink directory
                        # which we in this case report as missing
                        results[nf] = None
            else:
                # We may not have walked the full directory tree above,
                # so stat and check everything we missed.
                iv = iter(visit)
                for st in util.statfiles([join(i) for i in visit]):
                    results[next(iv)] = st
        return results

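    # Hedged usage sketch (hypothetical caller): stat all tracked and
    # unknown files under the repository root.
    #
    #     m = matchmod.always()
    #     for fn, st in repo.dirstate.walk(m, [], True, False).items():
    #         pass  # st is stat-like, or None for dirstate-only entries
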
    def _rust_status(self, matcher, list_clean, list_ignored, list_unknown):
        # Force Rayon (Rust parallelism library) to respect the number of
        # workers. This is a temporary workaround until Rust code knows
        # how to read the config file.
        numcpus = self._ui.configint(b"worker", b"numcpus")
        if numcpus is not None:
            encoding.environ.setdefault(b'RAYON_NUM_THREADS', b'%d' % numcpus)

        workers_enabled = self._ui.configbool(b"worker", b"enabled", True)
        if not workers_enabled:
            encoding.environ[b"RAYON_NUM_THREADS"] = b"1"

        (
            lookup,
            modified,
            added,
            removed,
            deleted,
            clean,
            ignored,
            unknown,
            warnings,
            bad,
            traversed,
            dirty,
        ) = rustmod.status(
            self._map._rustmap,
            matcher,
            self._rootdir,
            self._ignorefiles(),
            self._checkexec,
            self._lastnormaltime,
            bool(list_clean),
            bool(list_ignored),
            bool(list_unknown),
            bool(matcher.traversedir),
        )

        self._dirty |= dirty

        if matcher.traversedir:
            for dir in traversed:
                matcher.traversedir(dir)

        if self._ui.warn:
            for item in warnings:
                if isinstance(item, tuple):
                    file_path, syntax = item
                    msg = _(b"%s: ignoring invalid syntax '%s'\n") % (
                        file_path,
                        syntax,
                    )
                    self._ui.warn(msg)
                else:
                    msg = _(b"skipping unreadable pattern file '%s': %s\n")
                    self._ui.warn(
                        msg
                        % (
                            pathutil.canonpath(
                                self._rootdir, self._rootdir, item
                            ),
                            b"No such file or directory",
                        )
                    )

        for (fn, message) in bad:
            matcher.bad(fn, encoding.strtolocal(message))

        status = scmutil.status(
            modified=modified,
            added=added,
            removed=removed,
            deleted=deleted,
            unknown=unknown,
            ignored=ignored,
            clean=clean,
        )
        return (lookup, status)

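    # Illustrative configuration feeding the environment tweaks above: with
    #
    #     [worker]
    #     numcpus = 4
    #     enabled = False
    #
    # RAYON_NUM_THREADS is first defaulted to b'4' (only when not already
    # set in the environment) and then forced to b'1', since disabling
    # workers takes precedence over the CPU count.
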
    def status(self, match, subrepos, ignored, clean, unknown):
        """Determine the status of the working copy relative to the
        dirstate and return a pair of (unsure, status), where status is of type
        scmutil.status and:

          unsure:
            files that might have been modified since the dirstate was
            written, but need to be read to be sure (size is the same
            but mtime differs)
          status.modified:
            files that have definitely been modified since the dirstate
            was written (different size or mode)
          status.clean:
            files that have definitely not been modified since the
            dirstate was written
        """
        listignored, listclean, listunknown = ignored, clean, unknown
        lookup, modified, added, unknown, ignored = [], [], [], [], []
        removed, deleted, clean = [], [], []

        dmap = self._map
        dmap.preload()

        use_rust = True

        allowed_matchers = (
            matchmod.alwaysmatcher,
            matchmod.exactmatcher,
            matchmod.includematcher,
        )

        if rustmod is None:
            use_rust = False
        elif self._checkcase:
            # Case-insensitive filesystems are not handled yet
            use_rust = False
        elif subrepos:
            use_rust = False
        elif sparse.enabled:
            use_rust = False
        elif not isinstance(match, allowed_matchers):
            # Some matchers have yet to be implemented
            use_rust = False

        if use_rust:
            try:
                return self._rust_status(
                    match, listclean, listignored, listunknown
                )
            except rustmod.FallbackError:
                pass

        def noop(f):
            pass

        dcontains = dmap.__contains__
        dget = dmap.__getitem__
        ladd = lookup.append  # aka "unsure"
        madd = modified.append
        aadd = added.append
        uadd = unknown.append if listunknown else noop
        iadd = ignored.append if listignored else noop
        radd = removed.append
        dadd = deleted.append
        cadd = clean.append if listclean else noop
        mexact = match.exact
        dirignore = self._dirignore
        checkexec = self._checkexec
        copymap = self._map.copymap
        lastnormaltime = self._lastnormaltime

        # We need to do full walks when either
        # - we're listing all clean files, or
        # - match.traversedir does something, because match.traversedir should
        #   be called for every dir in the working dir
        full = listclean or match.traversedir is not None
        for fn, st in pycompat.iteritems(
            self.walk(match, subrepos, listunknown, listignored, full=full)
        ):
            if not dcontains(fn):
                if (listignored or mexact(fn)) and dirignore(fn):
                    if listignored:
                        iadd(fn)
                else:
                    uadd(fn)
                continue

            # This is equivalent to 'state, mode, size, time = dmap[fn]' but
            # not written like that for performance reasons. dmap[fn] is not a
            # Python tuple in compiled builds. The CPython UNPACK_SEQUENCE
            # opcode has fast paths when the value to be unpacked is a tuple or
            # a list, but falls back to creating a full-fledged iterator in
            # general. That is much slower than simply accessing and storing
            # the tuple members one by one.
            t = dget(fn)
            mode = t.mode
            size = t.size
            time = t.mtime

            if not st and t.tracked:
                dadd(fn)
            elif t.merged:
                madd(fn)
            elif t.added:
                aadd(fn)
            elif t.removed:
                radd(fn)
            elif t.tracked:
                if (
                    size >= 0
                    and (
                        (size != st.st_size and size != st.st_size & _rangemask)
                        or ((mode ^ st.st_mode) & 0o100 and checkexec)
                    )
                    or t.from_p2
                    or fn in copymap
                ):
                    if stat.S_ISLNK(st.st_mode) and size != st.st_size:
                        # issue6456: the size returned by stat() may be larger
                        # due to EXT4 fscrypt encryption, so whether the file
                        # changed is undecided -- force a lookup.
                        ladd(fn)
                    else:
                        madd(fn)
                elif (
                    time != st[stat.ST_MTIME]
                    and time != st[stat.ST_MTIME] & _rangemask
                ):
                    ladd(fn)
                elif st[stat.ST_MTIME] == lastnormaltime:
                    # fn may have just been marked as normal and it may have
                    # changed in the same second without changing its size.
                    # This can happen if we quickly do multiple commits.
                    # Force lookup, so we don't miss such a racy file change.
                    ladd(fn)
                elif listclean:
                    cadd(fn)
        status = scmutil.status(
            modified, added, removed, deleted, unknown, ignored, clean
        )
        return (lookup, status)

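    # Hedged usage sketch (hypothetical caller): a plain whole-repository
    # status that does not list ignored or clean files.
    #
    #     unsure, st = repo.dirstate.status(
    #         matchmod.always(), [], ignored=False, clean=False, unknown=True
    #     )
    #     # entries in 'unsure' still need a content read to be classified;
    #     # st.modified, st.added, etc. are definitive.
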
    def matches(self, match):
        """
        return files in the dirstate (in whatever state) filtered by match
        """
        dmap = self._map
        if rustmod is not None:
            dmap = self._map._rustmap

        if match.always():
            return dmap.keys()
        files = match.files()
        if match.isexact():
            # fast path -- filter the other way around, since typically files
            # is much smaller than dmap
            return [f for f in files if f in dmap]
        if match.prefix() and all(fn in dmap for fn in files):
            # fast path -- all the values are known to be files, so just
            # return that
            return list(files)
        return [f for f in dmap if match(f)]

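    # Complexity note on the fast paths above: for an exact matcher the cost
    # is one dict lookup per requested file, so matching 3 files against a
    # 100,000-entry dirstate costs 3 membership tests instead of 100,000
    # match() evaluations in the general list comprehension.
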
    def _actualfilename(self, tr):
        if tr:
            return self._pendingfilename
        else:
            return self._filename

    def savebackup(self, tr, backupname):
        '''Save current dirstate into backup file'''
        filename = self._actualfilename(tr)
        assert backupname != filename

        # use '_writedirstate' instead of 'write' to make sure changes are
        # written out, because the latter skips writing while a transaction
        # is running. The output file is used to create the backup of the
        # dirstate at this point.
        if self._dirty or not self._opener.exists(filename):
            self._writedirstate(
                self._opener(filename, b"w", atomictemp=True, checkambig=True)
            )

        if tr:
            # ensure that subsequent tr.writepending returns True for
            # changes written out above, even if dirstate is never
            # changed after this
            tr.addfilegenerator(
                b'dirstate',
                (self._filename,),
                self._writedirstate,
                location=b'plain',
            )

            # ensure that pending file written above is unlinked at
            # failure, even if tr.writepending isn't invoked until the
            # end of this transaction
            tr.registertmp(filename, location=b'plain')

        self._opener.tryunlink(backupname)
        # hardlink backup is okay because _writedirstate is always called
        # with an "atomictemp=True" file.
        util.copyfile(
            self._opener.join(filename),
            self._opener.join(backupname),
            hardlink=True,
        )

    def restorebackup(self, tr, backupname):
        '''Restore dirstate from backup file'''
        # this "invalidate()" prevents "wlock.release()" from writing
        # changes of dirstate out after restoring from backup file
        self.invalidate()
        filename = self._actualfilename(tr)
        o = self._opener
        if util.samefile(o.join(backupname), o.join(filename)):
            o.unlink(backupname)
        else:
            o.rename(backupname, filename, checkambig=True)

    def clearbackup(self, tr, backupname):
        '''Clear backup file'''
        self._opener.unlink(backupname)
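
    # Hedged lifecycle sketch tying the three backup methods together
    # (hypothetical caller code; the backup name is illustrative):
    #
    #     repo.dirstate.savebackup(tr, b'dirstate.backup')
    #     try:
    #         pass  # risky working-copy operation
    #     except Exception:
    #         repo.dirstate.restorebackup(tr, b'dirstate.backup')
    #         raise
    #     else:
    #         repo.dirstate.clearbackup(tr, b'dirstate.backup')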