##// END OF EJS Templates
dirstate-item: use `tracked` instead of the `state` in context's iter...
marmoute -
r48906:3fe500d1 default
parent child Browse files
Show More
@@ -1,3123 +1,3123 b''
1 # context.py - changeset and file context objects for mercurial
1 # context.py - changeset and file context objects for mercurial
2 #
2 #
3 # Copyright 2006, 2007 Olivia Mackall <olivia@selenic.com>
3 # Copyright 2006, 2007 Olivia Mackall <olivia@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import errno
10 import errno
11 import filecmp
11 import filecmp
12 import os
12 import os
13 import stat
13 import stat
14
14
15 from .i18n import _
15 from .i18n import _
16 from .node import (
16 from .node import (
17 hex,
17 hex,
18 nullrev,
18 nullrev,
19 short,
19 short,
20 )
20 )
21 from .pycompat import (
21 from .pycompat import (
22 getattr,
22 getattr,
23 open,
23 open,
24 )
24 )
25 from . import (
25 from . import (
26 dagop,
26 dagop,
27 encoding,
27 encoding,
28 error,
28 error,
29 fileset,
29 fileset,
30 match as matchmod,
30 match as matchmod,
31 mergestate as mergestatemod,
31 mergestate as mergestatemod,
32 metadata,
32 metadata,
33 obsolete as obsmod,
33 obsolete as obsmod,
34 patch,
34 patch,
35 pathutil,
35 pathutil,
36 phases,
36 phases,
37 pycompat,
37 pycompat,
38 repoview,
38 repoview,
39 scmutil,
39 scmutil,
40 sparse,
40 sparse,
41 subrepo,
41 subrepo,
42 subrepoutil,
42 subrepoutil,
43 util,
43 util,
44 )
44 )
45 from .utils import (
45 from .utils import (
46 dateutil,
46 dateutil,
47 stringutil,
47 stringutil,
48 )
48 )
49
49
50 propertycache = util.propertycache
50 propertycache = util.propertycache
51
51
52
52
class basectx(object):
    """A basectx object represents the common logic for its children:
    changectx: read-only context that is already present in the repo,
    workingctx: a context that represents the working directory and can
    be committed,
    memctx: a context that represents changes in-memory and can also
    be committed."""

    def __init__(self, repo):
        self._repo = repo

    def __bytes__(self):
        # Short hex of the context's node; subclasses provide node().
        return short(self.node())

    __str__ = encoding.strmethod(__bytes__)

    def __repr__(self):
        return "<%s %s>" % (type(self).__name__, str(self))

    def __eq__(self, other):
        # Contexts compare equal only when they are the same concrete type
        # and point at the same revision. Contexts without a ``_rev``
        # attribute (or non-context objects) compare unequal.
        #
        # NOTE: defining __eq__ here suppresses the default __hash__ on
        # Python 3; hashable subclasses (e.g. changectx) define __hash__
        # themselves.
        try:
            return type(self) == type(other) and self._rev == other._rev
        except AttributeError:
            return False

    def __ne__(self, other):
        return not (self == other)

    def __contains__(self, key):
        # ``path in ctx`` means "path is tracked in this context's manifest".
        return key in self._manifest

    def __getitem__(self, key):
        # ``ctx[path]`` returns a file context for ``path``.
        return self.filectx(key)

    def __iter__(self):
        # Iterating a context yields the tracked paths from its manifest.
        return iter(self._manifest)

    def _buildstatusmanifest(self, status):
        """Builds a manifest that includes the given status results, if this is
        a working copy context. For non-working copy contexts, it just returns
        the normal manifest."""
        return self.manifest()

    def _matchstatus(self, other, match):
        """This internal method provides a way for child objects to override the
        match operator.
        """
        return match

    def _buildstatus(
        self, other, s, match, listignored, listclean, listunknown
    ):
        """build a status with respect to another context"""
        # Load earliest manifest first for caching reasons. More specifically,
        # if you have revisions 1000 and 1001, 1001 is probably stored as a
        # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
        # 1000 and cache it so that when you read 1001, we just need to apply a
        # delta to what's in the cache. So that's one full reconstruction + one
        # delta application.
        mf2 = None
        if self.rev() is not None and self.rev() < other.rev():
            mf2 = self._buildstatusmanifest(s)
        mf1 = other._buildstatusmanifest(s)
        if mf2 is None:
            mf2 = self._buildstatusmanifest(s)

        modified, added = [], []
        removed = []
        clean = []
        deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
        deletedset = set(deleted)
        d = mf1.diff(mf2, match=match, clean=listclean)
        for fn, value in pycompat.iteritems(d):
            # Files already reported as deleted must not be re-classified.
            if fn in deletedset:
                continue
            # ``None`` value means the manifests agree on this file.
            if value is None:
                clean.append(fn)
                continue
            (node1, flag1), (node2, flag2) = value
            if node1 is None:
                added.append(fn)
            elif node2 is None:
                removed.append(fn)
            elif flag1 != flag2:
                modified.append(fn)
            elif node2 not in self._repo.nodeconstants.wdirfilenodeids:
                # When comparing files between two commits, we save time by
                # not comparing the file contents when the nodeids differ.
                # Note that this means we incorrectly report a reverted change
                # to a file as a modification.
                modified.append(fn)
            elif self[fn].cmp(other[fn]):
                modified.append(fn)
            else:
                clean.append(fn)

        if removed:
            # need to filter files if they are already reported as removed
            unknown = [
                fn
                for fn in unknown
                if fn not in mf1 and (not match or match(fn))
            ]
            ignored = [
                fn
                for fn in ignored
                if fn not in mf1 and (not match or match(fn))
            ]
            # if they're deleted, don't report them as removed
            removed = [fn for fn in removed if fn not in deletedset]

        return scmutil.status(
            modified, added, removed, deleted, unknown, ignored, clean
        )

    @propertycache
    def substate(self):
        # Parsed .hgsub/.hgsubstate for this context (subpath -> state).
        return subrepoutil.state(self, self._repo.ui)

    def subrev(self, subpath):
        """Return the recorded revision of the subrepo at ``subpath``."""
        return self.substate[subpath][1]

    def rev(self):
        return self._rev

    def node(self):
        return self._node

    def hex(self):
        return hex(self.node())

    def manifest(self):
        return self._manifest

    def manifestctx(self):
        return self._manifestctx

    def repo(self):
        return self._repo

    def phasestr(self):
        """Return the name of this changeset's phase as bytes."""
        return phases.phasenames[self.phase()]

    def mutable(self):
        """True if the changeset's phase is anything but public."""
        return self.phase() > phases.public

    def matchfileset(self, cwd, expr, badfn=None):
        """Return a matcher for the fileset expression ``expr``."""
        return fileset.match(self, cwd, expr, badfn=badfn)

    def obsolete(self):
        """True if the changeset is obsolete"""
        return self.rev() in obsmod.getrevs(self._repo, b'obsolete')

    def extinct(self):
        """True if the changeset is extinct"""
        return self.rev() in obsmod.getrevs(self._repo, b'extinct')

    def orphan(self):
        """True if the changeset is not obsolete, but its ancestor is"""
        return self.rev() in obsmod.getrevs(self._repo, b'orphan')

    def phasedivergent(self):
        """True if the changeset tries to be a successor of a public changeset

        Only non-public and non-obsolete changesets may be phase-divergent.
        """
        return self.rev() in obsmod.getrevs(self._repo, b'phasedivergent')

    def contentdivergent(self):
        """Is a successor of a changeset with multiple possible successor sets

        Only non-public and non-obsolete changesets may be content-divergent.
        """
        return self.rev() in obsmod.getrevs(self._repo, b'contentdivergent')

    def isunstable(self):
        """True if the changeset is either orphan, phase-divergent or
        content-divergent"""
        return self.orphan() or self.phasedivergent() or self.contentdivergent()

    def instabilities(self):
        """return the list of instabilities affecting this changeset.

        Instabilities are returned as strings. possible values are:
        - orphan,
        - phase-divergent,
        - content-divergent.
        """
        instabilities = []
        if self.orphan():
            instabilities.append(b'orphan')
        if self.phasedivergent():
            instabilities.append(b'phase-divergent')
        if self.contentdivergent():
            instabilities.append(b'content-divergent')
        return instabilities

    def parents(self):
        """return contexts for each parent changeset"""
        return self._parents

    def p1(self):
        """Return the first parent context."""
        return self._parents[0]

    def p2(self):
        """Return the second parent context, or the null context if there
        is only one parent."""
        parents = self._parents
        if len(parents) == 2:
            return parents[1]
        return self._repo[nullrev]

    def _fileinfo(self, path):
        # Look up ``path`` in (in order of preference) the fully-loaded
        # manifest, the manifest delta, or the manifest read through the
        # manifest log, raising ManifestLookupError when absent.
        if '_manifest' in self.__dict__:
            try:
                return self._manifest.find(path)
            except KeyError:
                raise error.ManifestLookupError(
                    self._node or b'None', path, _(b'not found in manifest')
                )
        if '_manifestdelta' in self.__dict__ or path in self.files():
            if path in self._manifestdelta:
                return (
                    self._manifestdelta[path],
                    self._manifestdelta.flags(path),
                )
        mfl = self._repo.manifestlog
        try:
            node, flag = mfl[self._changeset.manifest].find(path)
        except KeyError:
            raise error.ManifestLookupError(
                self._node or b'None', path, _(b'not found in manifest')
            )

        return node, flag

    def filenode(self, path):
        """Return the file node recorded for ``path`` in this context."""
        return self._fileinfo(path)[0]

    def flags(self, path):
        """Return the flags (e.g. b'l' or b'x') for ``path``, or b'' when
        the path is not present in the manifest."""
        try:
            return self._fileinfo(path)[1]
        except error.LookupError:
            return b''

    @propertycache
    def _copies(self):
        return metadata.computechangesetcopies(self)

    def p1copies(self):
        """Return the {dst: src} copy mapping relative to the first parent."""
        return self._copies[0]

    def p2copies(self):
        """Return the {dst: src} copy mapping relative to the second parent."""
        return self._copies[1]

    def sub(self, path, allowcreate=True):
        '''return a subrepo for the stored revision of path, never wdir()'''
        return subrepo.subrepo(self, path, allowcreate=allowcreate)

    def nullsub(self, path, pctx):
        """Return a null (empty) subrepo for ``path``."""
        return subrepo.nullsubrepo(self, path, pctx)

    def workingsub(self, path):
        """return a subrepo for the stored revision, or wdir if this is a wdir
        context.
        """
        return subrepo.subrepo(self, path, allowwdir=True)

    def match(
        self,
        pats=None,
        include=None,
        exclude=None,
        default=b'glob',
        listsubrepos=False,
        badfn=None,
        cwd=None,
    ):
        """Build a matcher rooted at the repo for the given patterns."""
        r = self._repo
        if not cwd:
            cwd = r.getcwd()
        return matchmod.match(
            r.root,
            cwd,
            pats,
            include,
            exclude,
            default,
            auditor=r.nofsauditor,
            ctx=self,
            listsubrepos=listsubrepos,
            badfn=badfn,
        )

    def diff(
        self,
        ctx2=None,
        match=None,
        changes=None,
        opts=None,
        losedatafn=None,
        pathfn=None,
        copy=None,
        copysourcematch=None,
        hunksfilterfn=None,
    ):
        """Returns a diff generator for the given contexts and matcher"""
        if ctx2 is None:
            # Default to diffing against the first parent.
            ctx2 = self.p1()
        if ctx2 is not None:
            ctx2 = self._repo[ctx2]
        return patch.diff(
            self._repo,
            ctx2,
            self,
            match=match,
            changes=changes,
            opts=opts,
            losedatafn=losedatafn,
            pathfn=pathfn,
            copy=copy,
            copysourcematch=copysourcematch,
            hunksfilterfn=hunksfilterfn,
        )

    def dirs(self):
        """Return the set of directories implied by the manifest."""
        return self._manifest.dirs()

    def hasdir(self, dir):
        """True if ``dir`` is a directory in this context's manifest."""
        return self._manifest.hasdir(dir)

    def status(
        self,
        other=None,
        match=None,
        listignored=False,
        listclean=False,
        listunknown=False,
        listsubrepos=False,
    ):
        """return status of files between two nodes or node and working
        directory.

        If other is None, compare this node with working directory.

        ctx1.status(ctx2) returns the status of change from ctx1 to ctx2

        Returns a mercurial.scmutils.status object.

        Data can be accessed using either tuple notation:

        (modified, added, removed, deleted, unknown, ignored, clean)

        or direct attribute access:

        s.modified, s.added, ...
        """

        ctx1 = self
        ctx2 = self._repo[other]

        # This next code block is, admittedly, fragile logic that tests for
        # reversing the contexts and wouldn't need to exist if it weren't for
        # the fast (and common) code path of comparing the working directory
        # with its first parent.
        #
        # What we're aiming for here is the ability to call:
        #
        # workingctx.status(parentctx)
        #
        # If we always built the manifest for each context and compared those,
        # then we'd be done. But the special case of the above call means we
        # just copy the manifest of the parent.
        #
        # (``swapped`` rather than ``reversed`` to avoid shadowing the
        # builtin of that name.)
        swapped = False
        if not isinstance(ctx1, changectx) and isinstance(ctx2, changectx):
            swapped = True
            ctx1, ctx2 = ctx2, ctx1

        match = self._repo.narrowmatch(match)
        match = ctx2._matchstatus(ctx1, match)
        r = scmutil.status([], [], [], [], [], [], [])
        r = ctx2._buildstatus(
            ctx1, r, match, listignored, listclean, listunknown
        )

        if swapped:
            # Reverse added and removed. Clear deleted, unknown and ignored as
            # these make no sense to reverse.
            r = scmutil.status(
                r.modified, r.removed, r.added, [], [], [], r.clean
            )

        if listsubrepos:
            for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
                try:
                    rev2 = ctx2.subrev(subpath)
                except KeyError:
                    # A subrepo that existed in node1 was deleted between
                    # node1 and node2 (inclusive). Thus, ctx2's substate
                    # won't contain that subpath. The best we can do ignore it.
                    rev2 = None
                submatch = matchmod.subdirmatcher(subpath, match)
                s = sub.status(
                    rev2,
                    match=submatch,
                    ignored=listignored,
                    clean=listclean,
                    unknown=listunknown,
                    listsubrepos=True,
                )
                for k in (
                    'modified',
                    'added',
                    'removed',
                    'deleted',
                    'unknown',
                    'ignored',
                    'clean',
                ):
                    rfiles, sfiles = getattr(r, k), getattr(s, k)
                    rfiles.extend(b"%s/%s" % (subpath, f) for f in sfiles)

        r.modified.sort()
        r.added.sort()
        r.removed.sort()
        r.deleted.sort()
        r.unknown.sort()
        r.ignored.sort()
        r.clean.sort()

        return r

    def mergestate(self, clean=False):
        """Get a mergestate object for this context."""
        raise NotImplementedError(
            '%s does not implement mergestate()' % self.__class__
        )

    def isempty(self):
        """True if this changeset would record no change: a single parent,
        same branch as p1, not closing a branch, and touching no files."""
        return not (
            len(self.parents()) > 1
            or self.branch() != self.p1().branch()
            or self.closesbranch()
            or self.files()
        )
496
496
497
497
498 class changectx(basectx):
498 class changectx(basectx):
499 """A changecontext object makes access to data related to a particular
499 """A changecontext object makes access to data related to a particular
500 changeset convenient. It represents a read-only context already present in
500 changeset convenient. It represents a read-only context already present in
501 the repo."""
501 the repo."""
502
502
503 def __init__(self, repo, rev, node, maybe_filtered=True):
503 def __init__(self, repo, rev, node, maybe_filtered=True):
504 super(changectx, self).__init__(repo)
504 super(changectx, self).__init__(repo)
505 self._rev = rev
505 self._rev = rev
506 self._node = node
506 self._node = node
507 # When maybe_filtered is True, the revision might be affected by
507 # When maybe_filtered is True, the revision might be affected by
508 # changelog filtering and operation through the filtered changelog must be used.
508 # changelog filtering and operation through the filtered changelog must be used.
509 #
509 #
510 # When maybe_filtered is False, the revision has already been checked
510 # When maybe_filtered is False, the revision has already been checked
511 # against filtering and is not filtered. Operation through the
511 # against filtering and is not filtered. Operation through the
512 # unfiltered changelog might be used in some case.
512 # unfiltered changelog might be used in some case.
513 self._maybe_filtered = maybe_filtered
513 self._maybe_filtered = maybe_filtered
514
514
515 def __hash__(self):
515 def __hash__(self):
516 try:
516 try:
517 return hash(self._rev)
517 return hash(self._rev)
518 except AttributeError:
518 except AttributeError:
519 return id(self)
519 return id(self)
520
520
521 def __nonzero__(self):
521 def __nonzero__(self):
522 return self._rev != nullrev
522 return self._rev != nullrev
523
523
524 __bool__ = __nonzero__
524 __bool__ = __nonzero__
525
525
526 @propertycache
526 @propertycache
527 def _changeset(self):
527 def _changeset(self):
528 if self._maybe_filtered:
528 if self._maybe_filtered:
529 repo = self._repo
529 repo = self._repo
530 else:
530 else:
531 repo = self._repo.unfiltered()
531 repo = self._repo.unfiltered()
532 return repo.changelog.changelogrevision(self.rev())
532 return repo.changelog.changelogrevision(self.rev())
533
533
534 @propertycache
534 @propertycache
535 def _manifest(self):
535 def _manifest(self):
536 return self._manifestctx.read()
536 return self._manifestctx.read()
537
537
538 @property
538 @property
539 def _manifestctx(self):
539 def _manifestctx(self):
540 return self._repo.manifestlog[self._changeset.manifest]
540 return self._repo.manifestlog[self._changeset.manifest]
541
541
542 @propertycache
542 @propertycache
543 def _manifestdelta(self):
543 def _manifestdelta(self):
544 return self._manifestctx.readdelta()
544 return self._manifestctx.readdelta()
545
545
546 @propertycache
546 @propertycache
547 def _parents(self):
547 def _parents(self):
548 repo = self._repo
548 repo = self._repo
549 if self._maybe_filtered:
549 if self._maybe_filtered:
550 cl = repo.changelog
550 cl = repo.changelog
551 else:
551 else:
552 cl = repo.unfiltered().changelog
552 cl = repo.unfiltered().changelog
553
553
554 p1, p2 = cl.parentrevs(self._rev)
554 p1, p2 = cl.parentrevs(self._rev)
555 if p2 == nullrev:
555 if p2 == nullrev:
556 return [changectx(repo, p1, cl.node(p1), maybe_filtered=False)]
556 return [changectx(repo, p1, cl.node(p1), maybe_filtered=False)]
557 return [
557 return [
558 changectx(repo, p1, cl.node(p1), maybe_filtered=False),
558 changectx(repo, p1, cl.node(p1), maybe_filtered=False),
559 changectx(repo, p2, cl.node(p2), maybe_filtered=False),
559 changectx(repo, p2, cl.node(p2), maybe_filtered=False),
560 ]
560 ]
561
561
562 def changeset(self):
562 def changeset(self):
563 c = self._changeset
563 c = self._changeset
564 return (
564 return (
565 c.manifest,
565 c.manifest,
566 c.user,
566 c.user,
567 c.date,
567 c.date,
568 c.files,
568 c.files,
569 c.description,
569 c.description,
570 c.extra,
570 c.extra,
571 )
571 )
572
572
573 def manifestnode(self):
573 def manifestnode(self):
574 return self._changeset.manifest
574 return self._changeset.manifest
575
575
576 def user(self):
576 def user(self):
577 return self._changeset.user
577 return self._changeset.user
578
578
579 def date(self):
579 def date(self):
580 return self._changeset.date
580 return self._changeset.date
581
581
582 def files(self):
582 def files(self):
583 return self._changeset.files
583 return self._changeset.files
584
584
585 def filesmodified(self):
585 def filesmodified(self):
586 modified = set(self.files())
586 modified = set(self.files())
587 modified.difference_update(self.filesadded())
587 modified.difference_update(self.filesadded())
588 modified.difference_update(self.filesremoved())
588 modified.difference_update(self.filesremoved())
589 return sorted(modified)
589 return sorted(modified)
590
590
591 def filesadded(self):
591 def filesadded(self):
592 filesadded = self._changeset.filesadded
592 filesadded = self._changeset.filesadded
593 compute_on_none = True
593 compute_on_none = True
594 if self._repo.filecopiesmode == b'changeset-sidedata':
594 if self._repo.filecopiesmode == b'changeset-sidedata':
595 compute_on_none = False
595 compute_on_none = False
596 else:
596 else:
597 source = self._repo.ui.config(b'experimental', b'copies.read-from')
597 source = self._repo.ui.config(b'experimental', b'copies.read-from')
598 if source == b'changeset-only':
598 if source == b'changeset-only':
599 compute_on_none = False
599 compute_on_none = False
600 elif source != b'compatibility':
600 elif source != b'compatibility':
601 # filelog mode, ignore any changelog content
601 # filelog mode, ignore any changelog content
602 filesadded = None
602 filesadded = None
603 if filesadded is None:
603 if filesadded is None:
604 if compute_on_none:
604 if compute_on_none:
605 filesadded = metadata.computechangesetfilesadded(self)
605 filesadded = metadata.computechangesetfilesadded(self)
606 else:
606 else:
607 filesadded = []
607 filesadded = []
608 return filesadded
608 return filesadded
609
609
610 def filesremoved(self):
610 def filesremoved(self):
611 filesremoved = self._changeset.filesremoved
611 filesremoved = self._changeset.filesremoved
612 compute_on_none = True
612 compute_on_none = True
613 if self._repo.filecopiesmode == b'changeset-sidedata':
613 if self._repo.filecopiesmode == b'changeset-sidedata':
614 compute_on_none = False
614 compute_on_none = False
615 else:
615 else:
616 source = self._repo.ui.config(b'experimental', b'copies.read-from')
616 source = self._repo.ui.config(b'experimental', b'copies.read-from')
617 if source == b'changeset-only':
617 if source == b'changeset-only':
618 compute_on_none = False
618 compute_on_none = False
619 elif source != b'compatibility':
619 elif source != b'compatibility':
620 # filelog mode, ignore any changelog content
620 # filelog mode, ignore any changelog content
621 filesremoved = None
621 filesremoved = None
622 if filesremoved is None:
622 if filesremoved is None:
623 if compute_on_none:
623 if compute_on_none:
624 filesremoved = metadata.computechangesetfilesremoved(self)
624 filesremoved = metadata.computechangesetfilesremoved(self)
625 else:
625 else:
626 filesremoved = []
626 filesremoved = []
627 return filesremoved
627 return filesremoved
628
628
    @propertycache
    def _copies(self):
        # Return a (p1copies, p2copies) pair of dicts mapping destination
        # path -> source path, honoring the configured source for copy data.
        p1copies = self._changeset.p1copies
        p2copies = self._changeset.p2copies
        compute_on_none = True
        if self._repo.filecopiesmode == b'changeset-sidedata':
            compute_on_none = False
        else:
            source = self._repo.ui.config(b'experimental', b'copies.read-from')
            # If config says to get copy metadata only from changeset, then
            # return that, defaulting to {} if there was no copy metadata. In
            # compatibility mode, we return copy data from the changeset if it
            # was recorded there, and otherwise we fall back to getting it from
            # the filelogs (below).
            #
            # If we are in compatiblity mode and there is not data in the
            # changeset), we get the copy metadata from the filelogs.
            #
            # otherwise, when config said to read only from filelog, we get the
            # copy metadata from the filelogs.
            if source == b'changeset-only':
                compute_on_none = False
            elif source != b'compatibility':
                # filelog mode, ignore any changelog content
                p1copies = p2copies = None
        if p1copies is None:
            if compute_on_none:
                # fall back to the (slower) filelog-based computation
                p1copies, p2copies = super(changectx, self)._copies
            else:
                if p1copies is None:
                    p1copies = {}
                if p2copies is None:
                    p2copies = {}
        return p1copies, p2copies
663
663
    def description(self):
        # Commit message of the changeset.
        return self._changeset.description

    def branch(self):
        # Named branch, converted to the local encoding.
        return encoding.tolocal(self._changeset.extra.get(b"branch"))

    def closesbranch(self):
        # A changeset closes its branch when a b'close' key is present
        # in its extra dict.
        return b'close' in self._changeset.extra

    def extra(self):
        """Return a dict of extra information."""
        return self._changeset.extra

    def tags(self):
        """Return a list of byte tag names"""
        return self._repo.nodetags(self._node)

    def bookmarks(self):
        """Return a list of byte bookmark names."""
        return self._repo.nodebookmarks(self._node)

    def phase(self):
        # Phase of the changeset, resolved through the repo's phase cache.
        return self._repo._phasecache.phase(self._repo, self._rev)

    def hidden(self):
        # True when this revision is filtered out of the 'visible' repo view.
        return self._rev in repoview.filterrevs(self._repo, b'visible')

    def isinmemory(self):
        # Committed changesets are never in-memory-only (contrast with
        # overlay/in-memory contexts elsewhere).
        return False
693
693
694 def children(self):
694 def children(self):
695 """return list of changectx contexts for each child changeset.
695 """return list of changectx contexts for each child changeset.
696
696
697 This returns only the immediate child changesets. Use descendants() to
697 This returns only the immediate child changesets. Use descendants() to
698 recursively walk children.
698 recursively walk children.
699 """
699 """
700 c = self._repo.changelog.children(self._node)
700 c = self._repo.changelog.children(self._node)
701 return [self._repo[x] for x in c]
701 return [self._repo[x] for x in c]
702
702
    def ancestors(self):
        # Lazily yield a changectx for every ancestor revision of this one.
        for a in self._repo.changelog.ancestors([self._rev]):
            yield self._repo[a]
706
706
    def descendants(self):
        """Recursively yield all children of the changeset.

        For just the immediate children, use children()
        """
        for d in self._repo.changelog.descendants([self._rev]):
            yield self._repo[d]
714
714
715 def filectx(self, path, fileid=None, filelog=None):
715 def filectx(self, path, fileid=None, filelog=None):
716 """get a file context from this changeset"""
716 """get a file context from this changeset"""
717 if fileid is None:
717 if fileid is None:
718 fileid = self.filenode(path)
718 fileid = self.filenode(path)
719 return filectx(
719 return filectx(
720 self._repo, path, fileid=fileid, changectx=self, filelog=filelog
720 self._repo, path, fileid=fileid, changectx=self, filelog=filelog
721 )
721 )
722
722
    def ancestor(self, c2, warn=False):
        """return the "best" ancestor context of self and c2

        If there are multiple candidates, it will show a message and check
        merge.preferancestor configuration before falling back to the
        revlog ancestor."""
        # deal with workingctxs
        n2 = c2._node
        if n2 is None:
            n2 = c2._parents[0]._node
        cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
        if not cahs:
            # no common ancestor at all: fall back to the null revision
            anc = self._repo.nodeconstants.nullid
        elif len(cahs) == 1:
            anc = cahs[0]
        else:
            # several candidates: let configuration pick one, otherwise use
            # the revlog's default ancestor
            # experimental config: merge.preferancestor
            for r in self._repo.ui.configlist(b'merge', b'preferancestor'):
                try:
                    ctx = scmutil.revsymbol(self._repo, r)
                except error.RepoLookupError:
                    continue
                anc = ctx.node()
                if anc in cahs:
                    break
            else:
                # for/else: no configured preference matched a candidate
                anc = self._repo.changelog.ancestor(self._node, n2)
            if warn:
                self._repo.ui.status(
                    (
                        _(b"note: using %s as ancestor of %s and %s\n")
                        % (short(anc), short(self._node), short(n2))
                    )
                    + b''.join(
                        _(
                            b" alternatively, use --config "
                            b"merge.preferancestor=%s\n"
                        )
                        % short(n)
                        for n in sorted(cahs)
                        if n != anc
                    )
                )
        return self._repo[anc]
767
767
    def isancestorof(self, other):
        """True if this changeset is an ancestor of other"""
        # delegated to the changelog's ancestry index
        return self._repo.changelog.isancestorrev(self._rev, other._rev)
771
771
    def walk(self, match):
        '''Generates matching file names.'''

        # Wrap match.bad method to have message with nodeid
        def bad(fn, msg):
            # The manifest doesn't know about subrepos, so don't complain about
            # paths into valid subrepos.
            if any(fn == s or fn.startswith(s + b'/') for s in self.substate):
                return
            match.bad(fn, _(b'no such file in rev %s') % self)

        m = matchmod.badmatch(self._repo.narrowmatch(match), bad)
        return self._manifest.walk(m)

    def matches(self, match):
        # Alias for walk(); kept for interface compatibility with other
        # context classes.
        return self.walk(match)
788
788
789
789
class basefilectx(object):
    """A filecontext object represents the common logic for its children:
    filectx: read-only access to a filerevision that is already present
    in the repo,
    workingfilectx: a filecontext that represents files from the working
    directory,
    memfilectx: a filecontext that represents files in-memory,
    """
798
798
    @propertycache
    def _filelog(self):
        # Filelog (per-file revlog) holding this file's history; cached
        # per instance.
        return self._repo.file(self._path)

    @propertycache
    def _changeid(self):
        # Changelog revision this file context is associated with.
        if '_changectx' in self.__dict__:
            return self._changectx.rev()
        elif '_descendantrev' in self.__dict__:
            # this file context was created from a revision with a known
            # descendant, we can (lazily) correct for linkrev aliases
            return self._adjustlinkrev(self._descendantrev)
        else:
            # fall back to the raw linkrev stored in the filelog
            return self._filelog.linkrev(self._filerev)

    @propertycache
    def _filenode(self):
        # Node id of this file revision in the filelog.
        if '_fileid' in self.__dict__:
            return self._filelog.lookup(self._fileid)
        else:
            return self._changectx.filenode(self._path)

    @propertycache
    def _filerev(self):
        # Filelog revision number corresponding to _filenode.
        return self._filelog.rev(self._filenode)

    @propertycache
    def _repopath(self):
        # Path of the file relative to the repository root.
        return self._path
828
828
    def __nonzero__(self):
        # A file context is truthy when its file revision can be resolved.
        try:
            self._filenode
            return True
        except error.LookupError:
            # file is missing
            return False

    __bool__ = __nonzero__

    def __bytes__(self):
        # b"path@changeset" display form; b"path@???" when the changeset
        # cannot be resolved.
        try:
            return b"%s@%s" % (self.path(), self._changectx)
        except error.LookupError:
            return b"%s@???" % self.path()

    __str__ = encoding.strmethod(__bytes__)

    def __repr__(self):
        return "<%s %s>" % (type(self).__name__, str(self))
849
849
    def __hash__(self):
        try:
            return hash((self._path, self._filenode))
        except AttributeError:
            # _filenode may be unavailable; fall back to an identity hash
            return id(self)

    def __eq__(self, other):
        # Equal when same concrete type, path, and file node.
        try:
            return (
                type(self) == type(other)
                and self._path == other._path
                and self._filenode == other._filenode
            )
        except AttributeError:
            return False

    def __ne__(self, other):
        return not (self == other)
868
868
    # -- thin accessors delegating to the filelog or associated changectx --

    def filerev(self):
        return self._filerev

    def filenode(self):
        return self._filenode

    @propertycache
    def _flags(self):
        # Manifest flags ('x', 'l', ...) for this path in the changeset.
        return self._changectx.flags(self._path)

    def flags(self):
        return self._flags

    def filelog(self):
        return self._filelog

    def rev(self):
        return self._changeid

    def linkrev(self):
        # Raw linkrev from the filelog; may be shadowed by other changesets
        # reusing the same file revision (see introrev()).
        return self._filelog.linkrev(self._filerev)

    def node(self):
        return self._changectx.node()

    def hex(self):
        return self._changectx.hex()

    def user(self):
        return self._changectx.user()

    def date(self):
        return self._changectx.date()

    def files(self):
        return self._changectx.files()

    def description(self):
        return self._changectx.description()

    def branch(self):
        return self._changectx.branch()

    def extra(self):
        return self._changectx.extra()

    def phase(self):
        return self._changectx.phase()

    def phasestr(self):
        return self._changectx.phasestr()

    def obsolete(self):
        return self._changectx.obsolete()

    def instabilities(self):
        return self._changectx.instabilities()

    def manifest(self):
        return self._changectx.manifest()

    def changectx(self):
        return self._changectx

    def renamed(self):
        # Copy/rename metadata recorded for this file revision
        # (falsy when not a copy).
        return self._copied

    def copysource(self):
        # Path this file was copied from, or a falsy value when not a copy.
        return self._copied and self._copied[0]

    def repo(self):
        return self._repo

    def size(self):
        # Size of the file content as returned by data().
        return len(self.data())

    def path(self):
        return self._path
947
947
    def isbinary(self):
        # Heuristic binary detection on the content; unreadable content is
        # reported as non-binary.
        try:
            return stringutil.binary(self.data())
        except IOError:
            return False

    def isexec(self):
        # True when the executable flag ('x') is set on this file.
        return b'x' in self.flags()

    def islink(self):
        # True when this file is a symlink ('l' flag).
        return b'l' in self.flags()

    def isabsent(self):
        """whether this filectx represents a file not in self._changectx

        This is mainly for merge code to detect change/delete conflicts. This is
        expected to be True for all subclasses of basectx."""
        return False

    # subclasses set this to True to take over comparisons in cmp()
    _customcmp = False
968
968
    def cmp(self, fctx):
        """compare with other file context

        returns True if different than fctx.
        """
        if fctx._customcmp:
            # let the other context drive the comparison
            return fctx.cmp(self)

        if self._filenode is None:
            raise error.ProgrammingError(
                b'filectx.cmp() must be reimplemented if not backed by revlog'
            )

        if fctx._filenode is None:
            # 'fctx' has no stored filenode (e.g. working-directory content);
            # compare against our filelog entry, using size as a cheap filter
            # where it is reliable
            if self._repo._encodefilterpats:
                # can't rely on size() because wdir content may be decoded
                return self._filelog.cmp(self._filenode, fctx.data())
            if self.size() - 4 == fctx.size():
                # size() can match:
                # if file data starts with '\1\n', empty metadata block is
                # prepended, which adds 4 bytes to filelog.size().
                return self._filelog.cmp(self._filenode, fctx.data())
            if self.size() == fctx.size() or self.flags() == b'l':
                # size() matches: need to compare content
                # issue6456: Always compare symlinks because size can represent
                # encrypted string for EXT-4 encryption(fscrypt).
                return self._filelog.cmp(self._filenode, fctx.data())

        # size() differs
        return True
999
999
    def _adjustlinkrev(self, srcrev, inclusive=False, stoprev=None):
        """return the first ancestor of <srcrev> introducing <fnode>

        If the linkrev of the file revision does not point to an ancestor of
        srcrev, we'll walk down the ancestors until we find one introducing
        this file revision.

        :srcrev: the changeset revision we search ancestors from
        :inclusive: if true, the src revision will also be checked
        :stoprev: an optional revision to stop the walk at. If no introduction
                  of this file content could be found before this floor
                  revision, the function will return "None" and stop its
                  iteration.
        """
        repo = self._repo
        cl = repo.unfiltered().changelog
        mfl = repo.manifestlog
        # fetch the linkrev
        lkr = self.linkrev()
        if srcrev == lkr:
            # fast path: the stored linkrev is exactly the source revision
            return lkr
        # hack to reuse ancestor computation when searching for renames
        memberanc = getattr(self, '_ancestrycontext', None)
        iteranc = None
        if srcrev is None:
            # wctx case, used by workingfilectx during mergecopy
            revs = [p.rev() for p in self._repo[None].parents()]
            inclusive = True  # we skipped the real (revless) source
        else:
            revs = [srcrev]
        if memberanc is None:
            memberanc = iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
        # check if this linkrev is an ancestor of srcrev
        if lkr not in memberanc:
            if iteranc is None:
                iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
            fnode = self._filenode
            path = self._path
            for a in iteranc:
                if stoprev is not None and a < stoprev:
                    return None
                ac = cl.read(a)  # get changeset data (we avoid object creation)
                if path in ac[3]:  # checking the 'files' field.
                    # The file has been touched, check if the content is
                    # similar to the one we search for.
                    if fnode == mfl[ac[0]].readfast().get(path):
                        return a
            # In theory, we should never get out of that loop without a result.
            # But if manifest uses a buggy file revision (not children of the
            # one it replaces) we could. Such a buggy situation will likely
            # result is crash somewhere else at to some point.
        return lkr
1052
1052
1053 def isintroducedafter(self, changelogrev):
1053 def isintroducedafter(self, changelogrev):
1054 """True if a filectx has been introduced after a given floor revision"""
1054 """True if a filectx has been introduced after a given floor revision"""
1055 if self.linkrev() >= changelogrev:
1055 if self.linkrev() >= changelogrev:
1056 return True
1056 return True
1057 introrev = self._introrev(stoprev=changelogrev)
1057 introrev = self._introrev(stoprev=changelogrev)
1058 if introrev is None:
1058 if introrev is None:
1059 return False
1059 return False
1060 return introrev >= changelogrev
1060 return introrev >= changelogrev
1061
1061
    def introrev(self):
        """return the rev of the changeset which introduced this file revision

        This method is different from linkrev because it take into account the
        changeset the filectx was created from. It ensures the returned
        revision is one of its ancestors. This prevents bugs from
        'linkrev-shadowing' when a file revision is used by multiple
        changesets.
        """
        return self._introrev()
1072
1072
    def _introrev(self, stoprev=None):
        """
        Same as `introrev` but, with an extra argument to limit changelog
        iteration range in some internal usecase.

        If `stoprev` is set, the `introrev` will not be searched past that
        `stoprev` revision and "None" might be returned. This is useful to
        limit the iteration range.
        """
        toprev = None
        attrs = vars(self)
        if '_changeid' in attrs:
            # We have a cached value already
            toprev = self._changeid
        elif '_changectx' in attrs:
            # We know which changelog entry we are coming from
            toprev = self._changectx.rev()

        if toprev is not None:
            return self._adjustlinkrev(toprev, inclusive=True, stoprev=stoprev)
        elif '_descendantrev' in attrs:
            introrev = self._adjustlinkrev(self._descendantrev, stoprev=stoprev)
            # be nice and cache the result of the computation
            if introrev is not None:
                self._changeid = introrev
            return introrev
        else:
            # no anchoring changeset known: trust the raw linkrev
            return self.linkrev()
1101
1101
1102 def introfilectx(self):
1102 def introfilectx(self):
1103 """Return filectx having identical contents, but pointing to the
1103 """Return filectx having identical contents, but pointing to the
1104 changeset revision where this filectx was introduced"""
1104 changeset revision where this filectx was introduced"""
1105 introrev = self.introrev()
1105 introrev = self.introrev()
1106 if self.rev() == introrev:
1106 if self.rev() == introrev:
1107 return self
1107 return self
1108 return self.filectx(self.filenode(), changeid=introrev)
1108 return self.filectx(self.filenode(), changeid=introrev)
1109
1109
    def _parentfilectx(self, path, fileid, filelog):
        """create parent filectx keeping ancestry info for _adjustlinkrev()"""
        fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
        if '_changeid' in vars(self) or '_changectx' in vars(self):
            # If self is associated with a changeset (probably explicitly
            # fed), ensure the created filectx is associated with a
            # changeset that is an ancestor of self.changectx.
            # This lets us later use _adjustlinkrev to get a correct link.
            fctx._descendantrev = self.rev()
            fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
        elif '_descendantrev' in vars(self):
            # Otherwise propagate _descendantrev if we have one associated.
            fctx._descendantrev = self._descendantrev
            fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
        return fctx
1125
1125
    def parents(self):
        """Return the parent filectxs of this file revision.

        Filelog parents are used, with a recorded rename source (if any)
        substituted for the first null parent.
        """
        _path = self._path
        fl = self._filelog
        parents = self._filelog.parents(self._filenode)
        # Null parents are dropped here; the rename handling below relies
        # on that.
        pl = [
            (_path, node, fl)
            for node in parents
            if node != self._repo.nodeconstants.nullid
        ]

        r = fl.renamed(self._filenode)
        if r:
            # - In the simple rename case, both parent are nullid, pl is empty.
            # - In case of merge, only one of the parent is null id and should
            # be replaced with the rename information. This parent is -always-
            # the first one.
            #
            # As null id have always been filtered out in the previous list
            # comprehension, inserting to 0 will always result in "replacing
            # first nullid parent with rename information.
            pl.insert(0, (r[0], r[1], self._repo.file(r[0])))

        return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]
1149
1149
1150 def p1(self):
1150 def p1(self):
1151 return self.parents()[0]
1151 return self.parents()[0]
1152
1152
1153 def p2(self):
1153 def p2(self):
1154 p = self.parents()
1154 p = self.parents()
1155 if len(p) == 2:
1155 if len(p) == 2:
1156 return p[1]
1156 return p[1]
1157 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
1157 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
1158
1158
    def annotate(self, follow=False, skiprevs=None, diffopts=None):
        """Returns a list of annotateline objects for each line in the file

        - line.fctx is the filectx of the node where that line was last changed
        - line.lineno is the line number at the first appearance in the managed
          file
        - line.text is the data on that line (including newline character)

        ``follow`` controls whether renames are followed; ``skiprevs`` and
        ``diffopts`` are forwarded to :func:`dagop.annotate`.
        """
        # Cache filelog lookups: the parents() callback below may request the
        # same filelog repeatedly while walking history.
        getlog = util.lrucachefunc(lambda x: self._repo.file(x))

        def parents(f):
            # Cut _descendantrev here to mitigate the penalty of lazy linkrev
            # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
            # from the topmost introrev (= srcrev) down to p.linkrev() if it
            # isn't an ancestor of the srcrev.
            f._changeid
            pl = f.parents()

            # Don't return renamed parents if we aren't following.
            if not follow:
                pl = [p for p in pl if p.path() == f.path()]

            # renamed filectx won't have a filelog yet, so set it
            # from the cache to save time
            for p in pl:
                if not '_filelog' in p.__dict__:
                    p._filelog = getlog(p.path())

            return pl

        # use linkrev to find the first changeset where self appeared
        base = self.introfilectx()
        if getattr(base, '_ancestrycontext', None) is None:
            # it is safe to use an unfiltered repository here because we are
            # walking ancestors only.
            cl = self._repo.unfiltered().changelog
            if base.rev() is None:
                # wctx is not inclusive, but works because _ancestrycontext
                # is used to test filelog revisions
                ac = cl.ancestors(
                    [p.rev() for p in base.parents()], inclusive=True
                )
            else:
                ac = cl.ancestors([base.rev()], inclusive=True)
            base._ancestrycontext = ac

        return dagop.annotate(
            base, parents, skiprevs=skiprevs, diffopts=diffopts
        )
1208
1208
    def ancestors(self, followfirst=False):
        """Yield ancestor filectxs of this file revision.

        When ``followfirst`` is true only the first parent of each
        revision is followed.
        """
        visit = {}
        c = self
        # ``cut`` limits the parents() slice below: 1 keeps only the first
        # parent, None keeps them all.
        if followfirst:
            cut = 1
        else:
            cut = None

        while True:
            for parent in c.parents()[:cut]:
                visit[(parent.linkrev(), parent.filenode())] = parent
            if not visit:
                break
            # Pop the pending candidate with the greatest (linkrev,
            # filenode) key, so ancestors are produced in decreasing
            # linkrev order.
            c = visit.pop(max(visit))
            yield c
1224
1224
1225 def decodeddata(self):
1225 def decodeddata(self):
1226 """Returns `data()` after running repository decoding filters.
1226 """Returns `data()` after running repository decoding filters.
1227
1227
1228 This is often equivalent to how the data would be expressed on disk.
1228 This is often equivalent to how the data would be expressed on disk.
1229 """
1229 """
1230 return self._repo.wwritedata(self.path(), self.data())
1230 return self._repo.wwritedata(self.path(), self.data())
1231
1231
1232
1232
class filectx(basefilectx):
    """A filecontext object makes access to data related to a particular
    filerevision convenient."""

    def __init__(
        self,
        repo,
        path,
        changeid=None,
        fileid=None,
        filelog=None,
        changectx=None,
    ):
        """changeid must be a revision number, if specified.
        fileid can be a file revision or node."""
        self._repo = repo
        self._path = path

        # At least one way of locating the file revision must be provided.
        assert (
            changeid is not None or fileid is not None or changectx is not None
        ), b"bad args: changeid=%r, fileid=%r, changectx=%r" % (
            changeid,
            fileid,
            changectx,
        )

        if filelog is not None:
            self._filelog = filelog

        # Only pre-populate the property caches that were explicitly given;
        # the rest are computed lazily.
        if changeid is not None:
            self._changeid = changeid
        if changectx is not None:
            self._changectx = changectx
        if fileid is not None:
            self._fileid = fileid

    @propertycache
    def _changectx(self):
        # Lazily resolve the changectx from the stored changeid.
        try:
            return self._repo[self._changeid]
        except error.FilteredRepoLookupError:
            # Linkrev may point to any revision in the repository. When the
            # repository is filtered this may lead to `filectx` trying to build
            # `changectx` for filtered revision. In such case we fallback to
            # creating `changectx` on the unfiltered version of the reposition.
            # This fallback should not be an issue because `changectx` from
            # `filectx` are not used in complex operations that care about
            # filtering.
            #
            # This fallback is a cheap and dirty fix that prevent several
            # crashes. It does not ensure the behavior is correct. However the
            # behavior was not correct before filtering either and "incorrect
            # behavior" is seen as better as "crash"
            #
            # Linkrevs have several serious troubles with filtering that are
            # complicated to solve. Proper handling of the issue here should be
            # considered when solving linkrev issue are on the table.
            return self._repo.unfiltered()[self._changeid]

    def filectx(self, fileid, changeid=None):
        """opens an arbitrary revision of the file without
        opening a new filelog"""
        return filectx(
            self._repo,
            self._path,
            fileid=fileid,
            filelog=self._filelog,
            changeid=changeid,
        )

    def rawdata(self):
        """Return the raw revlog data for this file revision."""
        return self._filelog.rawdata(self._filenode)

    def rawflags(self):
        """low-level revlog flags"""
        return self._filelog.flags(self._filerev)

    def data(self):
        """Return the file content, honoring the censor policy.

        With ``censor.policy=ignore`` a censored revision yields an empty
        string; otherwise an Abort is raised.
        """
        try:
            return self._filelog.read(self._filenode)
        except error.CensoredNodeError:
            if self._repo.ui.config(b"censor", b"policy") == b"ignore":
                return b""
            raise error.Abort(
                _(b"censored node: %s") % short(self._filenode),
                hint=_(b"set censor.policy to ignore errors"),
            )

    def size(self):
        """Return the size of this file revision as reported by the filelog."""
        return self._filelog.size(self._filerev)

    @propertycache
    def _copied(self):
        """check if file was actually renamed in this changeset revision

        If rename logged in file revision, we report copy for changeset only
        if file revisions linkrev points back to the changeset in question
        or both changeset parents contain different file revisions.
        """

        renamed = self._filelog.renamed(self._filenode)
        if not renamed:
            return None

        if self.rev() == self.linkrev():
            return renamed

        # The file revision was reused by a later changeset: only report the
        # copy when neither changeset parent already contains this exact
        # file revision.
        name = self.path()
        fnode = self._filenode
        for p in self._changectx.parents():
            try:
                if fnode == p.filenode(name):
                    return None
            except error.LookupError:
                pass
        return renamed

    def children(self):
        """Return the filectxs of this file revision's filelog children."""
        # hard for renames
        c = self._filelog.children(self._filenode)
        return [
            filectx(self._repo, self._path, fileid=x, filelog=self._filelog)
            for x in c
        ]
1357
1357
1358
1358
class committablectx(basectx):
    """A committablectx object provides common functionality for a context that
    wants the ability to commit, e.g. workingctx or memctx."""

    def __init__(
        self,
        repo,
        text=b"",
        user=None,
        date=None,
        extra=None,
        changes=None,
        branch=None,
    ):
        """Initialize a not-yet-committed context.

        text/user/date become the commit message, committer and commit
        date; ``changes`` pre-seeds the status, and ``branch`` (or the
        b'branch' key of ``extra``) names the branch, defaulting to
        b'default'.
        """
        super(committablectx, self).__init__(repo)
        self._rev = None
        self._node = None
        self._text = text
        # Only seed the property caches for values that were explicitly
        # provided; the @propertycache fallbacks below handle the rest.
        if date:
            self._date = dateutil.parsedate(date)
        if user:
            self._user = user
        if changes:
            self._status = changes

        self._extra = {}
        if extra:
            self._extra = extra.copy()
        if branch is not None:
            self._extra[b'branch'] = encoding.fromlocal(branch)
        if not self._extra.get(b'branch'):
            self._extra[b'branch'] = b'default'

    def __bytes__(self):
        return bytes(self._parents[0]) + b"+"

    def hex(self):
        """Return the hex id of the working-directory pseudo-revision."""
        # BUG FIX: the expression was previously computed and discarded,
        # making this method implicitly return None. Return the value, as
        # workingctx.hex() does.
        return self._repo.nodeconstants.wdirhex

    __str__ = encoding.strmethod(__bytes__)

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    @propertycache
    def _status(self):
        # Fall back to a full repository status when no explicit
        # ``changes`` was handed to the constructor.
        return self._repo.status()

    @propertycache
    def _user(self):
        return self._repo.ui.username()

    @propertycache
    def _date(self):
        # devel.default-date allows tests to pin the commit date.
        ui = self._repo.ui
        date = ui.configdate(b'devel', b'default-date')
        if date is None:
            date = dateutil.makedate()
        return date

    def subrev(self, subpath):
        # An uncommitted context has no recorded subrepo revisions.
        return None

    def manifestnode(self):
        # No manifest has been written yet.
        return None

    def user(self):
        return self._user or self._repo.ui.username()

    def date(self):
        return self._date

    def description(self):
        return self._text

    def files(self):
        """Return the sorted list of files touched by this context."""
        return sorted(
            self._status.modified + self._status.added + self._status.removed
        )

    def modified(self):
        return self._status.modified

    def added(self):
        return self._status.added

    def removed(self):
        return self._status.removed

    def deleted(self):
        return self._status.deleted

    # The files*() accessors are aliases of the plain status accessors for
    # an uncommitted context.
    filesmodified = modified
    filesadded = added
    filesremoved = removed

    def branch(self):
        return encoding.tolocal(self._extra[b'branch'])

    def closesbranch(self):
        return b'close' in self._extra

    def extra(self):
        return self._extra

    def isinmemory(self):
        return False

    def tags(self):
        return []

    def bookmarks(self):
        """Return the union of the parents' bookmarks."""
        b = []
        for p in self.parents():
            b.extend(p.bookmarks())
        return b

    def phase(self):
        # A pending commit can never be in a lower phase than its parents.
        phase = phases.newcommitphase(self._repo.ui)
        for p in self.parents():
            phase = max(phase, p.phase())
        return phase

    def hidden(self):
        return False

    def children(self):
        return []

    def flags(self, path):
        """Return the flags for ``path``, preferring a materialized manifest."""
        if '_manifest' in self.__dict__:
            try:
                return self._manifest.flags(path)
            except KeyError:
                return b''

        try:
            return self._flagfunc(path)
        except OSError:
            return b''

    def ancestor(self, c2):
        """return the "best" ancestor context of self and c2"""
        return self._parents[0].ancestor(c2)  # punt on two parents for now

    def ancestors(self):
        """Yield the parents, then every changelog ancestor of them."""
        for p in self._parents:
            yield p
        for a in self._repo.changelog.ancestors(
            [p.rev() for p in self._parents]
        ):
            yield self._repo[a]

    def markcommitted(self, node):
        """Perform post-commit cleanup necessary after committing this ctx

        Specifically, this updates backing stores this working context
        wraps to reflect the fact that the changes reflected by this
        workingctx have been committed. For example, it marks
        modified and added files as normal in the dirstate.

        """

    def dirty(self, missing=False, merge=True, branch=True):
        # The base committable context is never dirty; subclasses override.
        return False
1526
1526
1527
1527
1528 class workingctx(committablectx):
1528 class workingctx(committablectx):
1529 """A workingctx object makes access to data related to
1529 """A workingctx object makes access to data related to
1530 the current working directory convenient.
1530 the current working directory convenient.
1531 date - any valid date string or (unixtime, offset), or None.
1531 date - any valid date string or (unixtime, offset), or None.
1532 user - username string, or None.
1532 user - username string, or None.
1533 extra - a dictionary of extra values, or None.
1533 extra - a dictionary of extra values, or None.
1534 changes - a list of file lists as returned by localrepo.status()
1534 changes - a list of file lists as returned by localrepo.status()
1535 or None to use the repository status.
1535 or None to use the repository status.
1536 """
1536 """
1537
1537
    def __init__(
        self, repo, text=b"", user=None, date=None, extra=None, changes=None
    ):
        # The branch is taken from the dirstate unless one was explicitly
        # supplied via ``extra``.
        branch = None
        if not extra or b'branch' not in extra:
            try:
                branch = repo.dirstate.branch()
            except UnicodeDecodeError:
                raise error.Abort(_(b'branch name not in UTF-8!'))
        super(workingctx, self).__init__(
            repo, text, user, date, extra, changes, branch=branch
        )
1550
1550
1551 def __iter__(self):
1551 def __iter__(self):
1552 d = self._repo.dirstate
1552 d = self._repo.dirstate
1553 for f in d:
1553 for f in d:
1554 if d[f] != b'r':
1554 if d.get_entry(f).tracked:
1555 yield f
1555 yield f
1556
1556
1557 def __contains__(self, key):
1557 def __contains__(self, key):
1558 return self._repo.dirstate.get_entry(key).tracked
1558 return self._repo.dirstate.get_entry(key).tracked
1559
1559
    def hex(self):
        """Return the hex id of the working-directory pseudo-revision."""
        return self._repo.nodeconstants.wdirhex
1562
1562
    @propertycache
    def _parents(self):
        # Parents of the working directory come from the dirstate; a null
        # second parent is dropped so the list has one entry outside merges.
        p = self._repo.dirstate.parents()
        if p[1] == self._repo.nodeconstants.nullid:
            p = p[:-1]
        # use unfiltered repo to delay/avoid loading obsmarkers
        unfi = self._repo.unfiltered()
        return [
            changectx(
                self._repo, unfi.changelog.rev(n), n, maybe_filtered=False
            )
            for n in p
        ]
1576
1576
    def setparents(self, p1node, p2node=None):
        """Set the working directory parents, fixing up dirstate copies.

        A missing ``p2node`` defaults to the null id. Copy records that the
        dirstate cannot adjust itself (they require the parent manifests)
        are rewritten here.
        """
        if p2node is None:
            p2node = self._repo.nodeconstants.nullid
        dirstate = self._repo.dirstate
        with dirstate.parentchange():
            copies = dirstate.setparents(p1node, p2node)
            pctx = self._repo[p1node]
            if copies:
                # Adjust copy records, the dirstate cannot do it, it
                # requires access to parents manifests. Preserve them
                # only for entries added to first parent.
                for f in copies:
                    if f not in pctx and copies[f] in pctx:
                        dirstate.copy(copies[f], f)
            if p2node == self._repo.nodeconstants.nullid:
                # Without a second parent, drop copy records whose source
                # and destination are both absent from the first parent.
                for f, s in sorted(dirstate.copies().items()):
                    if f not in pctx and s not in pctx:
                        dirstate.copy(None, f)
1595
1595
    def _fileinfo(self, path):
        """Look up ``path`` via the base implementation, forcing the
        manifest cache first."""
        # populate __dict__['_manifest'] as workingctx has no _manifestdelta
        self._manifest
        return super(workingctx, self)._fileinfo(path)
1600
1600
    def _buildflagfunc(self):
        """Build a fallback flags(path) function based on parent manifests."""
        # Create a fallback function for getting file flags when the
        # filesystem doesn't support them

        copiesget = self._repo.dirstate.copies().get
        parents = self.parents()
        if len(parents) < 2:
            # when we have one parent, it's easy: copy from parent
            man = parents[0].manifest()

            def func(f):
                # Map a copied file back to its source before the lookup.
                f = copiesget(f, f)
                return man.flags(f)

        else:
            # merges are tricky: we try to reconstruct the unstored
            # result from the merge (issue1802)
            p1, p2 = parents
            pa = p1.ancestor(p2)
            m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()

            def func(f):
                f = copiesget(f, f)  # may be wrong for merges with copies
                fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
                # Keep the flag both parents agree on, otherwise keep the
                # side that changed relative to the ancestor.
                if fl1 == fl2:
                    return fl1
                if fl1 == fla:
                    return fl2
                if fl2 == fla:
                    return fl1
                return b''  # punt for conflicts

        return func
1634
1634
    @propertycache
    def _flagfunc(self):
        # The dirstate receives _buildflagfunc as the builder for a
        # manifest-based fallback, used when the filesystem cannot report
        # exec/symlink bits (see _buildflagfunc).
        return self._repo.dirstate.flagfunc(self._buildflagfunc)
1638
1638
1639 def flags(self, path):
1639 def flags(self, path):
1640 try:
1640 try:
1641 return self._flagfunc(path)
1641 return self._flagfunc(path)
1642 except OSError:
1642 except OSError:
1643 return b''
1643 return b''
1644
1644
    def filectx(self, path, filelog=None):
        """get a file context from the working directory"""
        # A pre-opened filelog may be passed through to avoid reopening it.
        return workingfilectx(
            self._repo, path, workingctx=self, filelog=filelog
        )
1650
1650
    def dirty(self, missing=False, merge=True, branch=True):
        """check whether a working directory is modified

        Note: a truthy status value (e.g. the second-parent context or a
        non-empty file list) is returned as-is rather than normalized to
        True, so callers must treat the result as a boolean.
        """
        # check subrepos first
        for s in sorted(self.substate):
            if self.sub(s).dirty(missing=missing):
                return True
        # check current working dir
        return (
            (merge and self.p2())
            or (branch and self.branch() != self.p1().branch())
            or self.modified()
            or self.added()
            or self.removed()
            or (missing and self.deleted())
        )
1666
1666
    def add(self, list, prefix=b""):
        """Start tracking the given files in the dirstate.

        Returns the list of rejected files (missing, or neither a regular
        file nor a symlink).  NOTE(review): the parameter name ``list``
        shadows the builtin but is kept for API compatibility.
        """
        with self._repo.wlock():
            ui, ds = self._repo.ui, self._repo.dirstate
            uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
            rejected = []
            lstat = self._repo.wvfs.lstat
            for f in list:
                # ds.pathto() returns an absolute file when this is invoked from
                # the keyword extension. That gets flagged as non-portable on
                # Windows, since it contains the drive letter and colon.
                scmutil.checkportable(ui, os.path.join(prefix, f))
                try:
                    st = lstat(f)
                except OSError:
                    ui.warn(_(b"%s does not exist!\n") % uipath(f))
                    rejected.append(f)
                    continue
                # Warn (but do not reject) when the file exceeds the
                # configured large-file limit.
                limit = ui.configbytes(b'ui', b'large-file-limit')
                if limit != 0 and st.st_size > limit:
                    ui.warn(
                        _(
                            b"%s: up to %d MB of RAM may be required "
                            b"to manage this file\n"
                            b"(use 'hg revert %s' to cancel the "
                            b"pending addition)\n"
                        )
                        % (f, 3 * st.st_size // 1000000, uipath(f))
                    )
                if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
                    ui.warn(
                        _(
                            b"%s not added: only files and symlinks "
                            b"supported currently\n"
                        )
                        % uipath(f)
                    )
                    rejected.append(f)
                elif not ds.set_tracked(f):
                    # Already tracked: warn but do not count as rejected.
                    ui.warn(_(b"%s already tracked!\n") % uipath(f))
            return rejected
1707
1707
1708 def forget(self, files, prefix=b""):
1708 def forget(self, files, prefix=b""):
1709 with self._repo.wlock():
1709 with self._repo.wlock():
1710 ds = self._repo.dirstate
1710 ds = self._repo.dirstate
1711 uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
1711 uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
1712 rejected = []
1712 rejected = []
1713 for f in files:
1713 for f in files:
1714 if not ds.set_untracked(f):
1714 if not ds.set_untracked(f):
1715 self._repo.ui.warn(_(b"%s not tracked!\n") % uipath(f))
1715 self._repo.ui.warn(_(b"%s not tracked!\n") % uipath(f))
1716 rejected.append(f)
1716 rejected.append(f)
1717 return rejected
1717 return rejected
1718
1718
1719 def copy(self, source, dest):
1719 def copy(self, source, dest):
1720 try:
1720 try:
1721 st = self._repo.wvfs.lstat(dest)
1721 st = self._repo.wvfs.lstat(dest)
1722 except OSError as err:
1722 except OSError as err:
1723 if err.errno != errno.ENOENT:
1723 if err.errno != errno.ENOENT:
1724 raise
1724 raise
1725 self._repo.ui.warn(
1725 self._repo.ui.warn(
1726 _(b"%s does not exist!\n") % self._repo.dirstate.pathto(dest)
1726 _(b"%s does not exist!\n") % self._repo.dirstate.pathto(dest)
1727 )
1727 )
1728 return
1728 return
1729 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1729 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1730 self._repo.ui.warn(
1730 self._repo.ui.warn(
1731 _(b"copy failed: %s is not a file or a symbolic link\n")
1731 _(b"copy failed: %s is not a file or a symbolic link\n")
1732 % self._repo.dirstate.pathto(dest)
1732 % self._repo.dirstate.pathto(dest)
1733 )
1733 )
1734 else:
1734 else:
1735 with self._repo.wlock():
1735 with self._repo.wlock():
1736 ds = self._repo.dirstate
1736 ds = self._repo.dirstate
1737 ds.set_tracked(dest)
1737 ds.set_tracked(dest)
1738 ds.copy(source, dest)
1738 ds.copy(source, dest)
1739
1739
    def match(
        self,
        pats=None,
        include=None,
        exclude=None,
        default=b'glob',
        listsubrepos=False,
        badfn=None,
        cwd=None,
    ):
        """return a matcher built for this context's repository

        Patterns are interpreted relative to ``cwd``, defaulting to the
        repository's current working directory.
        """
        r = self._repo
        if not cwd:
            cwd = r.getcwd()

        # Only a case insensitive filesystem needs magic to translate user input
        # to actual case in the filesystem.
        icasefs = not util.fscasesensitive(r.root)
        return matchmod.match(
            r.root,
            cwd,
            pats,
            include,
            exclude,
            default,
            auditor=r.auditor,
            ctx=self,
            listsubrepos=listsubrepos,
            badfn=badfn,
            icasefs=icasefs,
        )
1770
1770
1771 def _filtersuspectsymlink(self, files):
1771 def _filtersuspectsymlink(self, files):
1772 if not files or self._repo.dirstate._checklink:
1772 if not files or self._repo.dirstate._checklink:
1773 return files
1773 return files
1774
1774
1775 # Symlink placeholders may get non-symlink-like contents
1775 # Symlink placeholders may get non-symlink-like contents
1776 # via user error or dereferencing by NFS or Samba servers,
1776 # via user error or dereferencing by NFS or Samba servers,
1777 # so we filter out any placeholders that don't look like a
1777 # so we filter out any placeholders that don't look like a
1778 # symlink
1778 # symlink
1779 sane = []
1779 sane = []
1780 for f in files:
1780 for f in files:
1781 if self.flags(f) == b'l':
1781 if self.flags(f) == b'l':
1782 d = self[f].data()
1782 d = self[f].data()
1783 if (
1783 if (
1784 d == b''
1784 d == b''
1785 or len(d) >= 1024
1785 or len(d) >= 1024
1786 or b'\n' in d
1786 or b'\n' in d
1787 or stringutil.binary(d)
1787 or stringutil.binary(d)
1788 ):
1788 ):
1789 self._repo.ui.debug(
1789 self._repo.ui.debug(
1790 b'ignoring suspect symlink placeholder "%s"\n' % f
1790 b'ignoring suspect symlink placeholder "%s"\n' % f
1791 )
1791 )
1792 continue
1792 continue
1793 sane.append(f)
1793 sane.append(f)
1794 return sane
1794 return sane
1795
1795
    def _checklookup(self, files):
        """recheck files whose dirstate entry was ambiguous

        ``files`` are paths that could not be classified from stat data
        alone. Returns ``(modified, deleted, fixup)`` where ``fixup``
        lists files that turned out to be clean after a full content
        compare against the first parent.
        """
        # check for any possibly clean files
        if not files:
            return [], [], []

        modified = []
        deleted = []
        fixup = []
        pctx = self._parents[0]
        # do a full compare of any files that might have changed
        for f in sorted(files):
            try:
                # This will return True for a file that got replaced by a
                # directory in the interim, but fixing that is pretty hard.
                if (
                    f not in pctx
                    or self.flags(f) != pctx.flags(f)
                    or pctx[f].cmp(self[f])
                ):
                    modified.append(f)
                else:
                    fixup.append(f)
            except (IOError, OSError):
                # A file become inaccessible in between? Mark it as deleted,
                # matching dirstate behavior (issue5584).
                # The dirstate has more complex behavior around whether a
                # missing file matches a directory, etc, but we don't need to
                # bother with that: if f has made it to this point, we're sure
                # it's in the dirstate.
                deleted.append(f)

        return modified, deleted, fixup
1828
1828
    def _poststatusfixup(self, status, fixup):
        """update dirstate for files that are actually clean

        Also runs any registered post-dirstate-status callbacks. The write
        is best-effort: it is skipped if the wlock cannot be taken or if
        the dirstate changed on disk since it was read (identity check).
        """
        poststatus = self._repo.postdsstatus()
        if fixup or poststatus or self._repo.dirstate._dirty:
            try:
                oldid = self._repo.dirstate.identity()

                # updating the dirstate is optional
                # so we don't wait on the lock
                # wlock can invalidate the dirstate, so cache normal _after_
                # taking the lock
                with self._repo.wlock(False):
                    dirstate = self._repo.dirstate
                    if dirstate.identity() == oldid:
                        if fixup:
                            if dirstate.pendingparentchange():
                                normal = lambda f: dirstate.update_file(
                                    f, p1_tracked=True, wc_tracked=True
                                )
                            else:
                                normal = dirstate.set_clean
                            for f in fixup:
                                normal(f)
                            # write changes out explicitly, because nesting
                            # wlock at runtime may prevent 'wlock.release()'
                            # after this block from doing so for subsequent
                            # changing files
                            tr = self._repo.currenttransaction()
                            self._repo.dirstate.write(tr)

                        if poststatus:
                            for ps in poststatus:
                                ps(self, status)
                    else:
                        # in this case, writing changes out breaks
                        # consistency, because .hg/dirstate was
                        # already changed simultaneously after last
                        # caching (see also issue5584 for detail)
                        self._repo.ui.debug(
                            b'skip updating dirstate: identity mismatch\n'
                        )
            except error.LockError:
                # losing the lock race just skips this optional fixup
                pass
            finally:
                # Even if the wlock couldn't be grabbed, clear out the list.
                self._repo.clearpostdsstatus()
1875
1875
    def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
        '''Gets the status from the dirstate -- internal use only.'''
        subrepos = []
        if b'.hgsub' in self:
            subrepos = sorted(self.substate)
        # ``cmp`` holds files whose status could not be determined from
        # stat data alone and need a full content comparison
        cmp, s = self._repo.dirstate.status(
            match, subrepos, ignored=ignored, clean=clean, unknown=unknown
        )

        # check for any possibly clean files
        fixup = []
        if cmp:
            modified2, deleted2, fixup = self._checklookup(cmp)
            s.modified.extend(modified2)
            s.deleted.extend(deleted2)

        if fixup and clean:
            s.clean.extend(fixup)

        # opportunistically persist the now-known-clean entries
        self._poststatusfixup(s, fixup)

        if match.always():
            # cache for performance
            if s.unknown or s.ignored or s.clean:
                # "_status" is cached with list*=False in the normal route
                self._status = scmutil.status(
                    s.modified, s.added, s.removed, s.deleted, [], [], []
                )
            else:
                self._status = s

        return s
1908
1908
1909 @propertycache
1909 @propertycache
1910 def _copies(self):
1910 def _copies(self):
1911 p1copies = {}
1911 p1copies = {}
1912 p2copies = {}
1912 p2copies = {}
1913 parents = self._repo.dirstate.parents()
1913 parents = self._repo.dirstate.parents()
1914 p1manifest = self._repo[parents[0]].manifest()
1914 p1manifest = self._repo[parents[0]].manifest()
1915 p2manifest = self._repo[parents[1]].manifest()
1915 p2manifest = self._repo[parents[1]].manifest()
1916 changedset = set(self.added()) | set(self.modified())
1916 changedset = set(self.added()) | set(self.modified())
1917 narrowmatch = self._repo.narrowmatch()
1917 narrowmatch = self._repo.narrowmatch()
1918 for dst, src in self._repo.dirstate.copies().items():
1918 for dst, src in self._repo.dirstate.copies().items():
1919 if dst not in changedset or not narrowmatch(dst):
1919 if dst not in changedset or not narrowmatch(dst):
1920 continue
1920 continue
1921 if src in p1manifest:
1921 if src in p1manifest:
1922 p1copies[dst] = src
1922 p1copies[dst] = src
1923 elif src in p2manifest:
1923 elif src in p2manifest:
1924 p2copies[dst] = src
1924 p2copies[dst] = src
1925 return p1copies, p2copies
1925 return p1copies, p2copies
1926
1926
    @propertycache
    def _manifest(self):
        """generate a manifest corresponding to the values in self._status

        This reuse the file nodeid from parent, but we use special node
        identifiers for added and modified files. This is used by manifests
        merge to see that files are different and by update logic to avoid
        deleting newly added files.
        """
        # cached after first access (propertycache); see setbase-style
        # invalidation in subclasses that mutate status
        return self._buildstatusmanifest(self._status)
1937
1937
1938 def _buildstatusmanifest(self, status):
1938 def _buildstatusmanifest(self, status):
1939 """Builds a manifest that includes the given status results."""
1939 """Builds a manifest that includes the given status results."""
1940 parents = self.parents()
1940 parents = self.parents()
1941
1941
1942 man = parents[0].manifest().copy()
1942 man = parents[0].manifest().copy()
1943
1943
1944 ff = self._flagfunc
1944 ff = self._flagfunc
1945 for i, l in (
1945 for i, l in (
1946 (self._repo.nodeconstants.addednodeid, status.added),
1946 (self._repo.nodeconstants.addednodeid, status.added),
1947 (self._repo.nodeconstants.modifiednodeid, status.modified),
1947 (self._repo.nodeconstants.modifiednodeid, status.modified),
1948 ):
1948 ):
1949 for f in l:
1949 for f in l:
1950 man[f] = i
1950 man[f] = i
1951 try:
1951 try:
1952 man.setflag(f, ff(f))
1952 man.setflag(f, ff(f))
1953 except OSError:
1953 except OSError:
1954 pass
1954 pass
1955
1955
1956 for f in status.deleted + status.removed:
1956 for f in status.deleted + status.removed:
1957 if f in man:
1957 if f in man:
1958 del man[f]
1958 del man[f]
1959
1959
1960 return man
1960 return man
1961
1961
    def _buildstatus(
        self, other, s, match, listignored, listclean, listunknown
    ):
        """build a status with respect to another context

        This includes logic for maintaining the fast path of status when
        comparing the working directory against its parent, which is to skip
        building a new manifest if self (working directory) is not comparing
        against its parent (repo['.']).
        """
        s = self._dirstatestatus(match, listignored, listclean, listunknown)
        # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
        # might have accidentally ended up with the entire contents of the file
        # they are supposed to be linking to.
        s.modified[:] = self._filtersuspectsymlink(s.modified)
        if other != self._repo[b'.']:
            # slow path: delegate to the generic manifest-based comparison
            s = super(workingctx, self)._buildstatus(
                other, s, match, listignored, listclean, listunknown
            )
        return s
1982
1982
    def _matchstatus(self, other, match):
        """override the match method with a filter for directory patterns

        We use inheritance to customize the match.bad method only in cases of
        workingctx since it belongs only to the working directory when
        comparing against the parent changeset.

        If we aren't comparing against the working directory's parent, then we
        just use the default match object sent to us.
        """
        if other != self._repo[b'.']:

            def bad(f, msg):
                # 'f' may be a directory pattern from 'match.files()',
                # so 'f not in ctx1' is not enough
                if f not in other and not other.hasdir(f):
                    self._repo.ui.warn(
                        b'%s: %s\n' % (self._repo.dirstate.pathto(f), msg)
                    )

            # note: mutates the passed-in matcher in place
            match.bad = bad
        return match
2005
2005
2006 def walk(self, match):
2006 def walk(self, match):
2007 '''Generates matching file names.'''
2007 '''Generates matching file names.'''
2008 return sorted(
2008 return sorted(
2009 self._repo.dirstate.walk(
2009 self._repo.dirstate.walk(
2010 self._repo.narrowmatch(match),
2010 self._repo.narrowmatch(match),
2011 subrepos=sorted(self.substate),
2011 subrepos=sorted(self.substate),
2012 unknown=True,
2012 unknown=True,
2013 ignored=False,
2013 ignored=False,
2014 )
2014 )
2015 )
2015 )
2016
2016
2017 def matches(self, match):
2017 def matches(self, match):
2018 match = self._repo.narrowmatch(match)
2018 match = self._repo.narrowmatch(match)
2019 ds = self._repo.dirstate
2019 ds = self._repo.dirstate
2020 return sorted(f for f in ds.matches(match) if ds[f] != b'r')
2020 return sorted(f for f in ds.matches(match) if ds[f] != b'r')
2021
2021
    def markcommitted(self, node):
        """update the dirstate after the working copy was committed as ``node``"""
        with self._repo.dirstate.parentchange():
            # everything modified or added is now clean relative to p1
            for f in self.modified() + self.added():
                self._repo.dirstate.update_file(
                    f, p1_tracked=True, wc_tracked=True
                )
            # removed files drop out of the dirstate entirely
            for f in self.removed():
                self._repo.dirstate.update_file(
                    f, p1_tracked=False, wc_tracked=False
                )
            self._repo.dirstate.setparents(node)
        self._repo._quick_access_changeid_invalidate()

        sparse.aftercommit(self._repo, node)

        # write changes out explicitly, because nesting wlock at
        # runtime may prevent 'wlock.release()' in 'repo.commit()'
        # from immediately doing so for subsequent changing files
        self._repo.dirstate.write(self._repo.currenttransaction())
2041
2041
2042 def mergestate(self, clean=False):
2042 def mergestate(self, clean=False):
2043 if clean:
2043 if clean:
2044 return mergestatemod.mergestate.clean(self._repo)
2044 return mergestatemod.mergestate.clean(self._repo)
2045 return mergestatemod.mergestate.read(self._repo)
2045 return mergestatemod.mergestate.read(self._repo)
2046
2046
2047
2047
class committablefilectx(basefilectx):
    """A committablefilectx provides common functionality for a file context
    that wants the ability to commit, e.g. workingfilectx or memfilectx."""

    def __init__(self, repo, path, filelog=None, ctx=None):
        self._repo = repo
        self._path = path
        # an uncommitted file has no changeset/filelog revision yet
        self._changeid = None
        self._filerev = self._filenode = None

        if filelog is not None:
            self._filelog = filelog
        if ctx:
            self._changectx = ctx

    def __nonzero__(self):
        # a committable (uncommitted) file context always "exists"
        return True

    __bool__ = __nonzero__

    def linkrev(self):
        # linked to self._changectx no matter if file is modified or not
        return self.rev()

    def renamed(self):
        """return (source path, source filenode) when this file is a copy

        Returns None when no copy source is recorded.
        """
        path = self.copysource()
        if not path:
            return None
        return (
            path,
            self._changectx._parents[0]._manifest.get(
                path, self._repo.nodeconstants.nullid
            ),
        )

    def parents(self):
        '''return parent filectxs, following copies if necessary'''

        def filenode(ctx, path):
            # nullid when the file does not exist in that parent
            return ctx._manifest.get(path, self._repo.nodeconstants.nullid)

        path = self._path
        fl = self._filelog
        pcl = self._changectx._parents
        renamed = self.renamed()

        if renamed:
            # for a copy/rename, the first parent is the copy source
            pl = [renamed + (None,)]
        else:
            pl = [(path, filenode(pcl[0], path), fl)]

        for pc in pcl[1:]:
            pl.append((path, filenode(pc, path), fl))

        # drop nullid entries: the file is absent from those parents
        return [
            self._parentfilectx(p, fileid=n, filelog=l)
            for p, n, l in pl
            if n != self._repo.nodeconstants.nullid
        ]

    def children(self):
        # an uncommitted revision has no children
        return []
2110
2110
2111
2111
class workingfilectx(committablefilectx):
    """A workingfilectx object makes access to data related to a particular
    file in the working directory convenient."""

    def __init__(self, repo, path, filelog=None, workingctx=None):
        super(workingfilectx, self).__init__(repo, path, filelog, workingctx)

    @propertycache
    def _changectx(self):
        # lazily bind to a fresh working context when none was supplied
        return workingctx(self._repo)

    def data(self):
        """return the file's content as read from the working directory"""
        return self._repo.wread(self._path)

    def copysource(self):
        """return the copy source recorded in the dirstate, or None"""
        return self._repo.dirstate.copied(self._path)

    def size(self):
        return self._repo.wvfs.lstat(self._path).st_size

    def lstat(self):
        return self._repo.wvfs.lstat(self._path)

    def date(self):
        """return (unixtime, tzoffset) of the file on disk

        Falls back to the changeset's date when the file is missing.
        """
        t, tz = self._changectx.date()
        try:
            return (self._repo.wvfs.lstat(self._path)[stat.ST_MTIME], tz)
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
            return (t, tz)

    def exists(self):
        return self._repo.wvfs.exists(self._path)

    def lexists(self):
        # like exists(), but also True for a dangling symlink
        return self._repo.wvfs.lexists(self._path)

    def audit(self):
        return self._repo.wvfs.audit(self._path)

    def cmp(self, fctx):
        """compare with other file context

        returns True if different than fctx.
        """
        # fctx should be a filectx (not a workingfilectx)
        # invert comparison to reuse the same code path
        return fctx.cmp(self)

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        rmdir = self._repo.ui.configbool(b'experimental', b'removeemptydirs')
        self._repo.wvfs.unlinkpath(
            self._path, ignoremissing=ignoremissing, rmdir=rmdir
        )

    def write(self, data, flags, backgroundclose=False, **kwargs):
        """wraps repo.wwrite"""
        return self._repo.wwrite(
            self._path, data, flags, backgroundclose=backgroundclose, **kwargs
        )

    def markcopied(self, src):
        """marks this file a copy of `src`"""
        self._repo.dirstate.copy(src, self._path)

    def clearunknown(self):
        """Removes conflicting items in the working directory so that
        ``write()`` can be called successfully.
        """
        wvfs = self._repo.wvfs
        f = self._path
        wvfs.audit(f)
        if self._repo.ui.configbool(
            b'experimental', b'merge.checkpathconflicts'
        ):
            # remove files under the directory as they should already be
            # warned and backed up
            if wvfs.isdir(f) and not wvfs.islink(f):
                wvfs.rmtree(f, forcibly=True)
            for p in reversed(list(pathutil.finddirs(f))):
                if wvfs.isfileorlink(p):
                    wvfs.unlink(p)
                    break
        else:
            # don't remove files if path conflicts are not processed
            if wvfs.isdir(f) and not wvfs.islink(f):
                wvfs.removedirs(f)

    def setflags(self, l, x):
        # l: make a symlink, x: set the executable bit
        self._repo.wvfs.setflags(self._path, l, x)
2204
2204
2205
2205
class overlayworkingctx(committablectx):
    """Wraps another mutable context with a write-back cache that can be
    converted into a commit context.

    self._cache[path] maps to a dict with keys: {
        'exists': bool?
        'date': date?
        'data': str?
        'flags': str?
        'copied': str? (path or None)
    }
    If `exists` is True, `flags` must be non-None and 'date' is non-None. If it
    is `False`, the file was deleted.
    """

    def __init__(self, repo):
        super(overlayworkingctx, self).__init__(repo)
        # Start with an empty write-back cache and no merge state.
        self.clean()

    def setbase(self, wrappedctx):
        """Set the underlying (wrapped) context; it becomes the sole parent."""
        self._wrappedctx = wrappedctx
        self._parents = [wrappedctx]
        # Drop old manifest cache as it is now out of date.
        # This is necessary when, e.g., rebasing several nodes with one
        # ``overlayworkingctx`` (e.g. with --collapse).
        util.clearcachedproperty(self, b'_manifest')

    def setparents(self, p1node, p2node=None):
        """Record the second parent; p1 must remain the wrapped context."""
        if p2node is None:
            p2node = self._repo.nodeconstants.nullid
        assert p1node == self._wrappedctx.node()
        self._parents = [self._wrappedctx, self._repo.unfiltered()[p2node]]

    def data(self, path):
        """Return the file content for ``path``, preferring cached data and
        falling back to the wrapped context.

        Raises ProgrammingError if the cache records the path as deleted.
        """
        if self.isdirty(path):
            if self._cache[path][b'exists']:
                if self._cache[path][b'data'] is not None:
                    return self._cache[path][b'data']
                else:
                    # Must fallback here, too, because we only set flags.
                    return self._wrappedctx[path].data()
            else:
                raise error.ProgrammingError(
                    b"No such file or directory: %s" % path
                )
        else:
            return self._wrappedctx[path].data()

    @propertycache
    def _manifest(self):
        # Start from p1's manifest and overlay the cached adds/mods/removes.
        parents = self.parents()
        man = parents[0].manifest().copy()

        flag = self._flagfunc
        for path in self.added():
            man[path] = self._repo.nodeconstants.addednodeid
            man.setflag(path, flag(path))
        for path in self.modified():
            man[path] = self._repo.nodeconstants.modifiednodeid
            man.setflag(path, flag(path))
        for path in self.removed():
            del man[path]
        return man

    @propertycache
    def _flagfunc(self):
        # Flags come straight from the cache; only valid for dirty paths.
        def f(path):
            return self._cache[path][b'flags']

        return f

    def files(self):
        """Return all paths touched by this overlay, sorted."""
        return sorted(self.added() + self.modified() + self.removed())

    def modified(self):
        """Cached paths that exist both here and in the wrapped context."""
        return [
            f
            for f in self._cache.keys()
            if self._cache[f][b'exists'] and self._existsinparent(f)
        ]

    def added(self):
        """Cached paths that exist here but not in the wrapped context."""
        return [
            f
            for f in self._cache.keys()
            if self._cache[f][b'exists'] and not self._existsinparent(f)
        ]

    def removed(self):
        """Cached paths marked deleted that exist in the wrapped context."""
        return [
            f
            for f in self._cache.keys()
            if not self._cache[f][b'exists'] and self._existsinparent(f)
        ]

    def p1copies(self):
        """Return {dest: source} copy records, limited to the narrow spec."""
        copies = {}
        narrowmatch = self._repo.narrowmatch()
        for f in self._cache.keys():
            if not narrowmatch(f):
                continue
            copies.pop(f, None)  # delete if it exists
            source = self._cache[f][b'copied']
            if source:
                copies[f] = source
        return copies

    def p2copies(self):
        # NOTE: intentionally the same computation as ``p1copies()`` --
        # the cache does not distinguish which parent a copy came from.
        copies = {}
        narrowmatch = self._repo.narrowmatch()
        for f in self._cache.keys():
            if not narrowmatch(f):
                continue
            copies.pop(f, None)  # delete if it exists
            source = self._cache[f][b'copied']
            if source:
                copies[f] = source
        return copies

    def isinmemory(self):
        return True

    def filedate(self, path):
        """Return the cached date for a dirty path, else the wrapped date."""
        if self.isdirty(path):
            return self._cache[path][b'date']
        else:
            return self._wrappedctx[path].date()

    def markcopied(self, path, origin):
        """Record that ``path`` was copied from ``origin``, preserving the
        current date and flags."""
        self._markdirty(
            path,
            exists=True,
            date=self.filedate(path),
            flags=self.flags(path),
            copied=origin,
        )

    def copydata(self, path):
        """Return the copy source for ``path``, or None if not dirty."""
        if self.isdirty(path):
            return self._cache[path][b'copied']
        else:
            return None

    def flags(self, path):
        """Return the flags string for ``path``; cached value wins.

        Raises ProgrammingError if the cache records the path as deleted.
        """
        if self.isdirty(path):
            if self._cache[path][b'exists']:
                return self._cache[path][b'flags']
            else:
                raise error.ProgrammingError(
                    b"No such file or directory: %s" % path
                )
        else:
            return self._wrappedctx[path].flags()

    def __contains__(self, key):
        # A cached entry is authoritative (it may record a deletion);
        # otherwise defer to p1.
        if key in self._cache:
            return self._cache[key][b'exists']
        return key in self.p1()

    def _existsinparent(self, path):
        try:
            # ``commitctx` raises a ``ManifestLookupError`` if a path does not
            # exist, unlike ``workingctx``, which returns a ``workingfilectx``
            # with an ``exists()`` function.
            self._wrappedctx[path]
            return True
        except error.ManifestLookupError:
            return False

    def _auditconflicts(self, path):
        """Replicates conflict checks done by wvfs.write().

        Since we never write to the filesystem and never call `applyupdates` in
        IMM, we'll never check that a path is actually writable -- e.g., because
        it adds `a/foo`, but `a` is actually a file in the other commit.
        """

        def fail(path, component):
            # p1() is the base and we're receiving "writes" for p2()'s
            # files.
            if b'l' in self.p1()[component].flags():
                raise error.Abort(
                    b"error: %s conflicts with symlink %s "
                    b"in %d." % (path, component, self.p1().rev())
                )
            else:
                raise error.Abort(
                    b"error: '%s' conflicts with file '%s' in "
                    b"%d." % (path, component, self.p1().rev())
                )

        # Test that each new directory to be created to write this path from p2
        # is not a file in p1.
        components = path.split(b'/')
        for i in pycompat.xrange(len(components)):
            component = b"/".join(components[0:i])
            if component in self:
                fail(path, component)

        # Test the other direction -- that this path from p2 isn't a directory
        # in p1 (test that p1 doesn't have any paths matching `path/*`).
        match = self.match([path], default=b'path')
        mfiles = list(self.p1().manifest().walk(match))
        if len(mfiles) > 0:
            if len(mfiles) == 1 and mfiles[0] == path:
                return
            # omit the files which are deleted in current IMM wctx
            mfiles = [m for m in mfiles if m in self]
            if not mfiles:
                return
            raise error.Abort(
                b"error: file '%s' cannot be written because "
                b" '%s/' is a directory in %s (containing %d "
                b"entries: %s)"
                % (path, path, self.p1(), len(mfiles), b', '.join(mfiles))
            )

    def write(self, path, data, flags=b'', **kwargs):
        """Record new content for ``path`` in the cache (no filesystem I/O)."""
        if data is None:
            raise error.ProgrammingError(b"data must be non-None")
        self._auditconflicts(path)
        self._markdirty(
            path, exists=True, data=data, date=dateutil.makedate(), flags=flags
        )

    def setflags(self, path, l, x):
        """Record new flags for ``path``; symlink (``l``) wins over exec."""
        flag = b''
        if l:
            flag = b'l'
        elif x:
            flag = b'x'
        self._markdirty(path, exists=True, date=dateutil.makedate(), flags=flag)

    def remove(self, path):
        """Record a deletion of ``path`` in the cache."""
        self._markdirty(path, exists=False)

    def exists(self, path):
        """exists behaves like `lexists`, but needs to follow symlinks and
        return False if they are broken.
        """
        if self.isdirty(path):
            # If this path exists and is a symlink, "follow" it by calling
            # exists on the destination path.
            if (
                self._cache[path][b'exists']
                and b'l' in self._cache[path][b'flags']
            ):
                return self.exists(self._cache[path][b'data'].strip())
            else:
                return self._cache[path][b'exists']

        return self._existsinparent(path)

    def lexists(self, path):
        """lexists returns True if the path exists"""
        if self.isdirty(path):
            return self._cache[path][b'exists']

        return self._existsinparent(path)

    def size(self, path):
        """Return the content length of ``path``; cached data wins.

        Raises ProgrammingError if the cache records the path as deleted.
        """
        if self.isdirty(path):
            if self._cache[path][b'exists']:
                return len(self._cache[path][b'data'])
            else:
                raise error.ProgrammingError(
                    b"No such file or directory: %s" % path
                )
        return self._wrappedctx[path].size()

    def tomemctx(
        self,
        text,
        branch=None,
        extra=None,
        date=None,
        parents=None,
        user=None,
        editor=None,
    ):
        """Converts this ``overlayworkingctx`` into a ``memctx`` ready to be
        committed.

        ``text`` is the commit message.
        ``parents`` (optional) are rev numbers.
        """
        # Default parents to the wrapped context if not passed.
        if parents is None:
            parents = self.parents()
            if len(parents) == 1:
                parents = (parents[0], None)

        # ``parents`` is passed as rev numbers; convert to ``commitctxs``.
        if parents[1] is None:
            parents = (self._repo[parents[0]], None)
        else:
            parents = (self._repo[parents[0]], self._repo[parents[1]])

        files = self.files()

        def getfile(repo, memctx, path):
            if self._cache[path][b'exists']:
                return memfilectx(
                    repo,
                    memctx,
                    path,
                    self._cache[path][b'data'],
                    b'l' in self._cache[path][b'flags'],
                    b'x' in self._cache[path][b'flags'],
                    self._cache[path][b'copied'],
                )
            else:
                # Returning None, but including the path in `files`, is
                # necessary for memctx to register a deletion.
                return None

        if branch is None:
            branch = self._wrappedctx.branch()

        return memctx(
            self._repo,
            parents,
            text,
            files,
            getfile,
            date=date,
            extra=extra,
            user=user,
            branch=branch,
            editor=editor,
        )

    def tomemctx_for_amend(self, precursor):
        """Build a memctx that amends ``precursor``, inheriting its metadata
        and recording it in ``amend_source``."""
        extra = precursor.extra().copy()
        extra[b'amend_source'] = precursor.hex()
        return self.tomemctx(
            text=precursor.description(),
            branch=precursor.branch(),
            extra=extra,
            date=precursor.date(),
            user=precursor.user(),
        )

    def isdirty(self, path):
        """True if ``path`` has a cache entry (including recorded deletions)."""
        return path in self._cache

    def clean(self):
        """Discard all cached changes and any in-memory merge state."""
        self._mergestate = None
        self._cache = {}

    def _compact(self):
        """Removes keys from the cache that are actually clean, by comparing
        them with the underlying context.

        This can occur during the merge process, e.g. by passing --tool :local
        to resolve a conflict.
        """
        keys = []
        # This won't be perfect, but can help performance significantly when
        # using things like remotefilelog.
        scmutil.prefetchfiles(
            self.repo(),
            [
                (
                    self.p1().rev(),
                    scmutil.matchfiles(self.repo(), self._cache.keys()),
                )
            ],
        )

        for path in self._cache.keys():
            cache = self._cache[path]
            try:
                underlying = self._wrappedctx[path]
                if (
                    underlying.data() == cache[b'data']
                    and underlying.flags() == cache[b'flags']
                ):
                    keys.append(path)
            except error.ManifestLookupError:
                # Path not in the underlying manifest (created).
                continue

        # Delete after the scan so the cache isn't mutated while iterated.
        for path in keys:
            del self._cache[path]
        return keys

    def _markdirty(
        self, path, exists, data=None, date=None, flags=b'', copied=None
    ):
        # data not provided, let's see if we already have some; if not, let's
        # grab it from our underlying context, so that we always have data if
        # the file is marked as existing.
        if exists and data is None:
            oldentry = self._cache.get(path) or {}
            data = oldentry.get(b'data')
            if data is None:
                data = self._wrappedctx[path].data()

        self._cache[path] = {
            b'exists': exists,
            b'data': data,
            b'date': date,
            b'flags': flags,
            b'copied': copied,
        }
        # The cached manifest no longer reflects this change; recompute lazily.
        util.clearcachedproperty(self, b'_manifest')

    def filectx(self, path, filelog=None):
        return overlayworkingfilectx(
            self._repo, path, parent=self, filelog=filelog
        )

    def mergestate(self, clean=False):
        """Return the in-memory merge state, creating it on first use or
        when ``clean`` is requested."""
        if clean or self._mergestate is None:
            self._mergestate = mergestatemod.memmergestate(self._repo)
        return self._mergestate
2623
2623
2624
2624
class overlayworkingfilectx(committablefilectx):
    """Wrap a ``workingfilectx`` but intercepts all writes into an in-memory
    cache, which can be flushed through later by calling ``flush()``.

    Nearly every operation delegates to ``self._parent`` (the owning
    ``overlayworkingctx``), keyed by this file's path.
    """

    def __init__(self, repo, path, filelog=None, parent=None):
        super(overlayworkingfilectx, self).__init__(repo, path, filelog, parent)
        self._repo = repo
        self._parent = parent
        self._path = path

    def cmp(self, fctx):
        # Returns True when contents DIFFER (i.e. "needs update").
        return self.data() != fctx.data()

    def changectx(self):
        return self._parent

    def data(self):
        return self._parent.data(self._path)

    def date(self):
        return self._parent.filedate(self._path)

    def exists(self):
        # ``lexists`` on the parent already resolves in-memory state.
        return self.lexists()

    def lexists(self):
        return self._parent.exists(self._path)

    def copysource(self):
        return self._parent.copydata(self._path)

    def size(self):
        return self._parent.size(self._path)

    def markcopied(self, origin):
        self._parent.markcopied(self._path, origin)

    def audit(self):
        # No filesystem paths are touched in-memory, so nothing to audit.
        pass

    def flags(self):
        return self._parent.flags(self._path)

    def setflags(self, islink, isexec):
        return self._parent.setflags(self._path, islink, isexec)

    def write(self, data, flags, backgroundclose=False, **kwargs):
        # ``backgroundclose`` is filesystem-specific and ignored in-memory.
        return self._parent.write(self._path, data, flags, **kwargs)

    def remove(self, ignoremissing=False):
        return self._parent.remove(self._path)

    def clearunknown(self):
        # Nothing on disk can conflict with an in-memory write.
        pass
2679
2679
2680
2680
class workingcommitctx(workingctx):
    """A workingcommitctx object makes access to data related to
    the revision being committed convenient.

    This hides changes in the working directory, if they aren't
    committed in this context.
    """

    def __init__(
        self, repo, changes, text=b"", user=None, date=None, extra=None
    ):
        super(workingcommitctx, self).__init__(
            repo, text, user, date, extra, changes
        )

    def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
        """Return matched files only in ``self._status``

        Uncommitted files appear "clean" via this context, even if
        they aren't actually so in the working directory.
        """
        # When clean files are requested, everything tracked by this
        # context but untouched by the commit counts as clean.
        if clean:
            untouched = self._changedset
            cleanfiles = [f for f in self._manifest if f not in untouched]
        else:
            cleanfiles = []
        status = self._status
        modified = [f for f in status.modified if match(f)]
        added = [f for f in status.added if match(f)]
        removed = [f for f in status.removed if match(f)]
        # deleted/unknown/ignored are always empty for a commit context.
        return scmutil.status(
            modified,
            added,
            removed,
            [],
            [],
            [],
            cleanfiles,
        )

    @propertycache
    def _changedset(self):
        """Return the set of files changed in this context"""
        status = self._status
        return set(status.modified) | set(status.added) | set(status.removed)
2723
2723
2724
2724
def makecachingfilectxfn(func):
    """Create a filectxfn that caches based on the path.

    We can't use util.cachefunc because it uses all arguments as the cache
    key and this creates a cycle since the arguments include the repo and
    memctx.
    """
    memo = {}

    def getfilectx(repo, memctx, path):
        # EAFP: consult the memo first, computing (and storing) only on a
        # miss, so ``func`` runs at most once per path.
        try:
            return memo[path]
        except KeyError:
            result = func(repo, memctx, path)
            memo[path] = result
            return result

    return getfilectx
2740
2740
2741
2741
def memfilefromctx(ctx):
    """Given a context return a memfilectx for ctx[path]

    This is a convenience method for building a memctx based on another
    context.
    """

    def getfilectx(repo, memctx, path):
        filectx = ctx[path]
        # Fetch the copy source first, mirroring the order a commit expects.
        source = filectx.copysource()
        return memfilectx(
            repo,
            memctx,
            path,
            filectx.data(),
            islink=filectx.islink(),
            isexec=filectx.isexec(),
            copysource=source,
        )

    return getfilectx
2763
2763
2764
2764
def memfilefrompatch(patchstore):
    """Given a patch (e.g. patchstore object) return a memfilectx

    This is a convenience method for building a memctx based on a patchstore.
    """

    def getfilectx(repo, memctx, path):
        filedata, filemode, source = patchstore.getfile(path)
        if filedata is None:
            # ``None`` data (with the path listed in the memctx files)
            # signals a deletion.
            return None
        link, exe = filemode
        return memfilectx(
            repo,
            memctx,
            path,
            filedata,
            islink=link,
            isexec=exe,
            copysource=source,
        )

    return getfilectx
2787
2787
2788
2788
class memctx(committablectx):
    """Use memctx to perform in-memory commits via localrepo.commitctx().

    Revision information is supplied at initialization time while
    related files data and is made available through a callback
    mechanism.  'repo' is the current localrepo, 'parents' is a
    sequence of two parent revisions identifiers (pass None for every
    missing parent), 'text' is the commit message and 'files' lists
    names of files touched by the revision (normalized and relative to
    repository root).

    filectxfn(repo, memctx, path) is a callable receiving the
    repository, the current memctx object and the normalized path of
    requested file, relative to repository root. It is fired by the
    commit function for every file in 'files', but calls order is
    undefined. If the file is available in the revision being
    committed (updated or added), filectxfn returns a memfilectx
    object. If the file was removed, filectxfn return None for recent
    Mercurial. Moved files are represented by marking the source file
    removed and the new file added with copy information (see
    memfilectx).

    user receives the committer name and defaults to current
    repository username, date is the commit date in any format
    supported by dateutil.parsedate() and defaults to current date, extra
    is a dictionary of metadata or is left empty.
    """

    # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
    # Extensions that need to retain compatibility across Mercurial 3.1 can use
    # this field to determine what to do in filectxfn.
    _returnnoneformissingfiles = True

    def __init__(
        self,
        repo,
        parents,
        text,
        files,
        filectxfn,
        user=None,
        date=None,
        extra=None,
        branch=None,
        editor=None,
    ):
        super(memctx, self).__init__(
            repo, text, user, date, extra, branch=branch
        )
        # An in-memory commit has no revision number or node id until it
        # is actually written by commitctx().
        self._rev = None
        self._node = None
        # Replace any None parent with the null node so both parent slots
        # are always valid changectx lookups.
        parents = [(p or self._repo.nodeconstants.nullid) for p in parents]
        p1, p2 = parents
        self._parents = [self._repo[p] for p in (p1, p2)]
        # Normalize the touched-file list: deduplicate and sort.
        files = sorted(set(files))
        self._files = files
        self.substate = {}

        # Accept three flavors of file source: a patch.filestore, a
        # context-like object, or an already-callable filectxfn.
        if isinstance(filectxfn, patch.filestore):
            filectxfn = memfilefrompatch(filectxfn)
        elif not callable(filectxfn):
            # if store is not callable, wrap it in a function
            filectxfn = memfilefromctx(filectxfn)

        # memoizing increases performance for e.g. vcs convert scenarios.
        self._filectxfn = makecachingfilectxfn(filectxfn)

        if editor:
            # Let the user edit the message and persist it for recovery.
            self._text = editor(self._repo, self, [])
            self._repo.savecommitmessage(self._text)

    def filectx(self, path, filelog=None):
        """get a file context from the working directory

        Returns None if file doesn't exist and should be removed."""
        return self._filectxfn(self._repo, self, path)

    def commit(self):
        """commit context to the repo"""
        return self._repo.commitctx(self)

    @propertycache
    def _manifest(self):
        """generate a manifest based on the return values of filectxfn"""

        # keep this simple for now; just worry about p1
        pctx = self._parents[0]
        man = pctx.manifest().copy()

        # Use sentinel node ids for changed entries; the real hashes are
        # only known once the commit is written.
        for f in self._status.modified:
            man[f] = self._repo.nodeconstants.modifiednodeid

        for f in self._status.added:
            man[f] = self._repo.nodeconstants.addednodeid

        for f in self._status.removed:
            if f in man:
                del man[f]

        return man

    @propertycache
    def _status(self):
        """Calculate exact status from ``files`` specified at construction"""
        man1 = self.p1().manifest()
        p2 = self._parents[1]
        # "1 < len(self._parents)" can't be used for checking
        # existence of the 2nd parent, because "memctx._parents" is
        # explicitly initialized by the list, of which length is 2.
        if p2.rev() != nullrev:
            man2 = p2.manifest()
            managing = lambda f: f in man1 or f in man2
        else:
            managing = lambda f: f in man1

        modified, added, removed = [], [], []
        for f in self._files:
            if not managing(f):
                # not present in either parent manifest: newly added
                added.append(f)
            elif self[f]:
                # filectxfn returned a filectx: the file has content
                modified.append(f)
            else:
                # filectxfn returned None: the file is removed (see
                # filectx() above)
                removed.append(f)

        return scmutil.status(modified, added, removed, [], [], [], [])

    def parents(self):
        # Hide a null second parent so single-parent commits report only
        # one parent.
        if self._parents[1].rev() == nullrev:
            return [self._parents[0]]
        return self._parents
2919
2919
2920
2920
class memfilectx(committablefilectx):
    """An in-memory file to be committed as part of a memctx.

    See memctx and committablefilectx for more details.
    """

    def __init__(
        self,
        repo,
        changectx,
        path,
        data,
        islink=False,
        isexec=False,
        copysource=None,
    ):
        """
        path is the normalized file path relative to repository root.
        data is the file content as a string.
        islink is True if the file is a symbolic link.
        isexec is True if the file is executable.
        copied is the source file path if current file was copied in the
        revision being committed, or None."""
        super(memfilectx, self).__init__(repo, path, None, changectx)
        self._data = data
        # Encode the file kind as a manifest flag string: 'l' for a
        # symlink, 'x' for an executable, '' for a plain file.
        if islink:
            self._flags = b'l'
        else:
            self._flags = b'x' if isexec else b''
        self._copysource = copysource

    def copysource(self):
        return self._copysource

    def cmp(self, fctx):
        # True when the two contents differ.
        return fctx.data() != self.data()

    def data(self):
        return self._data

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        # need to figure out what to do here
        del self._changectx[self._path]

    def write(self, data, flags, **kwargs):
        """wraps repo.wwrite"""
        self._data = data
2971
2971
2972
2972
class metadataonlyctx(committablectx):
    """Like memctx but it's reusing the manifest of different commit.
    Intended to be used by lightweight operations that are creating
    metadata-only changes.

    Revision information is supplied at initialization time. 'repo' is the
    current localrepo, 'ctx' is original revision which manifest we're reusing
    'parents' is a sequence of two parent revisions identifiers (pass None for
    every missing parent), 'text' is the commit.

    user receives the committer name and defaults to current repository
    username, date is the commit date in any format supported by
    dateutil.parsedate() and defaults to current date, extra is a dictionary of
    metadata or is left empty.
    """

    def __init__(
        self,
        repo,
        originalctx,
        parents=None,
        text=None,
        user=None,
        date=None,
        extra=None,
        editor=None,
    ):
        # Default the message to the original commit's description.
        if text is None:
            text = originalctx.description()
        super(metadataonlyctx, self).__init__(repo, text, user, date, extra)
        # Not committed yet: no revision number or node id.
        self._rev = None
        self._node = None
        self._originalctx = originalctx
        # Reuse the original revision's manifest verbatim.
        self._manifestnode = originalctx.manifestnode()
        if parents is None:
            parents = originalctx.parents()
        else:
            parents = [repo[p] for p in parents if p is not None]
        # Copy before padding so a caller-supplied list is not mutated.
        parents = parents[:]
        while len(parents) < 2:
            parents.append(repo[nullrev])
        p1, p2 = self._parents = parents

        # sanity check to ensure that the reused manifest parents are
        # manifests of our commit parents
        mp1, mp2 = self.manifestctx().parents
        if p1 != self._repo.nodeconstants.nullid and p1.manifestnode() != mp1:
            raise RuntimeError(
                r"can't reuse the manifest: its p1 "
                r"doesn't match the new ctx p1"
            )
        if p2 != self._repo.nodeconstants.nullid and p2.manifestnode() != mp2:
            raise RuntimeError(
                r"can't reuse the manifest: "
                r"its p2 doesn't match the new ctx p2"
            )

        self._files = originalctx.files()
        self.substate = {}

        if editor:
            # Let the user edit the message and persist it for recovery.
            self._text = editor(self._repo, self, [])
            self._repo.savecommitmessage(self._text)

    def manifestnode(self):
        # Node id of the reused manifest.
        return self._manifestnode

    @property
    def _manifestctx(self):
        return self._repo.manifestlog[self._manifestnode]

    def filectx(self, path, filelog=None):
        # Delegate file access to the original revision.
        return self._originalctx.filectx(path, filelog=filelog)

    def commit(self):
        """commit context to the repo"""
        return self._repo.commitctx(self)

    @property
    def _manifest(self):
        return self._originalctx.manifest()

    @propertycache
    def _status(self):
        """Calculate exact status from ``files`` specified in the ``origctx``
        and parents manifests.
        """
        man1 = self.p1().manifest()
        p2 = self._parents[1]
        # "1 < len(self._parents)" can't be used for checking
        # existence of the 2nd parent, because "metadataonlyctx._parents" is
        # explicitly initialized by the list, of which length is 2.
        if p2.rev() != nullrev:
            man2 = p2.manifest()
            managing = lambda f: f in man1 or f in man2
        else:
            managing = lambda f: f in man1

        modified, added, removed = [], [], []
        for f in self._files:
            if not managing(f):
                # present in neither parent manifest: newly added
                added.append(f)
            elif f in self:
                # still present in this revision: modified
                modified.append(f)
            else:
                # gone from this revision: removed
                removed.append(f)

        return scmutil.status(modified, added, removed, [], [], [], [])
3081
3081
3082
3082
class arbitraryfilectx(object):
    """Filectx-like access to a file at an arbitrary on-disk location,
    which need not live inside the working directory.
    """

    def __init__(self, path, repo=None):
        # Repo is optional because contrib/simplemerge uses this class.
        self._repo = repo
        self._path = path

    def cmp(self, fctx):
        # filecmp follows symlinks while ``cmp`` must not, so the fast
        # path is only safe when neither side is a symlink.
        has_symlink = b'l' in self.flags() or b'l' in fctx.flags()
        if self._repo and not has_symlink and isinstance(fctx, workingfilectx):
            # Fast path for merges where both sides are disk-backed.
            # filecmp.cmp returns True when the files match, while our
            # cmp functions return True when they differ, hence ``not``.
            other = self._repo.wjoin(fctx.path())
            return not filecmp.cmp(self.path(), other)
        return self.data() != fctx.data()

    def path(self):
        return self._path

    def flags(self):
        # Arbitrary on-disk files carry no link/exec flag information.
        return b''

    def data(self):
        return util.readfile(self._path)

    def decodeddata(self):
        with open(self._path, b"rb") as fp:
            return fp.read()

    def remove(self):
        util.unlink(self._path)

    def write(self, data, flags, **kwargs):
        assert not flags
        with open(self._path, b"wb") as fp:
            fp.write(data)
General Comments 0
You need to be logged in to leave comments. Login now