##// END OF EJS Templates
## Provenance: scraped from a Mercurial web diff view of mercurial/context.py.
## Changeset r48395:14e2f4bd (default branch), author: marmoute —
## "context: use `dirstate.set_tracked` in context.copy".
## Diff hunk header: @@ -1,3120 +1,3117 @@ — in the scrape below, each source
## line appears twice (old/new diff columns) with embedded line numbers.
# context.py - changeset and file context objects for mercurial
#
# Copyright 2006, 2007 Olivia Mackall <olivia@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno
import filecmp
import os
import stat

from .i18n import _
from .node import (
    hex,
    nullrev,
    short,
)
from .pycompat import (
    getattr,
    open,
)
from . import (
    dagop,
    encoding,
    error,
    fileset,
    match as matchmod,
    mergestate as mergestatemod,
    metadata,
    obsolete as obsmod,
    patch,
    pathutil,
    phases,
    pycompat,
    repoview,
    scmutil,
    sparse,
    subrepo,
    subrepoutil,
    util,
)
from .utils import (
    dateutil,
    stringutil,
)

# Shorthand used by the context classes below for lazily-computed,
# cached-per-instance attributes.
propertycache = util.propertycache
51
51
52
52
class basectx(object):
    """A basectx object represents the common logic for its children:
    changectx: read-only context that is already present in the repo,
    workingctx: a context that represents the working directory and can
    be committed,
    memctx: a context that represents changes in-memory and can also
    be committed."""

    def __init__(self, repo):
        self._repo = repo

    def __bytes__(self):
        return short(self.node())

    __str__ = encoding.strmethod(__bytes__)

    def __repr__(self):
        return "<%s %s>" % (type(self).__name__, str(self))

    def __eq__(self, other):
        # Contexts of different concrete types never compare equal, even when
        # they name the same revision.
        try:
            return type(self) == type(other) and self._rev == other._rev
        except AttributeError:
            return False

    def __ne__(self, other):
        return not (self == other)

    def __contains__(self, key):
        return key in self._manifest

    def __getitem__(self, key):
        return self.filectx(key)

    def __iter__(self):
        return iter(self._manifest)

    def _buildstatusmanifest(self, status):
        """Builds a manifest that includes the given status results, if this is
        a working copy context. For non-working copy contexts, it just returns
        the normal manifest."""
        return self.manifest()

    def _matchstatus(self, other, match):
        """This internal method provides a way for child objects to override the
        match operator.
        """
        return match

    def _buildstatus(
        self, other, s, match, listignored, listclean, listunknown
    ):
        """build a status with respect to another context"""
        # Load earliest manifest first for caching reasons. More specifically,
        # if you have revisions 1000 and 1001, 1001 is probably stored as a
        # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
        # 1000 and cache it so that when you read 1001, we just need to apply a
        # delta to what's in the cache. So that's one full reconstruction + one
        # delta application.
        mf2 = None
        if self.rev() is not None and self.rev() < other.rev():
            mf2 = self._buildstatusmanifest(s)
        mf1 = other._buildstatusmanifest(s)
        if mf2 is None:
            mf2 = self._buildstatusmanifest(s)

        modified, added = [], []
        removed = []
        clean = []
        deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
        deletedset = set(deleted)
        d = mf1.diff(mf2, match=match, clean=listclean)
        for fn, value in pycompat.iteritems(d):
            if fn in deletedset:
                continue
            if value is None:
                clean.append(fn)
                continue
            (node1, flag1), (node2, flag2) = value
            if node1 is None:
                added.append(fn)
            elif node2 is None:
                removed.append(fn)
            elif flag1 != flag2:
                modified.append(fn)
            elif node2 not in self._repo.nodeconstants.wdirfilenodeids:
                # When comparing files between two commits, we save time by
                # not comparing the file contents when the nodeids differ.
                # Note that this means we incorrectly report a reverted change
                # to a file as a modification.
                modified.append(fn)
            elif self[fn].cmp(other[fn]):
                modified.append(fn)
            else:
                clean.append(fn)

        if removed:
            # need to filter files if they are already reported as removed
            unknown = [
                fn
                for fn in unknown
                if fn not in mf1 and (not match or match(fn))
            ]
            ignored = [
                fn
                for fn in ignored
                if fn not in mf1 and (not match or match(fn))
            ]
            # if they're deleted, don't report them as removed
            removed = [fn for fn in removed if fn not in deletedset]

        return scmutil.status(
            modified, added, removed, deleted, unknown, ignored, clean
        )

    @propertycache
    def substate(self):
        # Subrepo state for this context, parsed lazily and cached.
        return subrepoutil.state(self, self._repo.ui)

    def subrev(self, subpath):
        return self.substate[subpath][1]

    def rev(self):
        return self._rev

    def node(self):
        return self._node

    def hex(self):
        return hex(self.node())

    def manifest(self):
        return self._manifest

    def manifestctx(self):
        return self._manifestctx

    def repo(self):
        return self._repo

    def phasestr(self):
        return phases.phasenames[self.phase()]

    def mutable(self):
        return self.phase() > phases.public

    def matchfileset(self, cwd, expr, badfn=None):
        return fileset.match(self, cwd, expr, badfn=badfn)

    def obsolete(self):
        """True if the changeset is obsolete"""
        return self.rev() in obsmod.getrevs(self._repo, b'obsolete')

    def extinct(self):
        """True if the changeset is extinct"""
        return self.rev() in obsmod.getrevs(self._repo, b'extinct')

    def orphan(self):
        """True if the changeset is not obsolete, but its ancestor is"""
        return self.rev() in obsmod.getrevs(self._repo, b'orphan')

    def phasedivergent(self):
        """True if the changeset tries to be a successor of a public changeset

        Only non-public and non-obsolete changesets may be phase-divergent.
        """
        return self.rev() in obsmod.getrevs(self._repo, b'phasedivergent')

    def contentdivergent(self):
        """Is a successor of a changeset with multiple possible successor sets

        Only non-public and non-obsolete changesets may be content-divergent.
        """
        return self.rev() in obsmod.getrevs(self._repo, b'contentdivergent')

    def isunstable(self):
        """True if the changeset is either orphan, phase-divergent or
        content-divergent"""
        return self.orphan() or self.phasedivergent() or self.contentdivergent()

    def instabilities(self):
        """return the list of instabilities affecting this changeset.

        Instabilities are returned as strings. possible values are:
        - orphan,
        - phase-divergent,
        - content-divergent.
        """
        instabilities = []
        if self.orphan():
            instabilities.append(b'orphan')
        if self.phasedivergent():
            instabilities.append(b'phase-divergent')
        if self.contentdivergent():
            instabilities.append(b'content-divergent')
        return instabilities

    def parents(self):
        """return contexts for each parent changeset"""
        return self._parents

    def p1(self):
        return self._parents[0]

    def p2(self):
        parents = self._parents
        if len(parents) == 2:
            return parents[1]
        # No second parent recorded: fall back to the null revision.
        return self._repo[nullrev]

    def _fileinfo(self, path):
        # Look up (filenode, flags) for path, preferring whatever manifest
        # data has already been materialized on this instance.
        if '_manifest' in self.__dict__:
            try:
                return self._manifest.find(path)
            except KeyError:
                raise error.ManifestLookupError(
                    self._node or b'None', path, _(b'not found in manifest')
                )
        if '_manifestdelta' in self.__dict__ or path in self.files():
            if path in self._manifestdelta:
                return (
                    self._manifestdelta[path],
                    self._manifestdelta.flags(path),
                )
        mfl = self._repo.manifestlog
        try:
            node, flag = mfl[self._changeset.manifest].find(path)
        except KeyError:
            raise error.ManifestLookupError(
                self._node or b'None', path, _(b'not found in manifest')
            )

        return node, flag

    def filenode(self, path):
        return self._fileinfo(path)[0]

    def flags(self, path):
        try:
            return self._fileinfo(path)[1]
        except error.LookupError:
            return b''

    @propertycache
    def _copies(self):
        # (p1copies, p2copies) computed from changeset metadata, cached.
        return metadata.computechangesetcopies(self)

    def p1copies(self):
        return self._copies[0]

    def p2copies(self):
        return self._copies[1]

    def sub(self, path, allowcreate=True):
        '''return a subrepo for the stored revision of path, never wdir()'''
        return subrepo.subrepo(self, path, allowcreate=allowcreate)

    def nullsub(self, path, pctx):
        return subrepo.nullsubrepo(self, path, pctx)

    def workingsub(self, path):
        """return a subrepo for the stored revision, or wdir if this is a wdir
        context.
        """
        return subrepo.subrepo(self, path, allowwdir=True)

    def match(
        self,
        pats=None,
        include=None,
        exclude=None,
        default=b'glob',
        listsubrepos=False,
        badfn=None,
        cwd=None,
    ):
        r = self._repo
        if not cwd:
            cwd = r.getcwd()
        return matchmod.match(
            r.root,
            cwd,
            pats,
            include,
            exclude,
            default,
            auditor=r.nofsauditor,
            ctx=self,
            listsubrepos=listsubrepos,
            badfn=badfn,
        )

    def diff(
        self,
        ctx2=None,
        match=None,
        changes=None,
        opts=None,
        losedatafn=None,
        pathfn=None,
        copy=None,
        copysourcematch=None,
        hunksfilterfn=None,
    ):
        """Returns a diff generator for the given contexts and matcher"""
        if ctx2 is None:
            ctx2 = self.p1()
        if ctx2 is not None:
            ctx2 = self._repo[ctx2]
        return patch.diff(
            self._repo,
            ctx2,
            self,
            match=match,
            changes=changes,
            opts=opts,
            losedatafn=losedatafn,
            pathfn=pathfn,
            copy=copy,
            copysourcematch=copysourcematch,
            hunksfilterfn=hunksfilterfn,
        )

    def dirs(self):
        return self._manifest.dirs()

    def hasdir(self, dir):
        return self._manifest.hasdir(dir)

    def status(
        self,
        other=None,
        match=None,
        listignored=False,
        listclean=False,
        listunknown=False,
        listsubrepos=False,
    ):
        """return status of files between two nodes or node and working
        directory.

        If other is None, compare this node with working directory.

        ctx1.status(ctx2) returns the status of change from ctx1 to ctx2

        Returns a mercurial.scmutils.status object.

        Data can be accessed using either tuple notation:

        (modified, added, removed, deleted, unknown, ignored, clean)

        or direct attribute access:

        s.modified, s.added, ...
        """

        ctx1 = self
        ctx2 = self._repo[other]

        # This next code block is, admittedly, fragile logic that tests for
        # reversing the contexts and wouldn't need to exist if it weren't for
        # the fast (and common) code path of comparing the working directory
        # with its first parent.
        #
        # What we're aiming for here is the ability to call:
        #
        # workingctx.status(parentctx)
        #
        # If we always built the manifest for each context and compared those,
        # then we'd be done. But the special case of the above call means we
        # just copy the manifest of the parent.
        reversed = False
        if not isinstance(ctx1, changectx) and isinstance(ctx2, changectx):
            reversed = True
            ctx1, ctx2 = ctx2, ctx1

        match = self._repo.narrowmatch(match)
        match = ctx2._matchstatus(ctx1, match)
        r = scmutil.status([], [], [], [], [], [], [])
        r = ctx2._buildstatus(
            ctx1, r, match, listignored, listclean, listunknown
        )

        if reversed:
            # Reverse added and removed. Clear deleted, unknown and ignored as
            # these make no sense to reverse.
            r = scmutil.status(
                r.modified, r.removed, r.added, [], [], [], r.clean
            )

        if listsubrepos:
            for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
                try:
                    rev2 = ctx2.subrev(subpath)
                except KeyError:
                    # A subrepo that existed in node1 was deleted between
                    # node1 and node2 (inclusive). Thus, ctx2's substate
                    # won't contain that subpath. The best we can do ignore it.
                    rev2 = None
                submatch = matchmod.subdirmatcher(subpath, match)
                s = sub.status(
                    rev2,
                    match=submatch,
                    ignored=listignored,
                    clean=listclean,
                    unknown=listunknown,
                    listsubrepos=True,
                )
                for k in (
                    'modified',
                    'added',
                    'removed',
                    'deleted',
                    'unknown',
                    'ignored',
                    'clean',
                ):
                    rfiles, sfiles = getattr(r, k), getattr(s, k)
                    rfiles.extend(b"%s/%s" % (subpath, f) for f in sfiles)

        r.modified.sort()
        r.added.sort()
        r.removed.sort()
        r.deleted.sort()
        r.unknown.sort()
        r.ignored.sort()
        r.clean.sort()

        return r

    def mergestate(self, clean=False):
        """Get a mergestate object for this context."""
        raise NotImplementedError(
            '%s does not implement mergestate()' % self.__class__
        )

    def isempty(self):
        # A commit is "empty" when it is a single-parent, same-branch,
        # non-closing changeset that touches no files.
        return not (
            len(self.parents()) > 1
            or self.branch() != self.p1().branch()
            or self.closesbranch()
            or self.files()
        )
496
496
497
497
498 class changectx(basectx):
498 class changectx(basectx):
499 """A changecontext object makes access to data related to a particular
499 """A changecontext object makes access to data related to a particular
500 changeset convenient. It represents a read-only context already present in
500 changeset convenient. It represents a read-only context already present in
501 the repo."""
501 the repo."""
502
502
503 def __init__(self, repo, rev, node, maybe_filtered=True):
503 def __init__(self, repo, rev, node, maybe_filtered=True):
504 super(changectx, self).__init__(repo)
504 super(changectx, self).__init__(repo)
505 self._rev = rev
505 self._rev = rev
506 self._node = node
506 self._node = node
507 # When maybe_filtered is True, the revision might be affected by
507 # When maybe_filtered is True, the revision might be affected by
508 # changelog filtering and operation through the filtered changelog must be used.
508 # changelog filtering and operation through the filtered changelog must be used.
509 #
509 #
510 # When maybe_filtered is False, the revision has already been checked
510 # When maybe_filtered is False, the revision has already been checked
511 # against filtering and is not filtered. Operation through the
511 # against filtering and is not filtered. Operation through the
512 # unfiltered changelog might be used in some case.
512 # unfiltered changelog might be used in some case.
513 self._maybe_filtered = maybe_filtered
513 self._maybe_filtered = maybe_filtered
514
514
515 def __hash__(self):
515 def __hash__(self):
516 try:
516 try:
517 return hash(self._rev)
517 return hash(self._rev)
518 except AttributeError:
518 except AttributeError:
519 return id(self)
519 return id(self)
520
520
521 def __nonzero__(self):
521 def __nonzero__(self):
522 return self._rev != nullrev
522 return self._rev != nullrev
523
523
524 __bool__ = __nonzero__
524 __bool__ = __nonzero__
525
525
526 @propertycache
526 @propertycache
527 def _changeset(self):
527 def _changeset(self):
528 if self._maybe_filtered:
528 if self._maybe_filtered:
529 repo = self._repo
529 repo = self._repo
530 else:
530 else:
531 repo = self._repo.unfiltered()
531 repo = self._repo.unfiltered()
532 return repo.changelog.changelogrevision(self.rev())
532 return repo.changelog.changelogrevision(self.rev())
533
533
534 @propertycache
534 @propertycache
535 def _manifest(self):
535 def _manifest(self):
536 return self._manifestctx.read()
536 return self._manifestctx.read()
537
537
538 @property
538 @property
539 def _manifestctx(self):
539 def _manifestctx(self):
540 return self._repo.manifestlog[self._changeset.manifest]
540 return self._repo.manifestlog[self._changeset.manifest]
541
541
542 @propertycache
542 @propertycache
543 def _manifestdelta(self):
543 def _manifestdelta(self):
544 return self._manifestctx.readdelta()
544 return self._manifestctx.readdelta()
545
545
546 @propertycache
546 @propertycache
547 def _parents(self):
547 def _parents(self):
548 repo = self._repo
548 repo = self._repo
549 if self._maybe_filtered:
549 if self._maybe_filtered:
550 cl = repo.changelog
550 cl = repo.changelog
551 else:
551 else:
552 cl = repo.unfiltered().changelog
552 cl = repo.unfiltered().changelog
553
553
554 p1, p2 = cl.parentrevs(self._rev)
554 p1, p2 = cl.parentrevs(self._rev)
555 if p2 == nullrev:
555 if p2 == nullrev:
556 return [changectx(repo, p1, cl.node(p1), maybe_filtered=False)]
556 return [changectx(repo, p1, cl.node(p1), maybe_filtered=False)]
557 return [
557 return [
558 changectx(repo, p1, cl.node(p1), maybe_filtered=False),
558 changectx(repo, p1, cl.node(p1), maybe_filtered=False),
559 changectx(repo, p2, cl.node(p2), maybe_filtered=False),
559 changectx(repo, p2, cl.node(p2), maybe_filtered=False),
560 ]
560 ]
561
561
562 def changeset(self):
562 def changeset(self):
563 c = self._changeset
563 c = self._changeset
564 return (
564 return (
565 c.manifest,
565 c.manifest,
566 c.user,
566 c.user,
567 c.date,
567 c.date,
568 c.files,
568 c.files,
569 c.description,
569 c.description,
570 c.extra,
570 c.extra,
571 )
571 )
572
572
573 def manifestnode(self):
573 def manifestnode(self):
574 return self._changeset.manifest
574 return self._changeset.manifest
575
575
576 def user(self):
576 def user(self):
577 return self._changeset.user
577 return self._changeset.user
578
578
579 def date(self):
579 def date(self):
580 return self._changeset.date
580 return self._changeset.date
581
581
582 def files(self):
582 def files(self):
583 return self._changeset.files
583 return self._changeset.files
584
584
585 def filesmodified(self):
585 def filesmodified(self):
586 modified = set(self.files())
586 modified = set(self.files())
587 modified.difference_update(self.filesadded())
587 modified.difference_update(self.filesadded())
588 modified.difference_update(self.filesremoved())
588 modified.difference_update(self.filesremoved())
589 return sorted(modified)
589 return sorted(modified)
590
590
591 def filesadded(self):
591 def filesadded(self):
592 filesadded = self._changeset.filesadded
592 filesadded = self._changeset.filesadded
593 compute_on_none = True
593 compute_on_none = True
594 if self._repo.filecopiesmode == b'changeset-sidedata':
594 if self._repo.filecopiesmode == b'changeset-sidedata':
595 compute_on_none = False
595 compute_on_none = False
596 else:
596 else:
597 source = self._repo.ui.config(b'experimental', b'copies.read-from')
597 source = self._repo.ui.config(b'experimental', b'copies.read-from')
598 if source == b'changeset-only':
598 if source == b'changeset-only':
599 compute_on_none = False
599 compute_on_none = False
600 elif source != b'compatibility':
600 elif source != b'compatibility':
601 # filelog mode, ignore any changelog content
601 # filelog mode, ignore any changelog content
602 filesadded = None
602 filesadded = None
603 if filesadded is None:
603 if filesadded is None:
604 if compute_on_none:
604 if compute_on_none:
605 filesadded = metadata.computechangesetfilesadded(self)
605 filesadded = metadata.computechangesetfilesadded(self)
606 else:
606 else:
607 filesadded = []
607 filesadded = []
608 return filesadded
608 return filesadded
609
609
610 def filesremoved(self):
610 def filesremoved(self):
611 filesremoved = self._changeset.filesremoved
611 filesremoved = self._changeset.filesremoved
612 compute_on_none = True
612 compute_on_none = True
613 if self._repo.filecopiesmode == b'changeset-sidedata':
613 if self._repo.filecopiesmode == b'changeset-sidedata':
614 compute_on_none = False
614 compute_on_none = False
615 else:
615 else:
616 source = self._repo.ui.config(b'experimental', b'copies.read-from')
616 source = self._repo.ui.config(b'experimental', b'copies.read-from')
617 if source == b'changeset-only':
617 if source == b'changeset-only':
618 compute_on_none = False
618 compute_on_none = False
619 elif source != b'compatibility':
619 elif source != b'compatibility':
620 # filelog mode, ignore any changelog content
620 # filelog mode, ignore any changelog content
621 filesremoved = None
621 filesremoved = None
622 if filesremoved is None:
622 if filesremoved is None:
623 if compute_on_none:
623 if compute_on_none:
624 filesremoved = metadata.computechangesetfilesremoved(self)
624 filesremoved = metadata.computechangesetfilesremoved(self)
625 else:
625 else:
626 filesremoved = []
626 filesremoved = []
627 return filesremoved
627 return filesremoved
628
628
    @propertycache
    def _copies(self):
        """Pair (p1copies, p2copies) of dicts mapping destination file to
        its copy source, relative to the first and second parent."""
        p1copies = self._changeset.p1copies
        p2copies = self._changeset.p2copies
        compute_on_none = True
        if self._repo.filecopiesmode == b'changeset-sidedata':
            compute_on_none = False
        else:
            source = self._repo.ui.config(b'experimental', b'copies.read-from')
            # If config says to get copy metadata only from changeset, then
            # return that, defaulting to {} if there was no copy metadata. In
            # compatibility mode, we return copy data from the changeset if it
            # was recorded there, and otherwise we fall back to getting it from
            # the filelogs (below).
            #
            # If we are in compatibility mode and there is no data in the
            # changeset, we get the copy metadata from the filelogs.
            #
            # Otherwise, when config said to read only from filelog, we get the
            # copy metadata from the filelogs.
            if source == b'changeset-only':
                compute_on_none = False
            elif source != b'compatibility':
                # filelog mode, ignore any changelog content
                p1copies = p2copies = None
        if p1copies is None:
            if compute_on_none:
                # fall back to the (more expensive) filelog-based computation
                p1copies, p2copies = super(changectx, self)._copies
            else:
                if p1copies is None:
                    p1copies = {}
                if p2copies is None:
                    p2copies = {}
        return p1copies, p2copies
663
663
664 def description(self):
664 def description(self):
665 return self._changeset.description
665 return self._changeset.description
666
666
667 def branch(self):
667 def branch(self):
668 return encoding.tolocal(self._changeset.extra.get(b"branch"))
668 return encoding.tolocal(self._changeset.extra.get(b"branch"))
669
669
670 def closesbranch(self):
670 def closesbranch(self):
671 return b'close' in self._changeset.extra
671 return b'close' in self._changeset.extra
672
672
673 def extra(self):
673 def extra(self):
674 """Return a dict of extra information."""
674 """Return a dict of extra information."""
675 return self._changeset.extra
675 return self._changeset.extra
676
676
677 def tags(self):
677 def tags(self):
678 """Return a list of byte tag names"""
678 """Return a list of byte tag names"""
679 return self._repo.nodetags(self._node)
679 return self._repo.nodetags(self._node)
680
680
681 def bookmarks(self):
681 def bookmarks(self):
682 """Return a list of byte bookmark names."""
682 """Return a list of byte bookmark names."""
683 return self._repo.nodebookmarks(self._node)
683 return self._repo.nodebookmarks(self._node)
684
684
685 def phase(self):
685 def phase(self):
686 return self._repo._phasecache.phase(self._repo, self._rev)
686 return self._repo._phasecache.phase(self._repo, self._rev)
687
687
    def hidden(self):
        """True when this revision is filtered out of the 'visible' view."""
        return self._rev in repoview.filterrevs(self._repo, b'visible')
690
690
691 def isinmemory(self):
691 def isinmemory(self):
692 return False
692 return False
693
693
694 def children(self):
694 def children(self):
695 """return list of changectx contexts for each child changeset.
695 """return list of changectx contexts for each child changeset.
696
696
697 This returns only the immediate child changesets. Use descendants() to
697 This returns only the immediate child changesets. Use descendants() to
698 recursively walk children.
698 recursively walk children.
699 """
699 """
700 c = self._repo.changelog.children(self._node)
700 c = self._repo.changelog.children(self._node)
701 return [self._repo[x] for x in c]
701 return [self._repo[x] for x in c]
702
702
703 def ancestors(self):
703 def ancestors(self):
704 for a in self._repo.changelog.ancestors([self._rev]):
704 for a in self._repo.changelog.ancestors([self._rev]):
705 yield self._repo[a]
705 yield self._repo[a]
706
706
707 def descendants(self):
707 def descendants(self):
708 """Recursively yield all children of the changeset.
708 """Recursively yield all children of the changeset.
709
709
710 For just the immediate children, use children()
710 For just the immediate children, use children()
711 """
711 """
712 for d in self._repo.changelog.descendants([self._rev]):
712 for d in self._repo.changelog.descendants([self._rev]):
713 yield self._repo[d]
713 yield self._repo[d]
714
714
    def filectx(self, path, fileid=None, filelog=None):
        """Get a file context for *path* within this changeset.

        *fileid* defaults to the file node recorded in the manifest;
        *filelog* may be passed to reuse an already-open filelog.
        """
        if fileid is None:
            fileid = self.filenode(path)
        return filectx(
            self._repo, path, fileid=fileid, changectx=self, filelog=filelog
        )
722
722
    def ancestor(self, c2, warn=False):
        """return the "best" ancestor context of self and c2

        If there are multiple candidates, it will show a message and check
        merge.preferancestor configuration before falling back to the
        revlog ancestor."""
        # deal with workingctxs
        n2 = c2._node
        if n2 is None:
            # working context has no node of its own: compare against its
            # first parent instead
            n2 = c2._parents[0]._node
        cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
        if not cahs:
            # no common ancestor at all: fall back to the null node
            anc = self._repo.nodeconstants.nullid
        elif len(cahs) == 1:
            anc = cahs[0]
        else:
            # experimental config: merge.preferancestor
            for r in self._repo.ui.configlist(b'merge', b'preferancestor'):
                try:
                    ctx = scmutil.revsymbol(self._repo, r)
                except error.RepoLookupError:
                    continue
                anc = ctx.node()
                if anc in cahs:
                    break
            else:
                # none of the configured preferences matched a candidate:
                # let the changelog pick one
                anc = self._repo.changelog.ancestor(self._node, n2)
            if warn:
                self._repo.ui.status(
                    (
                        _(b"note: using %s as ancestor of %s and %s\n")
                        % (short(anc), short(self._node), short(n2))
                    )
                    + b''.join(
                        _(
                            b" alternatively, use --config "
                            b"merge.preferancestor=%s\n"
                        )
                        % short(n)
                        for n in sorted(cahs)
                        if n != anc
                    )
                )
        return self._repo[anc]
767
767
768 def isancestorof(self, other):
768 def isancestorof(self, other):
769 """True if this changeset is an ancestor of other"""
769 """True if this changeset is an ancestor of other"""
770 return self._repo.changelog.isancestorrev(self._rev, other._rev)
770 return self._repo.changelog.isancestorrev(self._rev, other._rev)
771
771
    def walk(self, match):
        '''Generates the file names in this changeset matched by *match*.'''

        # Wrap match.bad method to have message with nodeid
        def bad(fn, msg):
            # The manifest doesn't know about subrepos, so don't complain about
            # paths into valid subrepos.
            if any(fn == s or fn.startswith(s + b'/') for s in self.substate):
                return
            match.bad(fn, _(b'no such file in rev %s') % self)

        # restrict the matcher to the narrowspec before walking the manifest
        m = matchmod.badmatch(self._repo.narrowmatch(match), bad)
        return self._manifest.walk(m)
785
785
    def matches(self, match):
        """Alias of walk(): generate matching file names."""
        return self.walk(match)
788
788
789
789
790 class basefilectx(object):
790 class basefilectx(object):
791 """A filecontext object represents the common logic for its children:
791 """A filecontext object represents the common logic for its children:
792 filectx: read-only access to a filerevision that is already present
792 filectx: read-only access to a filerevision that is already present
793 in the repo,
793 in the repo,
794 workingfilectx: a filecontext that represents files from the working
794 workingfilectx: a filecontext that represents files from the working
795 directory,
795 directory,
796 memfilectx: a filecontext that represents files in-memory,
796 memfilectx: a filecontext that represents files in-memory,
797 """
797 """
798
798
    @propertycache
    def _filelog(self):
        """Filelog (revlog) backing this file's path, opened lazily."""
        return self._repo.file(self._path)
802
802
    @propertycache
    def _changeid(self):
        """Changelog revision this file context is attached to."""
        if '_changectx' in self.__dict__:
            # an explicit changeset was provided at creation time
            return self._changectx.rev()
        elif '_descendantrev' in self.__dict__:
            # this file context was created from a revision with a known
            # descendant, we can (lazily) correct for linkrev aliases
            return self._adjustlinkrev(self._descendantrev)
        else:
            # fall back to the raw linkrev stored in the filelog
            return self._filelog.linkrev(self._filerev)
813
813
    @propertycache
    def _filenode(self):
        """File revision node id, resolved from the explicit file id when one
        was given, otherwise looked up in the changeset's manifest."""
        if '_fileid' in self.__dict__:
            return self._filelog.lookup(self._fileid)
        else:
            return self._changectx.filenode(self._path)
820
820
    @propertycache
    def _filerev(self):
        """Filelog revision number of this file revision."""
        return self._filelog.rev(self._filenode)
824
824
    @propertycache
    def _repopath(self):
        """Path of this file relative to the repository root."""
        return self._path
828
828
    def __nonzero__(self):
        """Truthiness: True when the file revision can actually be resolved."""
        try:
            self._filenode
            return True
        except error.LookupError:
            # file is missing
            return False

    # Python 3 spelling of the truthiness hook
    __bool__ = __nonzero__
838
838
    def __bytes__(self):
        """Bytes form: b"path@changeset" (b"path@???" when unresolvable)."""
        try:
            return b"%s@%s" % (self.path(), self._changectx)
        except error.LookupError:
            return b"%s@???" % self.path()

    # native-str counterpart derived from the bytes form
    __str__ = encoding.strmethod(__bytes__)
846
846
847 def __repr__(self):
847 def __repr__(self):
848 return "<%s %s>" % (type(self).__name__, str(self))
848 return "<%s %s>" % (type(self).__name__, str(self))
849
849
850 def __hash__(self):
850 def __hash__(self):
851 try:
851 try:
852 return hash((self._path, self._filenode))
852 return hash((self._path, self._filenode))
853 except AttributeError:
853 except AttributeError:
854 return id(self)
854 return id(self)
855
855
856 def __eq__(self, other):
856 def __eq__(self, other):
857 try:
857 try:
858 return (
858 return (
859 type(self) == type(other)
859 type(self) == type(other)
860 and self._path == other._path
860 and self._path == other._path
861 and self._filenode == other._filenode
861 and self._filenode == other._filenode
862 )
862 )
863 except AttributeError:
863 except AttributeError:
864 return False
864 return False
865
865
866 def __ne__(self, other):
866 def __ne__(self, other):
867 return not (self == other)
867 return not (self == other)
868
868
    def filerev(self):
        """Return the filelog revision number of this file revision."""
        return self._filerev
871
871
    def filenode(self):
        """Return the filelog node id of this file revision."""
        return self._filenode
874
874
    @propertycache
    def _flags(self):
        # manifest flags for this path (b'l' symlink / b'x' executable)
        return self._changectx.flags(self._path)
878
878
    def flags(self):
        """Return the manifest flags recorded for this file."""
        return self._flags
881
881
    def filelog(self):
        """Return the filelog backing this file."""
        return self._filelog
884
884
    def rev(self):
        """Return the changelog revision this context is attached to."""
        return self._changeid
887
887
888 def linkrev(self):
888 def linkrev(self):
889 return self._filelog.linkrev(self._filerev)
889 return self._filelog.linkrev(self._filerev)
890
890
    def node(self):
        """Return the changeset node id (delegates to the changectx)."""
        return self._changectx.node()
893
893
    def hex(self):
        """Return the hex changeset id (delegates to the changectx)."""
        return self._changectx.hex()
896
896
897 def user(self):
897 def user(self):
898 return self._changectx.user()
898 return self._changectx.user()
899
899
    def date(self):
        """Return the date of the attached changeset."""
        return self._changectx.date()
902
902
    def files(self):
        """Return the files touched by the attached changeset."""
        return self._changectx.files()
905
905
    def description(self):
        """Return the commit message of the attached changeset."""
        return self._changectx.description()
908
908
    def branch(self):
        """Return the branch name of the attached changeset."""
        return self._changectx.branch()
911
911
    def extra(self):
        """Return the extra metadata of the attached changeset."""
        return self._changectx.extra()
914
914
    def phase(self):
        """Return the phase of the attached changeset."""
        return self._changectx.phase()
917
917
    def phasestr(self):
        """Return the phase name of the attached changeset."""
        return self._changectx.phasestr()
920
920
    def obsolete(self):
        """Return whether the attached changeset is obsolete."""
        return self._changectx.obsolete()
923
923
    def instabilities(self):
        """Return the instabilities of the attached changeset."""
        return self._changectx.instabilities()
926
926
    def manifest(self):
        """Return the manifest of the attached changeset."""
        return self._changectx.manifest()
929
929
    def changectx(self):
        """Return the changeset context this file context is attached to."""
        return self._changectx
932
932
    def renamed(self):
        """Return the copy/rename information (``self._copied``), if any."""
        return self._copied
935
935
936 def copysource(self):
936 def copysource(self):
937 return self._copied and self._copied[0]
937 return self._copied and self._copied[0]
938
938
    def repo(self):
        """Return the repository this context belongs to."""
        return self._repo
941
941
942 def size(self):
942 def size(self):
943 return len(self.data())
943 return len(self.data())
944
944
    def path(self):
        """Return the repository-relative path of this file."""
        return self._path
947
947
    def isbinary(self):
        """True when the file content looks binary.

        An IOError while reading the data is reported as "not binary".
        """
        try:
            return stringutil.binary(self.data())
        except IOError:
            return False
953
953
954 def isexec(self):
954 def isexec(self):
955 return b'x' in self.flags()
955 return b'x' in self.flags()
956
956
    def islink(self):
        """True when the symlink flag (b'l') is set on this file."""
        return b'l' in self.flags()
959
959
    def isabsent(self):
        """whether this filectx represents a file not in self._changectx

        This is mainly for merge code to detect change/delete conflicts. This is
        expected to be True for all subclasses of basectx."""
        # the base implementation always reports the file as present
        return False
966
966
    # when True on the *other* context, comparison is delegated to it
    _customcmp = False

    def cmp(self, fctx):
        """compare with other file context

        returns True if different than fctx.
        """
        if fctx._customcmp:
            # the other side knows better how to compare (e.g. special
            # working/in-memory contexts): delegate to it
            return fctx.cmp(self)

        if self._filenode is None:
            raise error.ProgrammingError(
                b'filectx.cmp() must be reimplemented if not backed by revlog'
            )

        if fctx._filenode is None:
            if self._repo._encodefilterpats:
                # can't rely on size() because wdir content may be decoded
                return self._filelog.cmp(self._filenode, fctx.data())
            if self.size() - 4 == fctx.size():
                # size() can match:
                # if file data starts with '\1\n', empty metadata block is
                # prepended, which adds 4 bytes to filelog.size().
                return self._filelog.cmp(self._filenode, fctx.data())
        if self.size() == fctx.size() or self.flags() == b'l':
            # size() matches: need to compare content
            # issue6456: Always compare symlinks because size can represent
            # encrypted string for EXT-4 encryption(fscrypt).
            return self._filelog.cmp(self._filenode, fctx.data())

        # size() differs
        return True
999
999
    def _adjustlinkrev(self, srcrev, inclusive=False, stoprev=None):
        """return the first ancestor of <srcrev> introducing <fnode>

        If the linkrev of the file revision does not point to an ancestor of
        srcrev, we'll walk down the ancestors until we find one introducing
        this file revision.

        :srcrev: the changeset revision we search ancestors from
        :inclusive: if true, the src revision will also be checked
        :stoprev: an optional revision to stop the walk at. If no introduction
                  of this file content could be found before this floor
                  revision, the function will return "None" and stop its
                  iteration.
        """
        repo = self._repo
        cl = repo.unfiltered().changelog
        mfl = repo.manifestlog
        # fetch the linkrev
        lkr = self.linkrev()
        if srcrev == lkr:
            # fast path: the stored linkrev is already the requested revision
            return lkr
        # hack to reuse ancestor computation when searching for renames
        memberanc = getattr(self, '_ancestrycontext', None)
        iteranc = None
        if srcrev is None:
            # wctx case, used by workingfilectx during mergecopy
            revs = [p.rev() for p in self._repo[None].parents()]
            inclusive = True  # we skipped the real (revless) source
        else:
            revs = [srcrev]
        if memberanc is None:
            memberanc = iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
        # check if this linkrev is an ancestor of srcrev
        if lkr not in memberanc:
            if iteranc is None:
                iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
            fnode = self._filenode
            path = self._path
            for a in iteranc:
                if stoprev is not None and a < stoprev:
                    # reached the floor without finding an introduction
                    return None
                ac = cl.read(a)  # get changeset data (we avoid object creation)
                if path in ac[3]:  # checking the 'files' field.
                    # The file has been touched, check if the content is
                    # similar to the one we search for.
                    if fnode == mfl[ac[0]].readfast().get(path):
                        return a
            # In theory, we should never get out of that loop without a result.
            # But if manifest uses a buggy file revision (not children of the
            # one it replaces) we could. Such a buggy situation will likely
            # result in a crash somewhere else at some point.
        return lkr
1052
1052
1053 def isintroducedafter(self, changelogrev):
1053 def isintroducedafter(self, changelogrev):
1054 """True if a filectx has been introduced after a given floor revision"""
1054 """True if a filectx has been introduced after a given floor revision"""
1055 if self.linkrev() >= changelogrev:
1055 if self.linkrev() >= changelogrev:
1056 return True
1056 return True
1057 introrev = self._introrev(stoprev=changelogrev)
1057 introrev = self._introrev(stoprev=changelogrev)
1058 if introrev is None:
1058 if introrev is None:
1059 return False
1059 return False
1060 return introrev >= changelogrev
1060 return introrev >= changelogrev
1061
1061
    def introrev(self):
        """return the rev of the changeset which introduced this file revision

        This method is different from linkrev because it take into account the
        changeset the filectx was created from. It ensures the returned
        revision is one of its ancestors. This prevents bugs from
        'linkrev-shadowing' when a file revision is used by multiple
        changesets.
        """
        return self._introrev()
1072
1072
    def _introrev(self, stoprev=None):
        """
        Same as `introrev` but, with an extra argument to limit changelog
        iteration range in some internal usecase.

        If `stoprev` is set, the `introrev` will not be searched past that
        `stoprev` revision and "None" might be returned. This is useful to
        limit the iteration range.
        """
        toprev = None
        attrs = vars(self)
        if '_changeid' in attrs:
            # We have a cached value already
            toprev = self._changeid
        elif '_changectx' in attrs:
            # We know which changelog entry we are coming from
            toprev = self._changectx.rev()

        if toprev is not None:
            return self._adjustlinkrev(toprev, inclusive=True, stoprev=stoprev)
        elif '_descendantrev' in attrs:
            introrev = self._adjustlinkrev(self._descendantrev, stoprev=stoprev)
            # be nice and cache the result of the computation
            if introrev is not None:
                self._changeid = introrev
            return introrev
        else:
            # no anchoring information: trust the raw linkrev
            return self.linkrev()
1101
1101
1102 def introfilectx(self):
1102 def introfilectx(self):
1103 """Return filectx having identical contents, but pointing to the
1103 """Return filectx having identical contents, but pointing to the
1104 changeset revision where this filectx was introduced"""
1104 changeset revision where this filectx was introduced"""
1105 introrev = self.introrev()
1105 introrev = self.introrev()
1106 if self.rev() == introrev:
1106 if self.rev() == introrev:
1107 return self
1107 return self
1108 return self.filectx(self.filenode(), changeid=introrev)
1108 return self.filectx(self.filenode(), changeid=introrev)
1109
1109
    def _parentfilectx(self, path, fileid, filelog):
        """create parent filectx keeping ancestry info for _adjustlinkrev()"""
        fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
        if '_changeid' in vars(self) or '_changectx' in vars(self):
            # If self is associated with a changeset (probably explicitly
            # fed), ensure the created filectx is associated with a
            # changeset that is an ancestor of self.changectx.
            # This lets us later use _adjustlinkrev to get a correct link.
            fctx._descendantrev = self.rev()
            fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
        elif '_descendantrev' in vars(self):
            # Otherwise propagate _descendantrev if we have one associated.
            fctx._descendantrev = self._descendantrev
            fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
        return fctx
1125
1125
    def parents(self):
        """return the parent filectxs, substituting rename information

        Null parents are dropped; when the filelog records a rename, the
        rename source is re-inserted as the first parent.
        """
        _path = self._path
        fl = self._filelog
        parents = self._filelog.parents(self._filenode)
        # drop null parents up front; a rename source may replace one below
        pl = [
            (_path, node, fl)
            for node in parents
            if node != self._repo.nodeconstants.nullid
        ]

        r = fl.renamed(self._filenode)
        if r:
            # - In the simple rename case, both parent are nullid, pl is empty.
            # - In case of merge, only one of the parent is null id and should
            #   be replaced with the rename information. This parent is -always-
            #   the first one.
            #
            # As null id have always been filtered out in the previous list
            # comprehension, inserting to 0 will always result in "replacing
            # first nullid parent with rename information.
            pl.insert(0, (r[0], r[1], self._repo.file(r[0])))

        return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]
1149
1149
1150 def p1(self):
1150 def p1(self):
1151 return self.parents()[0]
1151 return self.parents()[0]
1152
1152
1153 def p2(self):
1153 def p2(self):
1154 p = self.parents()
1154 p = self.parents()
1155 if len(p) == 2:
1155 if len(p) == 2:
1156 return p[1]
1156 return p[1]
1157 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
1157 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
1158
1158
    def annotate(self, follow=False, skiprevs=None, diffopts=None):
        """Returns a list of annotateline objects for each line in the file

        - line.fctx is the filectx of the node where that line was last changed
        - line.lineno is the line number at the first appearance in the managed
          file
        - line.text is the data on that line (including newline character)
        """
        # cache filelog lookups; the same filelog is reused across parents
        getlog = util.lrucachefunc(lambda x: self._repo.file(x))

        def parents(f):
            # parent-fetching callback handed to dagop.annotate below
            # Cut _descendantrev here to mitigate the penalty of lazy linkrev
            # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
            # from the topmost introrev (= srcrev) down to p.linkrev() if it
            # isn't an ancestor of the srcrev.
            f._changeid
            pl = f.parents()

            # Don't return renamed parents if we aren't following.
            if not follow:
                pl = [p for p in pl if p.path() == f.path()]

            # renamed filectx won't have a filelog yet, so set it
            # from the cache to save time
            for p in pl:
                if not '_filelog' in p.__dict__:
                    p._filelog = getlog(p.path())

            return pl

        # use linkrev to find the first changeset where self appeared
        base = self.introfilectx()
        if getattr(base, '_ancestrycontext', None) is None:
            # it is safe to use an unfiltered repository here because we are
            # walking ancestors only.
            cl = self._repo.unfiltered().changelog
            if base.rev() is None:
                # wctx is not inclusive, but works because _ancestrycontext
                # is used to test filelog revisions
                ac = cl.ancestors(
                    [p.rev() for p in base.parents()], inclusive=True
                )
            else:
                ac = cl.ancestors([base.rev()], inclusive=True)
            base._ancestrycontext = ac

        return dagop.annotate(
            base, parents, skiprevs=skiprevs, diffopts=diffopts
        )
1208
1208
    def ancestors(self, followfirst=False):
        """Yield ancestor filectxs of this file context.

        Candidates are kept in ``visit`` keyed by (linkrev, filenode) and
        the one with the highest key is yielded next.  When ``followfirst``
        is true, only first parents are followed.
        """
        visit = {}
        c = self
        if followfirst:
            cut = 1
        else:
            # cut=None slices the full parent list
            cut = None

        while True:
            for parent in c.parents()[:cut]:
                visit[(parent.linkrev(), parent.filenode())] = parent
            if not visit:
                break
            # pop the pending candidate with the highest (linkrev, filenode)
            c = visit.pop(max(visit))
            yield c
1224
1224
1225 def decodeddata(self):
1225 def decodeddata(self):
1226 """Returns `data()` after running repository decoding filters.
1226 """Returns `data()` after running repository decoding filters.
1227
1227
1228 This is often equivalent to how the data would be expressed on disk.
1228 This is often equivalent to how the data would be expressed on disk.
1229 """
1229 """
1230 return self._repo.wwritedata(self.path(), self.data())
1230 return self._repo.wwritedata(self.path(), self.data())
1231
1231
1232
1232
class filectx(basefilectx):
    """A filecontext object makes access to data related to a particular
    filerevision convenient."""

    def __init__(
        self,
        repo,
        path,
        changeid=None,
        fileid=None,
        filelog=None,
        changectx=None,
    ):
        """changeid must be a revision number, if specified.
        fileid can be a file revision or node."""
        self._repo = repo
        self._path = path

        # at least one way of locating the file revision must be supplied
        assert (
            changeid is not None or fileid is not None or changectx is not None
        ), b"bad args: changeid=%r, fileid=%r, changectx=%r" % (
            changeid,
            fileid,
            changectx,
        )

        if filelog is not None:
            self._filelog = filelog

        # pre-fill only the propertycache slots we were actually given;
        # the rest are computed lazily by basefilectx
        if changeid is not None:
            self._changeid = changeid
        if changectx is not None:
            self._changectx = changectx
        if fileid is not None:
            self._fileid = fileid

    @propertycache
    def _changectx(self):
        # changectx the linkrev of this file revision points at
        try:
            return self._repo[self._changeid]
        except error.FilteredRepoLookupError:
            # Linkrev may point to any revision in the repository. When the
            # repository is filtered this may lead to `filectx` trying to build
            # `changectx` for filtered revision. In such case we fallback to
            # creating `changectx` on the unfiltered version of the reposition.
            # This fallback should not be an issue because `changectx` from
            # `filectx` are not used in complex operations that care about
            # filtering.
            #
            # This fallback is a cheap and dirty fix that prevent several
            # crashes. It does not ensure the behavior is correct. However the
            # behavior was not correct before filtering either and "incorrect
            # behavior" is seen as better as "crash"
            #
            # Linkrevs have several serious troubles with filtering that are
            # complicated to solve. Proper handling of the issue here should be
            # considered when solving linkrev issue are on the table.
            return self._repo.unfiltered()[self._changeid]

    def filectx(self, fileid, changeid=None):
        """opens an arbitrary revision of the file without
        opening a new filelog"""
        return filectx(
            self._repo,
            self._path,
            fileid=fileid,
            filelog=self._filelog,
            changeid=changeid,
        )

    def rawdata(self):
        """return the raw (undecoded) revlog data for this file revision"""
        return self._filelog.rawdata(self._filenode)

    def rawflags(self):
        """low-level revlog flags"""
        return self._filelog.flags(self._filerev)

    def data(self):
        """return the file content, honoring the censor policy on
        censored nodes (empty data with policy "ignore", abort otherwise)"""
        try:
            return self._filelog.read(self._filenode)
        except error.CensoredNodeError:
            if self._repo.ui.config(b"censor", b"policy") == b"ignore":
                return b""
            raise error.Abort(
                _(b"censored node: %s") % short(self._filenode),
                hint=_(b"set censor.policy to ignore errors"),
            )

    def size(self):
        """return the size of this file revision in bytes"""
        return self._filelog.size(self._filerev)

    @propertycache
    def _copied(self):
        """check if file was actually renamed in this changeset revision

        If rename logged in file revision, we report copy for changeset only
        if file revisions linkrev points back to the changeset in question
        or both changeset parents contain different file revisions.
        """

        renamed = self._filelog.renamed(self._filenode)
        if not renamed:
            return None

        if self.rev() == self.linkrev():
            return renamed

        # the rename was recorded in an earlier changeset; only report it
        # here if neither changeset parent already carries this exact
        # file revision
        name = self.path()
        fnode = self._filenode
        for p in self._changectx.parents():
            try:
                if fnode == p.filenode(name):
                    return None
            except error.LookupError:
                pass
        return renamed

    def children(self):
        # hard for renames
        c = self._filelog.children(self._filenode)
        return [
            filectx(self._repo, self._path, fileid=x, filelog=self._filelog)
            for x in c
        ]
1357
1357
1358
1358
class committablectx(basectx):
    """A committablectx object provides common functionality for a context that
    wants the ability to commit, e.g. workingctx or memctx."""

    def __init__(
        self,
        repo,
        text=b"",
        user=None,
        date=None,
        extra=None,
        changes=None,
        branch=None,
    ):
        """Initialize a to-be-committed context.

        user, date and changes left unset are computed lazily by the
        propertycaches below; the branch defaults to b'default' when
        neither ``branch`` nor ``extra[b'branch']`` provides one.
        """
        super(committablectx, self).__init__(repo)
        self._rev = None
        self._node = None
        self._text = text
        if date:
            self._date = dateutil.parsedate(date)
        if user:
            self._user = user
        if changes:
            self._status = changes

        self._extra = {}
        if extra:
            self._extra = extra.copy()
        if branch is not None:
            self._extra[b'branch'] = encoding.fromlocal(branch)
        if not self._extra.get(b'branch'):
            self._extra[b'branch'] = b'default'

    def __bytes__(self):
        return bytes(self._parents[0]) + b"+"

    def hex(self):
        # BUG FIX: this previously evaluated the attribute without
        # returning it, so hex() always returned None.  Return the
        # working-directory pseudo-nodeid, matching workingctx.hex().
        return self._repo.nodeconstants.wdirhex

    __str__ = encoding.strmethod(__bytes__)

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    @propertycache
    def _status(self):
        # default status: ask the repository to compare against the parent
        return self._repo.status()

    @propertycache
    def _user(self):
        return self._repo.ui.username()

    @propertycache
    def _date(self):
        ui = self._repo.ui
        # devel.default-date allows tests to pin a deterministic date
        date = ui.configdate(b'devel', b'default-date')
        if date is None:
            date = dateutil.makedate()
        return date

    def subrev(self, subpath):
        return None

    def manifestnode(self):
        return None

    def user(self):
        return self._user or self._repo.ui.username()

    def date(self):
        return self._date

    def description(self):
        return self._text

    def files(self):
        """sorted list of files touched (modified, added or removed)"""
        return sorted(
            self._status.modified + self._status.added + self._status.removed
        )

    def modified(self):
        return self._status.modified

    def added(self):
        return self._status.added

    def removed(self):
        return self._status.removed

    def deleted(self):
        return self._status.deleted

    # the commit machinery uses the files* names; keep them as aliases
    filesmodified = modified
    filesadded = added
    filesremoved = removed

    def branch(self):
        return encoding.tolocal(self._extra[b'branch'])

    def closesbranch(self):
        return b'close' in self._extra

    def extra(self):
        return self._extra

    def isinmemory(self):
        return False

    def tags(self):
        return []

    def bookmarks(self):
        # an uncommitted context inherits the bookmarks of its parents
        b = []
        for p in self.parents():
            b.extend(p.bookmarks())
        return b

    def phase(self):
        # never commit in a phase lower than any parent's phase
        phase = phases.newcommitphase(self._repo.ui)
        for p in self.parents():
            phase = max(phase, p.phase())
        return phase

    def hidden(self):
        return False

    def children(self):
        return []

    def flags(self, path):
        if '_manifest' in self.__dict__:
            try:
                return self._manifest.flags(path)
            except KeyError:
                return b''

        try:
            return self._flagfunc(path)
        except OSError:
            return b''

    def ancestor(self, c2):
        """return the "best" ancestor context of self and c2"""
        return self._parents[0].ancestor(c2)  # punt on two parents for now

    def ancestors(self):
        # yield direct parents first, then the rest of the ancestry
        for p in self._parents:
            yield p
        for a in self._repo.changelog.ancestors(
            [p.rev() for p in self._parents]
        ):
            yield self._repo[a]

    def markcommitted(self, node):
        """Perform post-commit cleanup necessary after committing this ctx

        Specifically, this updates backing stores this working context
        wraps to reflect the fact that the changes reflected by this
        workingctx have been committed. For example, it marks
        modified and added files as normal in the dirstate.

        """

    def dirty(self, missing=False, merge=True, branch=True):
        # the base class is never dirty; subclasses (e.g. workingctx)
        # override this with a real check
        return False
1526
1526
1527
1527
1528 class workingctx(committablectx):
1528 class workingctx(committablectx):
1529 """A workingctx object makes access to data related to
1529 """A workingctx object makes access to data related to
1530 the current working directory convenient.
1530 the current working directory convenient.
1531 date - any valid date string or (unixtime, offset), or None.
1531 date - any valid date string or (unixtime, offset), or None.
1532 user - username string, or None.
1532 user - username string, or None.
1533 extra - a dictionary of extra values, or None.
1533 extra - a dictionary of extra values, or None.
1534 changes - a list of file lists as returned by localrepo.status()
1534 changes - a list of file lists as returned by localrepo.status()
1535 or None to use the repository status.
1535 or None to use the repository status.
1536 """
1536 """
1537
1537
    def __init__(
        self, repo, text=b"", user=None, date=None, extra=None, changes=None
    ):
        # default the branch to the dirstate's branch unless the caller
        # supplied one explicitly via extra
        branch = None
        if not extra or b'branch' not in extra:
            try:
                branch = repo.dirstate.branch()
            except UnicodeDecodeError:
                raise error.Abort(_(b'branch name not in UTF-8!'))
        super(workingctx, self).__init__(
            repo, text, user, date, extra, changes, branch=branch
        )
1550
1550
1551 def __iter__(self):
1551 def __iter__(self):
1552 d = self._repo.dirstate
1552 d = self._repo.dirstate
1553 for f in d:
1553 for f in d:
1554 if d[f] != b'r':
1554 if d[f] != b'r':
1555 yield f
1555 yield f
1556
1556
1557 def __contains__(self, key):
1557 def __contains__(self, key):
1558 return self._repo.dirstate[key] not in b"?r"
1558 return self._repo.dirstate[key] not in b"?r"
1559
1559
1560 def hex(self):
1560 def hex(self):
1561 return self._repo.nodeconstants.wdirhex
1561 return self._repo.nodeconstants.wdirhex
1562
1562
    @propertycache
    def _parents(self):
        # working directory parents come straight from the dirstate
        p = self._repo.dirstate.parents()
        if p[1] == self._repo.nodeconstants.nullid:
            # single-parent case: drop the null second parent
            p = p[:-1]
        # use unfiltered repo to delay/avoid loading obsmarkers
        unfi = self._repo.unfiltered()
        return [
            changectx(
                self._repo, unfi.changelog.rev(n), n, maybe_filtered=False
            )
            for n in p
        ]
1576
1576
    def setparents(self, p1node, p2node=None):
        """set the dirstate parents, fixing up copy records

        ``dirstate.setparents`` returns the copy records it had to drop;
        they are re-applied here when they still make sense against the new
        first parent (the dirstate cannot do this itself, as it has no
        access to the parents' manifests).
        """
        if p2node is None:
            p2node = self._repo.nodeconstants.nullid
        dirstate = self._repo.dirstate
        with dirstate.parentchange():
            copies = dirstate.setparents(p1node, p2node)
            pctx = self._repo[p1node]
            if copies:
                # Adjust copy records, the dirstate cannot do it, it
                # requires access to parents manifests. Preserve them
                # only for entries added to first parent.
                for f in copies:
                    if f not in pctx and copies[f] in pctx:
                        dirstate.copy(copies[f], f)
            if p2node == self._repo.nodeconstants.nullid:
                # drop copy records that reference neither side
                for f, s in sorted(dirstate.copies().items()):
                    if f not in pctx and s not in pctx:
                        dirstate.copy(None, f)
1595
1595
    def _fileinfo(self, path):
        # populate __dict__['_manifest'] as workingctx has no _manifestdelta
        self._manifest
        return super(workingctx, self)._fileinfo(path)
1600
1600
    def _buildflagfunc(self):
        # Create a fallback function for getting file flags when the
        # filesystem doesn't support them

        copiesget = self._repo.dirstate.copies().get
        parents = self.parents()
        if len(parents) < 2:
            # when we have one parent, it's easy: copy from parent
            man = parents[0].manifest()

            def func(f):
                # map through copy records so a copied file inherits the
                # flags of its source
                f = copiesget(f, f)
                return man.flags(f)

        else:
            # merges are tricky: we try to reconstruct the unstored
            # result from the merge (issue1802)
            p1, p2 = parents
            pa = p1.ancestor(p2)
            m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()

            def func(f):
                f = copiesget(f, f)  # may be wrong for merges with copies
                fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
                # agreement wins; otherwise the side that differs from the
                # ancestor carries the change
                if fl1 == fl2:
                    return fl1
                if fl1 == fla:
                    return fl2
                if fl2 == fla:
                    return fl1
                return b''  # punt for conflicts

        return func
1634
1634
    @propertycache
    def _flagfunc(self):
        # ask the dirstate for flags, handing it _buildflagfunc as the
        # fallback for filesystems without exec/symlink support
        return self._repo.dirstate.flagfunc(self._buildflagfunc)
1638
1638
1639 def flags(self, path):
1639 def flags(self, path):
1640 try:
1640 try:
1641 return self._flagfunc(path)
1641 return self._flagfunc(path)
1642 except OSError:
1642 except OSError:
1643 return b''
1643 return b''
1644
1644
    def filectx(self, path, filelog=None):
        """get a file context from the working directory"""
        return workingfilectx(
            self._repo, path, workingctx=self, filelog=filelog
        )
1650
1650
    def dirty(self, missing=False, merge=True, branch=True):
        """check whether a working directory is modified

        Note: the return value is truthy rather than strictly boolean --
        e.g. the second-parent context or a list of changed files.
        """
        # check subrepos first
        for s in sorted(self.substate):
            if self.sub(s).dirty(missing=missing):
                return True
        # check current working dir
        return (
            (merge and self.p2())
            or (branch and self.branch() != self.p1().branch())
            or self.modified()
            or self.added()
            or self.removed()
            or (missing and self.deleted())
        )
1666
1666
    def add(self, list, prefix=b""):
        """start tracking the given files, returning the rejected paths

        A path is rejected when it does not exist or is neither a regular
        file nor a symlink; large files produce a warning but are added.
        """
        with self._repo.wlock():
            ui, ds = self._repo.ui, self._repo.dirstate
            uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
            rejected = []
            lstat = self._repo.wvfs.lstat
            for f in list:
                # ds.pathto() returns an absolute file when this is invoked from
                # the keyword extension. That gets flagged as non-portable on
                # Windows, since it contains the drive letter and colon.
                scmutil.checkportable(ui, os.path.join(prefix, f))
                try:
                    st = lstat(f)
                except OSError:
                    ui.warn(_(b"%s does not exist!\n") % uipath(f))
                    rejected.append(f)
                    continue
                limit = ui.configbytes(b'ui', b'large-file-limit')
                if limit != 0 and st.st_size > limit:
                    # warn only; the file is still added below
                    ui.warn(
                        _(
                            b"%s: up to %d MB of RAM may be required "
                            b"to manage this file\n"
                            b"(use 'hg revert %s' to cancel the "
                            b"pending addition)\n"
                        )
                        % (f, 3 * st.st_size // 1000000, uipath(f))
                    )
                if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
                    ui.warn(
                        _(
                            b"%s not added: only files and symlinks "
                            b"supported currently\n"
                        )
                        % uipath(f)
                    )
                    rejected.append(f)
                elif not ds.set_tracked(f):
                    # set_tracked returns False when already tracked
                    ui.warn(_(b"%s already tracked!\n") % uipath(f))
            return rejected
1707
1707
    def forget(self, files, prefix=b""):
        """Stop tracking ``files`` without deleting them from disk.

        Returns the subset of ``files`` that were not tracked to begin with.
        """
        with self._repo.wlock():
            ds = self._repo.dirstate
            uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
            rejected = []
            for f in files:
                if f not in ds:
                    self._repo.ui.warn(_(b"%s not tracked!\n") % uipath(f))
                    rejected.append(f)
                elif ds[f] != b'a':
                    # tracked in the parent: schedule for removal
                    ds.remove(f)
                else:
                    # freshly added: simply drop the dirstate entry
                    ds.drop(f)
            return rejected
    def copy(self, source, dest):
        """Record in the dirstate that ``dest`` is a copy of ``source``.

        ``dest`` must already exist in the working directory as a regular
        file or symlink, otherwise a warning is emitted and nothing is
        recorded.
        """
        try:
            st = self._repo.wvfs.lstat(dest)
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
            self._repo.ui.warn(
                _(b"%s does not exist!\n") % self._repo.dirstate.pathto(dest)
            )
            return
        if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
            self._repo.ui.warn(
                _(b"copy failed: %s is not a file or a symbolic link\n")
                % self._repo.dirstate.pathto(dest)
            )
        else:
            with self._repo.wlock():
                ds = self._repo.dirstate
                # make sure the destination is tracked before recording the
                # copy information
                ds.set_tracked(dest)
                ds.copy(source, dest)
    def match(
        self,
        pats=None,
        include=None,
        exclude=None,
        default=b'glob',
        listsubrepos=False,
        badfn=None,
        cwd=None,
    ):
        """Build a matcher for ``pats`` relative to this working context."""
        r = self._repo
        if not cwd:
            cwd = r.getcwd()

        # Only a case insensitive filesystem needs magic to translate user input
        # to actual case in the filesystem.
        icasefs = not util.fscasesensitive(r.root)
        return matchmod.match(
            r.root,
            cwd,
            pats,
            include,
            exclude,
            default,
            auditor=r.auditor,
            ctx=self,
            listsubrepos=listsubrepos,
            badfn=badfn,
            icasefs=icasefs,
        )
1778 def _filtersuspectsymlink(self, files):
1775 def _filtersuspectsymlink(self, files):
1779 if not files or self._repo.dirstate._checklink:
1776 if not files or self._repo.dirstate._checklink:
1780 return files
1777 return files
1781
1778
1782 # Symlink placeholders may get non-symlink-like contents
1779 # Symlink placeholders may get non-symlink-like contents
1783 # via user error or dereferencing by NFS or Samba servers,
1780 # via user error or dereferencing by NFS or Samba servers,
1784 # so we filter out any placeholders that don't look like a
1781 # so we filter out any placeholders that don't look like a
1785 # symlink
1782 # symlink
1786 sane = []
1783 sane = []
1787 for f in files:
1784 for f in files:
1788 if self.flags(f) == b'l':
1785 if self.flags(f) == b'l':
1789 d = self[f].data()
1786 d = self[f].data()
1790 if (
1787 if (
1791 d == b''
1788 d == b''
1792 or len(d) >= 1024
1789 or len(d) >= 1024
1793 or b'\n' in d
1790 or b'\n' in d
1794 or stringutil.binary(d)
1791 or stringutil.binary(d)
1795 ):
1792 ):
1796 self._repo.ui.debug(
1793 self._repo.ui.debug(
1797 b'ignoring suspect symlink placeholder "%s"\n' % f
1794 b'ignoring suspect symlink placeholder "%s"\n' % f
1798 )
1795 )
1799 continue
1796 continue
1800 sane.append(f)
1797 sane.append(f)
1801 return sane
1798 return sane
1802
1799
    def _checklookup(self, files):
        """Re-examine files the dirstate could not classify by stat alone.

        Returns a ``(modified, deleted, fixup)`` triple; ``fixup`` holds
        files that turned out to be clean.
        """
        # check for any possibly clean files
        if not files:
            return [], [], []

        modified = []
        deleted = []
        fixup = []
        pctx = self._parents[0]
        # do a full compare of any files that might have changed
        for f in sorted(files):
            try:
                # This will return True for a file that got replaced by a
                # directory in the interim, but fixing that is pretty hard.
                if (
                    f not in pctx
                    or self.flags(f) != pctx.flags(f)
                    or pctx[f].cmp(self[f])
                ):
                    modified.append(f)
                else:
                    fixup.append(f)
            except (IOError, OSError):
                # A file become inaccessible in between? Mark it as deleted,
                # matching dirstate behavior (issue5584).
                # The dirstate has more complex behavior around whether a
                # missing file matches a directory, etc, but we don't need to
                # bother with that: if f has made it to this point, we're sure
                # it's in the dirstate.
                deleted.append(f)

        return modified, deleted, fixup
    def _poststatusfixup(self, status, fixup):
        """update dirstate for files that are actually clean"""
        poststatus = self._repo.postdsstatus()
        if fixup or poststatus or self._repo.dirstate._dirty:
            try:
                # remember the dirstate identity so we can detect a concurrent
                # rewrite below
                oldid = self._repo.dirstate.identity()

                # updating the dirstate is optional
                # so we don't wait on the lock
                # wlock can invalidate the dirstate, so cache normal _after_
                # taking the lock
                with self._repo.wlock(False):
                    if self._repo.dirstate.identity() == oldid:
                        if fixup:
                            normal = self._repo.dirstate.normal
                            for f in fixup:
                                normal(f)
                            # write changes out explicitly, because nesting
                            # wlock at runtime may prevent 'wlock.release()'
                            # after this block from doing so for subsequent
                            # changing files
                            tr = self._repo.currenttransaction()
                            self._repo.dirstate.write(tr)

                        if poststatus:
                            for ps in poststatus:
                                ps(self, status)
                    else:
                        # in this case, writing changes out breaks
                        # consistency, because .hg/dirstate was
                        # already changed simultaneously after last
                        # caching (see also issue5584 for detail)
                        self._repo.ui.debug(
                            b'skip updating dirstate: identity mismatch\n'
                        )
            except error.LockError:
                # best-effort: someone else holds the wlock, just skip
                pass
            finally:
                # Even if the wlock couldn't be grabbed, clear out the list.
                self._repo.clearpostdsstatus()
    def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
        '''Gets the status from the dirstate -- internal use only.'''
        subrepos = []
        if b'.hgsub' in self:
            subrepos = sorted(self.substate)
        # cmp holds the files the dirstate could not classify by stat alone
        cmp, s = self._repo.dirstate.status(
            match, subrepos, ignored=ignored, clean=clean, unknown=unknown
        )

        # check for any possibly clean files
        fixup = []
        if cmp:
            modified2, deleted2, fixup = self._checklookup(cmp)
            s.modified.extend(modified2)
            s.deleted.extend(deleted2)

            if fixup and clean:
                s.clean.extend(fixup)

        self._poststatusfixup(s, fixup)

        if match.always():
            # cache for performance
            if s.unknown or s.ignored or s.clean:
                # "_status" is cached with list*=False in the normal route
                self._status = scmutil.status(
                    s.modified, s.added, s.removed, s.deleted, [], [], []
                )
            else:
                self._status = s

        return s
    @propertycache
    def _copies(self):
        """Return ``(p1copies, p2copies)`` dicts mapping dst -> src.

        Only copies whose destination changed in the working directory and
        is inside the narrowspec are reported; each copy is attributed to
        whichever parent manifest contains the source.
        """
        p1copies = {}
        p2copies = {}
        parents = self._repo.dirstate.parents()
        p1manifest = self._repo[parents[0]].manifest()
        p2manifest = self._repo[parents[1]].manifest()
        changedset = set(self.added()) | set(self.modified())
        narrowmatch = self._repo.narrowmatch()
        for dst, src in self._repo.dirstate.copies().items():
            if dst not in changedset or not narrowmatch(dst):
                continue
            if src in p1manifest:
                p1copies[dst] = src
            elif src in p2manifest:
                p2copies[dst] = src
        return p1copies, p2copies
    @propertycache
    def _manifest(self):
        """generate a manifest corresponding to the values in self._status

        This reuse the file nodeid from parent, but we use special node
        identifiers for added and modified files. This is used by manifests
        merge to see that files are different and by update logic to avoid
        deleting newly added files.
        """
        return self._buildstatusmanifest(self._status)
1939 def _buildstatusmanifest(self, status):
1936 def _buildstatusmanifest(self, status):
1940 """Builds a manifest that includes the given status results."""
1937 """Builds a manifest that includes the given status results."""
1941 parents = self.parents()
1938 parents = self.parents()
1942
1939
1943 man = parents[0].manifest().copy()
1940 man = parents[0].manifest().copy()
1944
1941
1945 ff = self._flagfunc
1942 ff = self._flagfunc
1946 for i, l in (
1943 for i, l in (
1947 (self._repo.nodeconstants.addednodeid, status.added),
1944 (self._repo.nodeconstants.addednodeid, status.added),
1948 (self._repo.nodeconstants.modifiednodeid, status.modified),
1945 (self._repo.nodeconstants.modifiednodeid, status.modified),
1949 ):
1946 ):
1950 for f in l:
1947 for f in l:
1951 man[f] = i
1948 man[f] = i
1952 try:
1949 try:
1953 man.setflag(f, ff(f))
1950 man.setflag(f, ff(f))
1954 except OSError:
1951 except OSError:
1955 pass
1952 pass
1956
1953
1957 for f in status.deleted + status.removed:
1954 for f in status.deleted + status.removed:
1958 if f in man:
1955 if f in man:
1959 del man[f]
1956 del man[f]
1960
1957
1961 return man
1958 return man
1962
1959
    def _buildstatus(
        self, other, s, match, listignored, listclean, listunknown
    ):
        """build a status with respect to another context

        This includes logic for maintaining the fast path of status when
        comparing the working directory against its parent, which is to skip
        building a new manifest if self (working directory) is not comparing
        against its parent (repo['.']).
        """
        s = self._dirstatestatus(match, listignored, listclean, listunknown)
        # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
        # might have accidentally ended up with the entire contents of the file
        # they are supposed to be linking to.
        s.modified[:] = self._filtersuspectsymlink(s.modified)
        if other != self._repo[b'.']:
            # slow path: fall back to manifest-based comparison
            s = super(workingctx, self)._buildstatus(
                other, s, match, listignored, listclean, listunknown
            )
        return s
    def _matchstatus(self, other, match):
        """override the match method with a filter for directory patterns

        We use inheritance to customize the match.bad method only in cases of
        workingctx since it belongs only to the working directory when
        comparing against the parent changeset.

        If we aren't comparing against the working directory's parent, then we
        just use the default match object sent to us.
        """
        if other != self._repo[b'.']:

            def bad(f, msg):
                # 'f' may be a directory pattern from 'match.files()',
                # so 'f not in ctx1' is not enough
                if f not in other and not other.hasdir(f):
                    self._repo.ui.warn(
                        b'%s: %s\n' % (self._repo.dirstate.pathto(f), msg)
                    )

            match.bad = bad
        return match
2007 def walk(self, match):
2004 def walk(self, match):
2008 '''Generates matching file names.'''
2005 '''Generates matching file names.'''
2009 return sorted(
2006 return sorted(
2010 self._repo.dirstate.walk(
2007 self._repo.dirstate.walk(
2011 self._repo.narrowmatch(match),
2008 self._repo.narrowmatch(match),
2012 subrepos=sorted(self.substate),
2009 subrepos=sorted(self.substate),
2013 unknown=True,
2010 unknown=True,
2014 ignored=False,
2011 ignored=False,
2015 )
2012 )
2016 )
2013 )
2017
2014
2018 def matches(self, match):
2015 def matches(self, match):
2019 match = self._repo.narrowmatch(match)
2016 match = self._repo.narrowmatch(match)
2020 ds = self._repo.dirstate
2017 ds = self._repo.dirstate
2021 return sorted(f for f in ds.matches(match) if ds[f] != b'r')
2018 return sorted(f for f in ds.matches(match) if ds[f] != b'r')
2022
2019
    def markcommitted(self, node):
        """Sync the dirstate with a freshly committed changeset ``node``."""
        with self._repo.dirstate.parentchange():
            # everything that was pending is now clean in the new parent
            for f in self.modified() + self.added():
                self._repo.dirstate.normal(f)
            for f in self.removed():
                self._repo.dirstate.drop(f)
            self._repo.dirstate.setparents(node)
            self._repo._quick_access_changeid_invalidate()

            # write changes out explicitly, because nesting wlock at
            # runtime may prevent 'wlock.release()' in 'repo.commit()'
            # from immediately doing so for subsequent changing files
            self._repo.dirstate.write(self._repo.currenttransaction())

        sparse.aftercommit(self._repo, node)
2039 def mergestate(self, clean=False):
2036 def mergestate(self, clean=False):
2040 if clean:
2037 if clean:
2041 return mergestatemod.mergestate.clean(self._repo)
2038 return mergestatemod.mergestate.clean(self._repo)
2042 return mergestatemod.mergestate.read(self._repo)
2039 return mergestatemod.mergestate.read(self._repo)
2043
2040
2044
2041
class committablefilectx(basefilectx):
    """A committablefilectx provides common functionality for a file context
    that wants the ability to commit, e.g. workingfilectx or memfilectx."""

    def __init__(self, repo, path, filelog=None, ctx=None):
        # filelog and ctx are optional and only cached when provided
        self._repo = repo
        self._path = path
        self._changeid = None
        self._filerev = self._filenode = None

        if filelog is not None:
            self._filelog = filelog
        if ctx:
            self._changectx = ctx

    def __nonzero__(self):
        # a committable file context always "exists"
        return True

    __bool__ = __nonzero__

    def linkrev(self):
        # linked to self._changectx no matter if file is modified or not
        return self.rev()

    def renamed(self):
        """Return ``(source path, source filenode)`` or None if not a copy."""
        path = self.copysource()
        if not path:
            return None
        return (
            path,
            self._changectx._parents[0]._manifest.get(
                path, self._repo.nodeconstants.nullid
            ),
        )

    def parents(self):
        '''return parent filectxs, following copies if necessary'''

        def filenode(ctx, path):
            # nullid when the file is absent from that manifest
            return ctx._manifest.get(path, self._repo.nodeconstants.nullid)

        path = self._path
        fl = self._filelog
        pcl = self._changectx._parents
        renamed = self.renamed()

        if renamed:
            # (source path, source node, no filelog yet)
            pl = [renamed + (None,)]
        else:
            pl = [(path, filenode(pcl[0], path), fl)]

        for pc in pcl[1:]:
            pl.append((path, filenode(pc, path), fl))

        # drop parents where the file did not exist
        return [
            self._parentfilectx(p, fileid=n, filelog=l)
            for p, n, l in pl
            if n != self._repo.nodeconstants.nullid
        ]

    def children(self):
        # uncommitted files have no children yet
        return []
class workingfilectx(committablefilectx):
    """A workingfilectx object makes access to data related to a particular
    file in the working directory convenient."""

    def __init__(self, repo, path, filelog=None, workingctx=None):
        super(workingfilectx, self).__init__(repo, path, filelog, workingctx)

    @propertycache
    def _changectx(self):
        # lazily bind to the current working context
        return workingctx(self._repo)

    def data(self):
        """Return the file's current on-disk contents."""
        return self._repo.wread(self._path)

    def copysource(self):
        """Return the dirstate-recorded copy source, or None."""
        return self._repo.dirstate.copied(self._path)

    def size(self):
        return self._repo.wvfs.lstat(self._path).st_size

    def lstat(self):
        return self._repo.wvfs.lstat(self._path)

    def date(self):
        """Return (mtime, tz); falls back to the changectx date if the
        file is gone from disk."""
        t, tz = self._changectx.date()
        try:
            return (self._repo.wvfs.lstat(self._path)[stat.ST_MTIME], tz)
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
            return (t, tz)

    def exists(self):
        return self._repo.wvfs.exists(self._path)

    def lexists(self):
        return self._repo.wvfs.lexists(self._path)

    def audit(self):
        return self._repo.wvfs.audit(self._path)

    def cmp(self, fctx):
        """compare with other file context

        returns True if different than fctx.
        """
        # fctx should be a filectx (not a workingfilectx)
        # invert comparison to reuse the same code path
        return fctx.cmp(self)

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        rmdir = self._repo.ui.configbool(b'experimental', b'removeemptydirs')
        self._repo.wvfs.unlinkpath(
            self._path, ignoremissing=ignoremissing, rmdir=rmdir
        )

    def write(self, data, flags, backgroundclose=False, **kwargs):
        """wraps repo.wwrite"""
        return self._repo.wwrite(
            self._path, data, flags, backgroundclose=backgroundclose, **kwargs
        )

    def markcopied(self, src):
        """marks this file a copy of `src`"""
        self._repo.dirstate.copy(src, self._path)

    def clearunknown(self):
        """Removes conflicting items in the working directory so that
        ``write()`` can be called successfully.
        """
        wvfs = self._repo.wvfs
        f = self._path
        wvfs.audit(f)
        if self._repo.ui.configbool(
            b'experimental', b'merge.checkpathconflicts'
        ):
            # remove files under the directory as they should already be
            # warned and backed up
            if wvfs.isdir(f) and not wvfs.islink(f):
                wvfs.rmtree(f, forcibly=True)
            for p in reversed(list(pathutil.finddirs(f))):
                if wvfs.isfileorlink(p):
                    wvfs.unlink(p)
                    break
        else:
            # don't remove files if path conflicts are not processed
            if wvfs.isdir(f) and not wvfs.islink(f):
                wvfs.removedirs(f)

    def setflags(self, l, x):
        # l: symlink flag, x: executable flag
        self._repo.wvfs.setflags(self._path, l, x)
2203 class overlayworkingctx(committablectx):
2200 class overlayworkingctx(committablectx):
2204 """Wraps another mutable context with a write-back cache that can be
2201 """Wraps another mutable context with a write-back cache that can be
2205 converted into a commit context.
2202 converted into a commit context.
2206
2203
2207 self._cache[path] maps to a dict with keys: {
2204 self._cache[path] maps to a dict with keys: {
2208 'exists': bool?
2205 'exists': bool?
2209 'date': date?
2206 'date': date?
2210 'data': str?
2207 'data': str?
2211 'flags': str?
2208 'flags': str?
2212 'copied': str? (path or None)
2209 'copied': str? (path or None)
2213 }
2210 }
2214 If `exists` is True, `flags` must be non-None and 'date' is non-None. If it
2211 If `exists` is True, `flags` must be non-None and 'date' is non-None. If it
2215 is `False`, the file was deleted.
2212 is `False`, the file was deleted.
2216 """
2213 """
2217
2214
2218 def __init__(self, repo):
2215 def __init__(self, repo):
2219 super(overlayworkingctx, self).__init__(repo)
2216 super(overlayworkingctx, self).__init__(repo)
2220 self.clean()
2217 self.clean()
2221
2218
2222 def setbase(self, wrappedctx):
2219 def setbase(self, wrappedctx):
2223 self._wrappedctx = wrappedctx
2220 self._wrappedctx = wrappedctx
2224 self._parents = [wrappedctx]
2221 self._parents = [wrappedctx]
2225 # Drop old manifest cache as it is now out of date.
2222 # Drop old manifest cache as it is now out of date.
2226 # This is necessary when, e.g., rebasing several nodes with one
2223 # This is necessary when, e.g., rebasing several nodes with one
2227 # ``overlayworkingctx`` (e.g. with --collapse).
2224 # ``overlayworkingctx`` (e.g. with --collapse).
2228 util.clearcachedproperty(self, b'_manifest')
2225 util.clearcachedproperty(self, b'_manifest')
2229
2226
2230 def setparents(self, p1node, p2node=None):
2227 def setparents(self, p1node, p2node=None):
2231 if p2node is None:
2228 if p2node is None:
2232 p2node = self._repo.nodeconstants.nullid
2229 p2node = self._repo.nodeconstants.nullid
2233 assert p1node == self._wrappedctx.node()
2230 assert p1node == self._wrappedctx.node()
2234 self._parents = [self._wrappedctx, self._repo.unfiltered()[p2node]]
2231 self._parents = [self._wrappedctx, self._repo.unfiltered()[p2node]]
2235
2232
2236 def data(self, path):
2233 def data(self, path):
2237 if self.isdirty(path):
2234 if self.isdirty(path):
2238 if self._cache[path][b'exists']:
2235 if self._cache[path][b'exists']:
2239 if self._cache[path][b'data'] is not None:
2236 if self._cache[path][b'data'] is not None:
2240 return self._cache[path][b'data']
2237 return self._cache[path][b'data']
2241 else:
2238 else:
2242 # Must fallback here, too, because we only set flags.
2239 # Must fallback here, too, because we only set flags.
2243 return self._wrappedctx[path].data()
2240 return self._wrappedctx[path].data()
2244 else:
2241 else:
2245 raise error.ProgrammingError(
2242 raise error.ProgrammingError(
2246 b"No such file or directory: %s" % path
2243 b"No such file or directory: %s" % path
2247 )
2244 )
2248 else:
2245 else:
2249 return self._wrappedctx[path].data()
2246 return self._wrappedctx[path].data()
2250
2247
2251 @propertycache
2248 @propertycache
2252 def _manifest(self):
2249 def _manifest(self):
2253 parents = self.parents()
2250 parents = self.parents()
2254 man = parents[0].manifest().copy()
2251 man = parents[0].manifest().copy()
2255
2252
2256 flag = self._flagfunc
2253 flag = self._flagfunc
2257 for path in self.added():
2254 for path in self.added():
2258 man[path] = self._repo.nodeconstants.addednodeid
2255 man[path] = self._repo.nodeconstants.addednodeid
2259 man.setflag(path, flag(path))
2256 man.setflag(path, flag(path))
2260 for path in self.modified():
2257 for path in self.modified():
2261 man[path] = self._repo.nodeconstants.modifiednodeid
2258 man[path] = self._repo.nodeconstants.modifiednodeid
2262 man.setflag(path, flag(path))
2259 man.setflag(path, flag(path))
2263 for path in self.removed():
2260 for path in self.removed():
2264 del man[path]
2261 del man[path]
2265 return man
2262 return man
2266
2263
2267 @propertycache
2264 @propertycache
2268 def _flagfunc(self):
2265 def _flagfunc(self):
2269 def f(path):
2266 def f(path):
2270 return self._cache[path][b'flags']
2267 return self._cache[path][b'flags']
2271
2268
2272 return f
2269 return f
2273
2270
2274 def files(self):
2271 def files(self):
2275 return sorted(self.added() + self.modified() + self.removed())
2272 return sorted(self.added() + self.modified() + self.removed())
2276
2273
2277 def modified(self):
2274 def modified(self):
2278 return [
2275 return [
2279 f
2276 f
2280 for f in self._cache.keys()
2277 for f in self._cache.keys()
2281 if self._cache[f][b'exists'] and self._existsinparent(f)
2278 if self._cache[f][b'exists'] and self._existsinparent(f)
2282 ]
2279 ]
2283
2280
2284 def added(self):
2281 def added(self):
2285 return [
2282 return [
2286 f
2283 f
2287 for f in self._cache.keys()
2284 for f in self._cache.keys()
2288 if self._cache[f][b'exists'] and not self._existsinparent(f)
2285 if self._cache[f][b'exists'] and not self._existsinparent(f)
2289 ]
2286 ]
2290
2287
2291 def removed(self):
2288 def removed(self):
2292 return [
2289 return [
2293 f
2290 f
2294 for f in self._cache.keys()
2291 for f in self._cache.keys()
2295 if not self._cache[f][b'exists'] and self._existsinparent(f)
2292 if not self._cache[f][b'exists'] and self._existsinparent(f)
2296 ]
2293 ]
2297
2294
2298 def p1copies(self):
2295 def p1copies(self):
2299 copies = {}
2296 copies = {}
2300 narrowmatch = self._repo.narrowmatch()
2297 narrowmatch = self._repo.narrowmatch()
2301 for f in self._cache.keys():
2298 for f in self._cache.keys():
2302 if not narrowmatch(f):
2299 if not narrowmatch(f):
2303 continue
2300 continue
2304 copies.pop(f, None) # delete if it exists
2301 copies.pop(f, None) # delete if it exists
2305 source = self._cache[f][b'copied']
2302 source = self._cache[f][b'copied']
2306 if source:
2303 if source:
2307 copies[f] = source
2304 copies[f] = source
2308 return copies
2305 return copies
2309
2306
2310 def p2copies(self):
2307 def p2copies(self):
2311 copies = {}
2308 copies = {}
2312 narrowmatch = self._repo.narrowmatch()
2309 narrowmatch = self._repo.narrowmatch()
2313 for f in self._cache.keys():
2310 for f in self._cache.keys():
2314 if not narrowmatch(f):
2311 if not narrowmatch(f):
2315 continue
2312 continue
2316 copies.pop(f, None) # delete if it exists
2313 copies.pop(f, None) # delete if it exists
2317 source = self._cache[f][b'copied']
2314 source = self._cache[f][b'copied']
2318 if source:
2315 if source:
2319 copies[f] = source
2316 copies[f] = source
2320 return copies
2317 return copies
2321
2318
2322 def isinmemory(self):
2319 def isinmemory(self):
2323 return True
2320 return True
2324
2321
2325 def filedate(self, path):
2322 def filedate(self, path):
2326 if self.isdirty(path):
2323 if self.isdirty(path):
2327 return self._cache[path][b'date']
2324 return self._cache[path][b'date']
2328 else:
2325 else:
2329 return self._wrappedctx[path].date()
2326 return self._wrappedctx[path].date()
2330
2327
2331 def markcopied(self, path, origin):
2328 def markcopied(self, path, origin):
2332 self._markdirty(
2329 self._markdirty(
2333 path,
2330 path,
2334 exists=True,
2331 exists=True,
2335 date=self.filedate(path),
2332 date=self.filedate(path),
2336 flags=self.flags(path),
2333 flags=self.flags(path),
2337 copied=origin,
2334 copied=origin,
2338 )
2335 )
2339
2336
2340 def copydata(self, path):
2337 def copydata(self, path):
2341 if self.isdirty(path):
2338 if self.isdirty(path):
2342 return self._cache[path][b'copied']
2339 return self._cache[path][b'copied']
2343 else:
2340 else:
2344 return None
2341 return None
2345
2342
2346 def flags(self, path):
2343 def flags(self, path):
2347 if self.isdirty(path):
2344 if self.isdirty(path):
2348 if self._cache[path][b'exists']:
2345 if self._cache[path][b'exists']:
2349 return self._cache[path][b'flags']
2346 return self._cache[path][b'flags']
2350 else:
2347 else:
2351 raise error.ProgrammingError(
2348 raise error.ProgrammingError(
2352 b"No such file or directory: %s" % path
2349 b"No such file or directory: %s" % path
2353 )
2350 )
2354 else:
2351 else:
2355 return self._wrappedctx[path].flags()
2352 return self._wrappedctx[path].flags()
2356
2353
2357 def __contains__(self, key):
2354 def __contains__(self, key):
2358 if key in self._cache:
2355 if key in self._cache:
2359 return self._cache[key][b'exists']
2356 return self._cache[key][b'exists']
2360 return key in self.p1()
2357 return key in self.p1()
2361
2358
2362 def _existsinparent(self, path):
2359 def _existsinparent(self, path):
2363 try:
2360 try:
2364 # ``commitctx` raises a ``ManifestLookupError`` if a path does not
2361 # ``commitctx` raises a ``ManifestLookupError`` if a path does not
2365 # exist, unlike ``workingctx``, which returns a ``workingfilectx``
2362 # exist, unlike ``workingctx``, which returns a ``workingfilectx``
2366 # with an ``exists()`` function.
2363 # with an ``exists()`` function.
2367 self._wrappedctx[path]
2364 self._wrappedctx[path]
2368 return True
2365 return True
2369 except error.ManifestLookupError:
2366 except error.ManifestLookupError:
2370 return False
2367 return False
2371
2368
2372 def _auditconflicts(self, path):
2369 def _auditconflicts(self, path):
2373 """Replicates conflict checks done by wvfs.write().
2370 """Replicates conflict checks done by wvfs.write().
2374
2371
2375 Since we never write to the filesystem and never call `applyupdates` in
2372 Since we never write to the filesystem and never call `applyupdates` in
2376 IMM, we'll never check that a path is actually writable -- e.g., because
2373 IMM, we'll never check that a path is actually writable -- e.g., because
2377 it adds `a/foo`, but `a` is actually a file in the other commit.
2374 it adds `a/foo`, but `a` is actually a file in the other commit.
2378 """
2375 """
2379
2376
2380 def fail(path, component):
2377 def fail(path, component):
2381 # p1() is the base and we're receiving "writes" for p2()'s
2378 # p1() is the base and we're receiving "writes" for p2()'s
2382 # files.
2379 # files.
2383 if b'l' in self.p1()[component].flags():
2380 if b'l' in self.p1()[component].flags():
2384 raise error.Abort(
2381 raise error.Abort(
2385 b"error: %s conflicts with symlink %s "
2382 b"error: %s conflicts with symlink %s "
2386 b"in %d." % (path, component, self.p1().rev())
2383 b"in %d." % (path, component, self.p1().rev())
2387 )
2384 )
2388 else:
2385 else:
2389 raise error.Abort(
2386 raise error.Abort(
2390 b"error: '%s' conflicts with file '%s' in "
2387 b"error: '%s' conflicts with file '%s' in "
2391 b"%d." % (path, component, self.p1().rev())
2388 b"%d." % (path, component, self.p1().rev())
2392 )
2389 )
2393
2390
2394 # Test that each new directory to be created to write this path from p2
2391 # Test that each new directory to be created to write this path from p2
2395 # is not a file in p1.
2392 # is not a file in p1.
2396 components = path.split(b'/')
2393 components = path.split(b'/')
2397 for i in pycompat.xrange(len(components)):
2394 for i in pycompat.xrange(len(components)):
2398 component = b"/".join(components[0:i])
2395 component = b"/".join(components[0:i])
2399 if component in self:
2396 if component in self:
2400 fail(path, component)
2397 fail(path, component)
2401
2398
2402 # Test the other direction -- that this path from p2 isn't a directory
2399 # Test the other direction -- that this path from p2 isn't a directory
2403 # in p1 (test that p1 doesn't have any paths matching `path/*`).
2400 # in p1 (test that p1 doesn't have any paths matching `path/*`).
2404 match = self.match([path], default=b'path')
2401 match = self.match([path], default=b'path')
2405 mfiles = list(self.p1().manifest().walk(match))
2402 mfiles = list(self.p1().manifest().walk(match))
2406 if len(mfiles) > 0:
2403 if len(mfiles) > 0:
2407 if len(mfiles) == 1 and mfiles[0] == path:
2404 if len(mfiles) == 1 and mfiles[0] == path:
2408 return
2405 return
2409 # omit the files which are deleted in current IMM wctx
2406 # omit the files which are deleted in current IMM wctx
2410 mfiles = [m for m in mfiles if m in self]
2407 mfiles = [m for m in mfiles if m in self]
2411 if not mfiles:
2408 if not mfiles:
2412 return
2409 return
2413 raise error.Abort(
2410 raise error.Abort(
2414 b"error: file '%s' cannot be written because "
2411 b"error: file '%s' cannot be written because "
2415 b" '%s/' is a directory in %s (containing %d "
2412 b" '%s/' is a directory in %s (containing %d "
2416 b"entries: %s)"
2413 b"entries: %s)"
2417 % (path, path, self.p1(), len(mfiles), b', '.join(mfiles))
2414 % (path, path, self.p1(), len(mfiles), b', '.join(mfiles))
2418 )
2415 )
2419
2416
2420 def write(self, path, data, flags=b'', **kwargs):
2417 def write(self, path, data, flags=b'', **kwargs):
2421 if data is None:
2418 if data is None:
2422 raise error.ProgrammingError(b"data must be non-None")
2419 raise error.ProgrammingError(b"data must be non-None")
2423 self._auditconflicts(path)
2420 self._auditconflicts(path)
2424 self._markdirty(
2421 self._markdirty(
2425 path, exists=True, data=data, date=dateutil.makedate(), flags=flags
2422 path, exists=True, data=data, date=dateutil.makedate(), flags=flags
2426 )
2423 )
2427
2424
2428 def setflags(self, path, l, x):
2425 def setflags(self, path, l, x):
2429 flag = b''
2426 flag = b''
2430 if l:
2427 if l:
2431 flag = b'l'
2428 flag = b'l'
2432 elif x:
2429 elif x:
2433 flag = b'x'
2430 flag = b'x'
2434 self._markdirty(path, exists=True, date=dateutil.makedate(), flags=flag)
2431 self._markdirty(path, exists=True, date=dateutil.makedate(), flags=flag)
2435
2432
2436 def remove(self, path):
2433 def remove(self, path):
2437 self._markdirty(path, exists=False)
2434 self._markdirty(path, exists=False)
2438
2435
2439 def exists(self, path):
2436 def exists(self, path):
2440 """exists behaves like `lexists`, but needs to follow symlinks and
2437 """exists behaves like `lexists`, but needs to follow symlinks and
2441 return False if they are broken.
2438 return False if they are broken.
2442 """
2439 """
2443 if self.isdirty(path):
2440 if self.isdirty(path):
2444 # If this path exists and is a symlink, "follow" it by calling
2441 # If this path exists and is a symlink, "follow" it by calling
2445 # exists on the destination path.
2442 # exists on the destination path.
2446 if (
2443 if (
2447 self._cache[path][b'exists']
2444 self._cache[path][b'exists']
2448 and b'l' in self._cache[path][b'flags']
2445 and b'l' in self._cache[path][b'flags']
2449 ):
2446 ):
2450 return self.exists(self._cache[path][b'data'].strip())
2447 return self.exists(self._cache[path][b'data'].strip())
2451 else:
2448 else:
2452 return self._cache[path][b'exists']
2449 return self._cache[path][b'exists']
2453
2450
2454 return self._existsinparent(path)
2451 return self._existsinparent(path)
2455
2452
2456 def lexists(self, path):
2453 def lexists(self, path):
2457 """lexists returns True if the path exists"""
2454 """lexists returns True if the path exists"""
2458 if self.isdirty(path):
2455 if self.isdirty(path):
2459 return self._cache[path][b'exists']
2456 return self._cache[path][b'exists']
2460
2457
2461 return self._existsinparent(path)
2458 return self._existsinparent(path)
2462
2459
2463 def size(self, path):
2460 def size(self, path):
2464 if self.isdirty(path):
2461 if self.isdirty(path):
2465 if self._cache[path][b'exists']:
2462 if self._cache[path][b'exists']:
2466 return len(self._cache[path][b'data'])
2463 return len(self._cache[path][b'data'])
2467 else:
2464 else:
2468 raise error.ProgrammingError(
2465 raise error.ProgrammingError(
2469 b"No such file or directory: %s" % path
2466 b"No such file or directory: %s" % path
2470 )
2467 )
2471 return self._wrappedctx[path].size()
2468 return self._wrappedctx[path].size()
2472
2469
2473 def tomemctx(
2470 def tomemctx(
2474 self,
2471 self,
2475 text,
2472 text,
2476 branch=None,
2473 branch=None,
2477 extra=None,
2474 extra=None,
2478 date=None,
2475 date=None,
2479 parents=None,
2476 parents=None,
2480 user=None,
2477 user=None,
2481 editor=None,
2478 editor=None,
2482 ):
2479 ):
2483 """Converts this ``overlayworkingctx`` into a ``memctx`` ready to be
2480 """Converts this ``overlayworkingctx`` into a ``memctx`` ready to be
2484 committed.
2481 committed.
2485
2482
2486 ``text`` is the commit message.
2483 ``text`` is the commit message.
2487 ``parents`` (optional) are rev numbers.
2484 ``parents`` (optional) are rev numbers.
2488 """
2485 """
2489 # Default parents to the wrapped context if not passed.
2486 # Default parents to the wrapped context if not passed.
2490 if parents is None:
2487 if parents is None:
2491 parents = self.parents()
2488 parents = self.parents()
2492 if len(parents) == 1:
2489 if len(parents) == 1:
2493 parents = (parents[0], None)
2490 parents = (parents[0], None)
2494
2491
2495 # ``parents`` is passed as rev numbers; convert to ``commitctxs``.
2492 # ``parents`` is passed as rev numbers; convert to ``commitctxs``.
2496 if parents[1] is None:
2493 if parents[1] is None:
2497 parents = (self._repo[parents[0]], None)
2494 parents = (self._repo[parents[0]], None)
2498 else:
2495 else:
2499 parents = (self._repo[parents[0]], self._repo[parents[1]])
2496 parents = (self._repo[parents[0]], self._repo[parents[1]])
2500
2497
2501 files = self.files()
2498 files = self.files()
2502
2499
2503 def getfile(repo, memctx, path):
2500 def getfile(repo, memctx, path):
2504 if self._cache[path][b'exists']:
2501 if self._cache[path][b'exists']:
2505 return memfilectx(
2502 return memfilectx(
2506 repo,
2503 repo,
2507 memctx,
2504 memctx,
2508 path,
2505 path,
2509 self._cache[path][b'data'],
2506 self._cache[path][b'data'],
2510 b'l' in self._cache[path][b'flags'],
2507 b'l' in self._cache[path][b'flags'],
2511 b'x' in self._cache[path][b'flags'],
2508 b'x' in self._cache[path][b'flags'],
2512 self._cache[path][b'copied'],
2509 self._cache[path][b'copied'],
2513 )
2510 )
2514 else:
2511 else:
2515 # Returning None, but including the path in `files`, is
2512 # Returning None, but including the path in `files`, is
2516 # necessary for memctx to register a deletion.
2513 # necessary for memctx to register a deletion.
2517 return None
2514 return None
2518
2515
2519 if branch is None:
2516 if branch is None:
2520 branch = self._wrappedctx.branch()
2517 branch = self._wrappedctx.branch()
2521
2518
2522 return memctx(
2519 return memctx(
2523 self._repo,
2520 self._repo,
2524 parents,
2521 parents,
2525 text,
2522 text,
2526 files,
2523 files,
2527 getfile,
2524 getfile,
2528 date=date,
2525 date=date,
2529 extra=extra,
2526 extra=extra,
2530 user=user,
2527 user=user,
2531 branch=branch,
2528 branch=branch,
2532 editor=editor,
2529 editor=editor,
2533 )
2530 )
2534
2531
2535 def tomemctx_for_amend(self, precursor):
2532 def tomemctx_for_amend(self, precursor):
2536 extra = precursor.extra().copy()
2533 extra = precursor.extra().copy()
2537 extra[b'amend_source'] = precursor.hex()
2534 extra[b'amend_source'] = precursor.hex()
2538 return self.tomemctx(
2535 return self.tomemctx(
2539 text=precursor.description(),
2536 text=precursor.description(),
2540 branch=precursor.branch(),
2537 branch=precursor.branch(),
2541 extra=extra,
2538 extra=extra,
2542 date=precursor.date(),
2539 date=precursor.date(),
2543 user=precursor.user(),
2540 user=precursor.user(),
2544 )
2541 )
2545
2542
2546 def isdirty(self, path):
2543 def isdirty(self, path):
2547 return path in self._cache
2544 return path in self._cache
2548
2545
2549 def clean(self):
2546 def clean(self):
2550 self._mergestate = None
2547 self._mergestate = None
2551 self._cache = {}
2548 self._cache = {}
2552
2549
2553 def _compact(self):
2550 def _compact(self):
2554 """Removes keys from the cache that are actually clean, by comparing
2551 """Removes keys from the cache that are actually clean, by comparing
2555 them with the underlying context.
2552 them with the underlying context.
2556
2553
2557 This can occur during the merge process, e.g. by passing --tool :local
2554 This can occur during the merge process, e.g. by passing --tool :local
2558 to resolve a conflict.
2555 to resolve a conflict.
2559 """
2556 """
2560 keys = []
2557 keys = []
2561 # This won't be perfect, but can help performance significantly when
2558 # This won't be perfect, but can help performance significantly when
2562 # using things like remotefilelog.
2559 # using things like remotefilelog.
2563 scmutil.prefetchfiles(
2560 scmutil.prefetchfiles(
2564 self.repo(),
2561 self.repo(),
2565 [
2562 [
2566 (
2563 (
2567 self.p1().rev(),
2564 self.p1().rev(),
2568 scmutil.matchfiles(self.repo(), self._cache.keys()),
2565 scmutil.matchfiles(self.repo(), self._cache.keys()),
2569 )
2566 )
2570 ],
2567 ],
2571 )
2568 )
2572
2569
2573 for path in self._cache.keys():
2570 for path in self._cache.keys():
2574 cache = self._cache[path]
2571 cache = self._cache[path]
2575 try:
2572 try:
2576 underlying = self._wrappedctx[path]
2573 underlying = self._wrappedctx[path]
2577 if (
2574 if (
2578 underlying.data() == cache[b'data']
2575 underlying.data() == cache[b'data']
2579 and underlying.flags() == cache[b'flags']
2576 and underlying.flags() == cache[b'flags']
2580 ):
2577 ):
2581 keys.append(path)
2578 keys.append(path)
2582 except error.ManifestLookupError:
2579 except error.ManifestLookupError:
2583 # Path not in the underlying manifest (created).
2580 # Path not in the underlying manifest (created).
2584 continue
2581 continue
2585
2582
2586 for path in keys:
2583 for path in keys:
2587 del self._cache[path]
2584 del self._cache[path]
2588 return keys
2585 return keys
2589
2586
2590 def _markdirty(
2587 def _markdirty(
2591 self, path, exists, data=None, date=None, flags=b'', copied=None
2588 self, path, exists, data=None, date=None, flags=b'', copied=None
2592 ):
2589 ):
2593 # data not provided, let's see if we already have some; if not, let's
2590 # data not provided, let's see if we already have some; if not, let's
2594 # grab it from our underlying context, so that we always have data if
2591 # grab it from our underlying context, so that we always have data if
2595 # the file is marked as existing.
2592 # the file is marked as existing.
2596 if exists and data is None:
2593 if exists and data is None:
2597 oldentry = self._cache.get(path) or {}
2594 oldentry = self._cache.get(path) or {}
2598 data = oldentry.get(b'data')
2595 data = oldentry.get(b'data')
2599 if data is None:
2596 if data is None:
2600 data = self._wrappedctx[path].data()
2597 data = self._wrappedctx[path].data()
2601
2598
2602 self._cache[path] = {
2599 self._cache[path] = {
2603 b'exists': exists,
2600 b'exists': exists,
2604 b'data': data,
2601 b'data': data,
2605 b'date': date,
2602 b'date': date,
2606 b'flags': flags,
2603 b'flags': flags,
2607 b'copied': copied,
2604 b'copied': copied,
2608 }
2605 }
2609 util.clearcachedproperty(self, b'_manifest')
2606 util.clearcachedproperty(self, b'_manifest')
2610
2607
2611 def filectx(self, path, filelog=None):
2608 def filectx(self, path, filelog=None):
2612 return overlayworkingfilectx(
2609 return overlayworkingfilectx(
2613 self._repo, path, parent=self, filelog=filelog
2610 self._repo, path, parent=self, filelog=filelog
2614 )
2611 )
2615
2612
2616 def mergestate(self, clean=False):
2613 def mergestate(self, clean=False):
2617 if clean or self._mergestate is None:
2614 if clean or self._mergestate is None:
2618 self._mergestate = mergestatemod.memmergestate(self._repo)
2615 self._mergestate = mergestatemod.memmergestate(self._repo)
2619 return self._mergestate
2616 return self._mergestate
2620
2617
2621
2618
2622 class overlayworkingfilectx(committablefilectx):
2619 class overlayworkingfilectx(committablefilectx):
2623 """Wrap a ``workingfilectx`` but intercepts all writes into an in-memory
2620 """Wrap a ``workingfilectx`` but intercepts all writes into an in-memory
2624 cache, which can be flushed through later by calling ``flush()``."""
2621 cache, which can be flushed through later by calling ``flush()``."""
2625
2622
2626 def __init__(self, repo, path, filelog=None, parent=None):
2623 def __init__(self, repo, path, filelog=None, parent=None):
2627 super(overlayworkingfilectx, self).__init__(repo, path, filelog, parent)
2624 super(overlayworkingfilectx, self).__init__(repo, path, filelog, parent)
2628 self._repo = repo
2625 self._repo = repo
2629 self._parent = parent
2626 self._parent = parent
2630 self._path = path
2627 self._path = path
2631
2628
2632 def cmp(self, fctx):
2629 def cmp(self, fctx):
2633 return self.data() != fctx.data()
2630 return self.data() != fctx.data()
2634
2631
2635 def changectx(self):
2632 def changectx(self):
2636 return self._parent
2633 return self._parent
2637
2634
2638 def data(self):
2635 def data(self):
2639 return self._parent.data(self._path)
2636 return self._parent.data(self._path)
2640
2637
2641 def date(self):
2638 def date(self):
2642 return self._parent.filedate(self._path)
2639 return self._parent.filedate(self._path)
2643
2640
2644 def exists(self):
2641 def exists(self):
2645 return self.lexists()
2642 return self.lexists()
2646
2643
2647 def lexists(self):
2644 def lexists(self):
2648 return self._parent.exists(self._path)
2645 return self._parent.exists(self._path)
2649
2646
2650 def copysource(self):
2647 def copysource(self):
2651 return self._parent.copydata(self._path)
2648 return self._parent.copydata(self._path)
2652
2649
2653 def size(self):
2650 def size(self):
2654 return self._parent.size(self._path)
2651 return self._parent.size(self._path)
2655
2652
2656 def markcopied(self, origin):
2653 def markcopied(self, origin):
2657 self._parent.markcopied(self._path, origin)
2654 self._parent.markcopied(self._path, origin)
2658
2655
2659 def audit(self):
2656 def audit(self):
2660 pass
2657 pass
2661
2658
2662 def flags(self):
2659 def flags(self):
2663 return self._parent.flags(self._path)
2660 return self._parent.flags(self._path)
2664
2661
2665 def setflags(self, islink, isexec):
2662 def setflags(self, islink, isexec):
2666 return self._parent.setflags(self._path, islink, isexec)
2663 return self._parent.setflags(self._path, islink, isexec)
2667
2664
2668 def write(self, data, flags, backgroundclose=False, **kwargs):
2665 def write(self, data, flags, backgroundclose=False, **kwargs):
2669 return self._parent.write(self._path, data, flags, **kwargs)
2666 return self._parent.write(self._path, data, flags, **kwargs)
2670
2667
2671 def remove(self, ignoremissing=False):
2668 def remove(self, ignoremissing=False):
2672 return self._parent.remove(self._path)
2669 return self._parent.remove(self._path)
2673
2670
2674 def clearunknown(self):
2671 def clearunknown(self):
2675 pass
2672 pass
2676
2673
2677
2674
2678 class workingcommitctx(workingctx):
2675 class workingcommitctx(workingctx):
2679 """A workingcommitctx object makes access to data related to
2676 """A workingcommitctx object makes access to data related to
2680 the revision being committed convenient.
2677 the revision being committed convenient.
2681
2678
2682 This hides changes in the working directory, if they aren't
2679 This hides changes in the working directory, if they aren't
2683 committed in this context.
2680 committed in this context.
2684 """
2681 """
2685
2682
2686 def __init__(
2683 def __init__(
2687 self, repo, changes, text=b"", user=None, date=None, extra=None
2684 self, repo, changes, text=b"", user=None, date=None, extra=None
2688 ):
2685 ):
2689 super(workingcommitctx, self).__init__(
2686 super(workingcommitctx, self).__init__(
2690 repo, text, user, date, extra, changes
2687 repo, text, user, date, extra, changes
2691 )
2688 )
2692
2689
2693 def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
2690 def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
2694 """Return matched files only in ``self._status``
2691 """Return matched files only in ``self._status``
2695
2692
2696 Uncommitted files appear "clean" via this context, even if
2693 Uncommitted files appear "clean" via this context, even if
2697 they aren't actually so in the working directory.
2694 they aren't actually so in the working directory.
2698 """
2695 """
2699 if clean:
2696 if clean:
2700 clean = [f for f in self._manifest if f not in self._changedset]
2697 clean = [f for f in self._manifest if f not in self._changedset]
2701 else:
2698 else:
2702 clean = []
2699 clean = []
2703 return scmutil.status(
2700 return scmutil.status(
2704 [f for f in self._status.modified if match(f)],
2701 [f for f in self._status.modified if match(f)],
2705 [f for f in self._status.added if match(f)],
2702 [f for f in self._status.added if match(f)],
2706 [f for f in self._status.removed if match(f)],
2703 [f for f in self._status.removed if match(f)],
2707 [],
2704 [],
2708 [],
2705 [],
2709 [],
2706 [],
2710 clean,
2707 clean,
2711 )
2708 )
2712
2709
2713 @propertycache
2710 @propertycache
2714 def _changedset(self):
2711 def _changedset(self):
2715 """Return the set of files changed in this context"""
2712 """Return the set of files changed in this context"""
2716 changed = set(self._status.modified)
2713 changed = set(self._status.modified)
2717 changed.update(self._status.added)
2714 changed.update(self._status.added)
2718 changed.update(self._status.removed)
2715 changed.update(self._status.removed)
2719 return changed
2716 return changed
2720
2717
2721
2718
2722 def makecachingfilectxfn(func):
2719 def makecachingfilectxfn(func):
2723 """Create a filectxfn that caches based on the path.
2720 """Create a filectxfn that caches based on the path.
2724
2721
2725 We can't use util.cachefunc because it uses all arguments as the cache
2722 We can't use util.cachefunc because it uses all arguments as the cache
2726 key and this creates a cycle since the arguments include the repo and
2723 key and this creates a cycle since the arguments include the repo and
2727 memctx.
2724 memctx.
2728 """
2725 """
2729 cache = {}
2726 cache = {}
2730
2727
2731 def getfilectx(repo, memctx, path):
2728 def getfilectx(repo, memctx, path):
2732 if path not in cache:
2729 if path not in cache:
2733 cache[path] = func(repo, memctx, path)
2730 cache[path] = func(repo, memctx, path)
2734 return cache[path]
2731 return cache[path]
2735
2732
2736 return getfilectx
2733 return getfilectx
2737
2734
2738
2735
2739 def memfilefromctx(ctx):
2736 def memfilefromctx(ctx):
2740 """Given a context return a memfilectx for ctx[path]
2737 """Given a context return a memfilectx for ctx[path]
2741
2738
2742 This is a convenience method for building a memctx based on another
2739 This is a convenience method for building a memctx based on another
2743 context.
2740 context.
2744 """
2741 """
2745
2742
2746 def getfilectx(repo, memctx, path):
2743 def getfilectx(repo, memctx, path):
2747 fctx = ctx[path]
2744 fctx = ctx[path]
2748 copysource = fctx.copysource()
2745 copysource = fctx.copysource()
2749 return memfilectx(
2746 return memfilectx(
2750 repo,
2747 repo,
2751 memctx,
2748 memctx,
2752 path,
2749 path,
2753 fctx.data(),
2750 fctx.data(),
2754 islink=fctx.islink(),
2751 islink=fctx.islink(),
2755 isexec=fctx.isexec(),
2752 isexec=fctx.isexec(),
2756 copysource=copysource,
2753 copysource=copysource,
2757 )
2754 )
2758
2755
2759 return getfilectx
2756 return getfilectx
2760
2757
2761
2758
2762 def memfilefrompatch(patchstore):
2759 def memfilefrompatch(patchstore):
2763 """Given a patch (e.g. patchstore object) return a memfilectx
2760 """Given a patch (e.g. patchstore object) return a memfilectx
2764
2761
2765 This is a convenience method for building a memctx based on a patchstore.
2762 This is a convenience method for building a memctx based on a patchstore.
2766 """
2763 """
2767
2764
2768 def getfilectx(repo, memctx, path):
2765 def getfilectx(repo, memctx, path):
2769 data, mode, copysource = patchstore.getfile(path)
2766 data, mode, copysource = patchstore.getfile(path)
2770 if data is None:
2767 if data is None:
2771 return None
2768 return None
2772 islink, isexec = mode
2769 islink, isexec = mode
2773 return memfilectx(
2770 return memfilectx(
2774 repo,
2771 repo,
2775 memctx,
2772 memctx,
2776 path,
2773 path,
2777 data,
2774 data,
2778 islink=islink,
2775 islink=islink,
2779 isexec=isexec,
2776 isexec=isexec,
2780 copysource=copysource,
2777 copysource=copysource,
2781 )
2778 )
2782
2779
2783 return getfilectx
2780 return getfilectx
2784
2781
2785
2782
class memctx(committablectx):
    """Use memctx to perform in-memory commits via localrepo.commitctx().

    Revision information is supplied at initialization time while
    related files data and is made available through a callback
    mechanism. 'repo' is the current localrepo, 'parents' is a
    sequence of two parent revisions identifiers (pass None for every
    missing parent), 'text' is the commit message and 'files' lists
    names of files touched by the revision (normalized and relative to
    repository root).

    filectxfn(repo, memctx, path) is a callable receiving the
    repository, the current memctx object and the normalized path of
    requested file, relative to repository root. It is fired by the
    commit function for every file in 'files', but calls order is
    undefined. If the file is available in the revision being
    committed (updated or added), filectxfn returns a memfilectx
    object. If the file was removed, filectxfn return None for recent
    Mercurial. Moved files are represented by marking the source file
    removed and the new file added with copy information (see
    memfilectx).

    user receives the committer name and defaults to current
    repository username, date is the commit date in any format
    supported by dateutil.parsedate() and defaults to current date, extra
    is a dictionary of metadata or is left empty.
    """

    # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
    # Extensions that need to retain compatibility across Mercurial 3.1 can use
    # this field to determine what to do in filectxfn.
    _returnnoneformissingfiles = True

    def __init__(
        self,
        repo,
        parents,
        text,
        files,
        filectxfn,
        user=None,
        date=None,
        extra=None,
        branch=None,
        editor=None,
    ):
        super(memctx, self).__init__(
            repo, text, user, date, extra, branch=branch
        )
        # In-memory contexts have no revision number or node yet.
        self._rev = None
        self._node = None
        # Missing (None) parents are normalized to the null node id.
        parents = [(p or self._repo.nodeconstants.nullid) for p in parents]
        p1, p2 = parents
        self._parents = [self._repo[p] for p in (p1, p2)]
        files = sorted(set(files))
        self._files = files
        self.substate = {}

        if isinstance(filectxfn, patch.filestore):
            filectxfn = memfilefrompatch(filectxfn)
        elif not callable(filectxfn):
            # if store is not callable, wrap it in a function
            filectxfn = memfilefromctx(filectxfn)

        # memoizing increases performance for e.g. vcs convert scenarios.
        self._filectxfn = makecachingfilectxfn(filectxfn)

        if editor:
            self._text = editor(self._repo, self, [])
            self._repo.savecommitmessage(self._text)

    def filectx(self, path, filelog=None):
        """get a file context from the working directory

        Returns None if file doesn't exist and should be removed."""
        return self._filectxfn(self._repo, self, path)

    def commit(self):
        """commit context to the repo"""
        return self._repo.commitctx(self)

    @propertycache
    def _manifest(self):
        """generate a manifest based on the return values of filectxfn"""

        # keep this simple for now; just worry about p1
        pctx = self._parents[0]
        man = pctx.manifest().copy()

        for f in self._status.modified:
            man[f] = self._repo.nodeconstants.modifiednodeid

        for f in self._status.added:
            man[f] = self._repo.nodeconstants.addednodeid

        for f in self._status.removed:
            if f in man:
                del man[f]

        return man

    @propertycache
    def _status(self):
        """Calculate exact status from ``files`` specified at construction"""
        man1 = self.p1().manifest()
        p2 = self._parents[1]
        # "1 < len(self._parents)" can't be used for checking
        # existence of the 2nd parent, because "memctx._parents" is
        # explicitly initialized by the list, of which length is 2.
        if p2.rev() != nullrev:
            man2 = p2.manifest()
            managing = lambda f: f in man1 or f in man2
        else:
            managing = lambda f: f in man1

        modified, added, removed = [], [], []
        for f in self._files:
            if not managing(f):
                # unknown to both parents: a brand-new file
                added.append(f)
            elif self[f]:
                # filectxfn produced content for it
                modified.append(f)
            else:
                # filectxfn returned None: the file is being removed
                removed.append(f)

        return scmutil.status(modified, added, removed, [], [], [], [])

    def parents(self):
        # A missing second parent is stored as the null revision; hide
        # it from callers in that case.
        if self._parents[1].rev() == nullrev:
            return [self._parents[0]]
        return self._parents
2916
2913
2917
2914
class memfilectx(committablefilectx):
    """An in-memory file, to be committed as part of a memctx.

    See memctx and committablefilectx for more details.
    """

    def __init__(
        self,
        repo,
        changectx,
        path,
        data,
        islink=False,
        isexec=False,
        copysource=None,
    ):
        """
        path is the normalized file path relative to repository root.
        data is the file content as a string.
        islink is True if the file is a symbolic link.
        isexec is True if the file is executable.
        copied is the source file path if current file was copied in the
        revision being committed, or None."""
        super(memfilectx, self).__init__(repo, path, None, changectx)
        self._data = data
        # Symlink takes precedence over the executable bit.
        if islink:
            flags = b'l'
        else:
            flags = b'x' if isexec else b''
        self._flags = flags
        self._copysource = copysource

    def copysource(self):
        # Path this file was copied from, or None.
        return self._copysource

    def cmp(self, fctx):
        # True when the contents differ.
        return self.data() != fctx.data()

    def data(self):
        return self._data

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        # need to figure out what to do here
        del self._changectx[self._path]

    def write(self, data, flags, **kwargs):
        """wraps repo.wwrite"""
        # Only the in-memory content is replaced; flags are ignored here.
        self._data = data
2968
2965
2969
2966
class metadataonlyctx(committablectx):
    """Like memctx but it's reusing the manifest of different commit.
    Intended to be used by lightweight operations that are creating
    metadata-only changes.

    Revision information is supplied at initialization time. 'repo' is the
    current localrepo, 'ctx' is original revision which manifest we're reusing
    'parents' is a sequence of two parent revisions identifiers (pass None for
    every missing parent), 'text' is the commit.

    user receives the committer name and defaults to current repository
    username, date is the commit date in any format supported by
    dateutil.parsedate() and defaults to current date, extra is a dictionary of
    metadata or is left empty.
    """

    def __init__(
        self,
        repo,
        originalctx,
        parents=None,
        text=None,
        user=None,
        date=None,
        extra=None,
        editor=None,
    ):
        if text is None:
            text = originalctx.description()
        super(metadataonlyctx, self).__init__(repo, text, user, date, extra)
        # Not committed yet: no revision number or node.
        self._rev = None
        self._node = None
        self._originalctx = originalctx
        # The manifest of the original revision is reused as-is.
        self._manifestnode = originalctx.manifestnode()
        if parents is None:
            parents = originalctx.parents()
        else:
            parents = [repo[p] for p in parents if p is not None]
        # Copy before padding so the caller's list is not mutated.
        parents = parents[:]
        while len(parents) < 2:
            parents.append(repo[nullrev])
        p1, p2 = self._parents = parents

        # sanity check to ensure that the reused manifest parents are
        # manifests of our commit parents
        mp1, mp2 = self.manifestctx().parents
        # NOTE(review): ``p1``/``p2`` are changectx objects while ``nullid``
        # is a raw node, so the first inequality presumably always holds and
        # the manifest-node comparison alone decides — confirm the intent.
        if p1 != self._repo.nodeconstants.nullid and p1.manifestnode() != mp1:
            raise RuntimeError(
                r"can't reuse the manifest: its p1 "
                r"doesn't match the new ctx p1"
            )
        if p2 != self._repo.nodeconstants.nullid and p2.manifestnode() != mp2:
            raise RuntimeError(
                r"can't reuse the manifest: "
                r"its p2 doesn't match the new ctx p2"
            )

        self._files = originalctx.files()
        self.substate = {}

        if editor:
            self._text = editor(self._repo, self, [])
            self._repo.savecommitmessage(self._text)

    def manifestnode(self):
        # Node of the reused manifest.
        return self._manifestnode

    @property
    def _manifestctx(self):
        return self._repo.manifestlog[self._manifestnode]

    def filectx(self, path, filelog=None):
        # Delegate file access to the revision whose manifest we reuse.
        return self._originalctx.filectx(path, filelog=filelog)

    def commit(self):
        """commit context to the repo"""
        return self._repo.commitctx(self)

    @property
    def _manifest(self):
        return self._originalctx.manifest()

    @propertycache
    def _status(self):
        """Calculate exact status from ``files`` specified in the ``origctx``
        and parents manifests.
        """
        man1 = self.p1().manifest()
        p2 = self._parents[1]
        # "1 < len(self._parents)" can't be used for checking
        # existence of the 2nd parent, because "metadataonlyctx._parents" is
        # explicitly initialized by the list, of which length is 2.
        if p2.rev() != nullrev:
            man2 = p2.manifest()
            managing = lambda f: f in man1 or f in man2
        else:
            managing = lambda f: f in man1

        modified, added, removed = [], [], []
        for f in self._files:
            if not managing(f):
                # tracked by neither parent
                added.append(f)
            elif f in self:
                # present in the reused manifest
                modified.append(f)
            else:
                removed.append(f)

        return scmutil.status(modified, added, removed, [], [], [], [])
3078
3075
3079
3076
class arbitraryfilectx(object):
    """Expose filectx-like operations on a file at an arbitrary
    location on disk, possibly not in the working directory.
    """

    def __init__(self, path, repo=None):
        # Repo is optional because contrib/simplemerge uses this class.
        self._repo = repo
        self._path = path

    def cmp(self, fctx):
        # filecmp.cmp() follows symlinks whereas our cmp() contract must
        # not, so the fast path is only safe when neither side is a symlink.
        either_symlink = b'l' in self.flags() or b'l' in fctx.flags()
        use_fast_path = (
            not either_symlink
            and isinstance(fctx, workingfilectx)
            and self._repo
        )
        if use_fast_path:
            # Both sides are disk-backed: compare the files directly.
            # filecmp.cmp() returns True when identical, the inverse of
            # our convention (True means "different"), hence the `not`.
            other = self._repo.wjoin(fctx.path())
            return not filecmp.cmp(self.path(), other)
        return self.data() != fctx.data()

    def path(self):
        return self._path

    def flags(self):
        # Plain on-disk files carry no link/exec flags here.
        return b''

    def data(self):
        return util.readfile(self._path)

    def decodeddata(self):
        # Raw bytes straight from disk, without filter processing.
        with open(self._path, b"rb") as fp:
            return fp.read()

    def remove(self):
        util.unlink(self._path)

    def write(self, data, flags, **kwargs):
        # Flags are unsupported for arbitrary on-disk files.
        assert not flags
        with open(self._path, b"wb") as fp:
            fp.write(data)
General Comments 0
You need to be logged in to leave comments. Login now