# Mercurial changeset r48506 (branch: default), author: marmoute
# "context: use `update_file` instead of `normal` in `markcommitted`"
# context.py - changeset and file context objects for mercurial
#
# Copyright 2006, 2007 Olivia Mackall <olivia@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno
import filecmp
import os
import stat

from .i18n import _
from .node import (
    hex,
    nullrev,
    short,
)
from .pycompat import (
    getattr,
    open,
)
from . import (
    dagop,
    encoding,
    error,
    fileset,
    match as matchmod,
    mergestate as mergestatemod,
    metadata,
    obsolete as obsmod,
    patch,
    pathutil,
    phases,
    pycompat,
    repoview,
    scmutil,
    sparse,
    subrepo,
    subrepoutil,
    util,
)
from .utils import (
    dateutil,
    stringutil,
)
49
49
50 propertycache = util.propertycache
50 propertycache = util.propertycache
51
51
52
52
53 class basectx(object):
53 class basectx(object):
54 """A basectx object represents the common logic for its children:
54 """A basectx object represents the common logic for its children:
55 changectx: read-only context that is already present in the repo,
55 changectx: read-only context that is already present in the repo,
56 workingctx: a context that represents the working directory and can
56 workingctx: a context that represents the working directory and can
57 be committed,
57 be committed,
58 memctx: a context that represents changes in-memory and can also
58 memctx: a context that represents changes in-memory and can also
59 be committed."""
59 be committed."""
60
60
61 def __init__(self, repo):
61 def __init__(self, repo):
62 self._repo = repo
62 self._repo = repo
63
63
64 def __bytes__(self):
64 def __bytes__(self):
65 return short(self.node())
65 return short(self.node())
66
66
67 __str__ = encoding.strmethod(__bytes__)
67 __str__ = encoding.strmethod(__bytes__)
68
68
69 def __repr__(self):
69 def __repr__(self):
70 return "<%s %s>" % (type(self).__name__, str(self))
70 return "<%s %s>" % (type(self).__name__, str(self))
71
71
72 def __eq__(self, other):
72 def __eq__(self, other):
73 try:
73 try:
74 return type(self) == type(other) and self._rev == other._rev
74 return type(self) == type(other) and self._rev == other._rev
75 except AttributeError:
75 except AttributeError:
76 return False
76 return False
77
77
78 def __ne__(self, other):
78 def __ne__(self, other):
79 return not (self == other)
79 return not (self == other)
80
80
81 def __contains__(self, key):
81 def __contains__(self, key):
82 return key in self._manifest
82 return key in self._manifest
83
83
84 def __getitem__(self, key):
84 def __getitem__(self, key):
85 return self.filectx(key)
85 return self.filectx(key)
86
86
87 def __iter__(self):
87 def __iter__(self):
88 return iter(self._manifest)
88 return iter(self._manifest)
89
89
90 def _buildstatusmanifest(self, status):
90 def _buildstatusmanifest(self, status):
91 """Builds a manifest that includes the given status results, if this is
91 """Builds a manifest that includes the given status results, if this is
92 a working copy context. For non-working copy contexts, it just returns
92 a working copy context. For non-working copy contexts, it just returns
93 the normal manifest."""
93 the normal manifest."""
94 return self.manifest()
94 return self.manifest()
95
95
96 def _matchstatus(self, other, match):
96 def _matchstatus(self, other, match):
97 """This internal method provides a way for child objects to override the
97 """This internal method provides a way for child objects to override the
98 match operator.
98 match operator.
99 """
99 """
100 return match
100 return match
101
101
102 def _buildstatus(
102 def _buildstatus(
103 self, other, s, match, listignored, listclean, listunknown
103 self, other, s, match, listignored, listclean, listunknown
104 ):
104 ):
105 """build a status with respect to another context"""
105 """build a status with respect to another context"""
106 # Load earliest manifest first for caching reasons. More specifically,
106 # Load earliest manifest first for caching reasons. More specifically,
107 # if you have revisions 1000 and 1001, 1001 is probably stored as a
107 # if you have revisions 1000 and 1001, 1001 is probably stored as a
108 # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
108 # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
109 # 1000 and cache it so that when you read 1001, we just need to apply a
109 # 1000 and cache it so that when you read 1001, we just need to apply a
110 # delta to what's in the cache. So that's one full reconstruction + one
110 # delta to what's in the cache. So that's one full reconstruction + one
111 # delta application.
111 # delta application.
112 mf2 = None
112 mf2 = None
113 if self.rev() is not None and self.rev() < other.rev():
113 if self.rev() is not None and self.rev() < other.rev():
114 mf2 = self._buildstatusmanifest(s)
114 mf2 = self._buildstatusmanifest(s)
115 mf1 = other._buildstatusmanifest(s)
115 mf1 = other._buildstatusmanifest(s)
116 if mf2 is None:
116 if mf2 is None:
117 mf2 = self._buildstatusmanifest(s)
117 mf2 = self._buildstatusmanifest(s)
118
118
119 modified, added = [], []
119 modified, added = [], []
120 removed = []
120 removed = []
121 clean = []
121 clean = []
122 deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
122 deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
123 deletedset = set(deleted)
123 deletedset = set(deleted)
124 d = mf1.diff(mf2, match=match, clean=listclean)
124 d = mf1.diff(mf2, match=match, clean=listclean)
125 for fn, value in pycompat.iteritems(d):
125 for fn, value in pycompat.iteritems(d):
126 if fn in deletedset:
126 if fn in deletedset:
127 continue
127 continue
128 if value is None:
128 if value is None:
129 clean.append(fn)
129 clean.append(fn)
130 continue
130 continue
131 (node1, flag1), (node2, flag2) = value
131 (node1, flag1), (node2, flag2) = value
132 if node1 is None:
132 if node1 is None:
133 added.append(fn)
133 added.append(fn)
134 elif node2 is None:
134 elif node2 is None:
135 removed.append(fn)
135 removed.append(fn)
136 elif flag1 != flag2:
136 elif flag1 != flag2:
137 modified.append(fn)
137 modified.append(fn)
138 elif node2 not in self._repo.nodeconstants.wdirfilenodeids:
138 elif node2 not in self._repo.nodeconstants.wdirfilenodeids:
139 # When comparing files between two commits, we save time by
139 # When comparing files between two commits, we save time by
140 # not comparing the file contents when the nodeids differ.
140 # not comparing the file contents when the nodeids differ.
141 # Note that this means we incorrectly report a reverted change
141 # Note that this means we incorrectly report a reverted change
142 # to a file as a modification.
142 # to a file as a modification.
143 modified.append(fn)
143 modified.append(fn)
144 elif self[fn].cmp(other[fn]):
144 elif self[fn].cmp(other[fn]):
145 modified.append(fn)
145 modified.append(fn)
146 else:
146 else:
147 clean.append(fn)
147 clean.append(fn)
148
148
149 if removed:
149 if removed:
150 # need to filter files if they are already reported as removed
150 # need to filter files if they are already reported as removed
151 unknown = [
151 unknown = [
152 fn
152 fn
153 for fn in unknown
153 for fn in unknown
154 if fn not in mf1 and (not match or match(fn))
154 if fn not in mf1 and (not match or match(fn))
155 ]
155 ]
156 ignored = [
156 ignored = [
157 fn
157 fn
158 for fn in ignored
158 for fn in ignored
159 if fn not in mf1 and (not match or match(fn))
159 if fn not in mf1 and (not match or match(fn))
160 ]
160 ]
161 # if they're deleted, don't report them as removed
161 # if they're deleted, don't report them as removed
162 removed = [fn for fn in removed if fn not in deletedset]
162 removed = [fn for fn in removed if fn not in deletedset]
163
163
164 return scmutil.status(
164 return scmutil.status(
165 modified, added, removed, deleted, unknown, ignored, clean
165 modified, added, removed, deleted, unknown, ignored, clean
166 )
166 )
167
167
168 @propertycache
168 @propertycache
169 def substate(self):
169 def substate(self):
170 return subrepoutil.state(self, self._repo.ui)
170 return subrepoutil.state(self, self._repo.ui)
171
171
172 def subrev(self, subpath):
172 def subrev(self, subpath):
173 return self.substate[subpath][1]
173 return self.substate[subpath][1]
174
174
175 def rev(self):
175 def rev(self):
176 return self._rev
176 return self._rev
177
177
178 def node(self):
178 def node(self):
179 return self._node
179 return self._node
180
180
181 def hex(self):
181 def hex(self):
182 return hex(self.node())
182 return hex(self.node())
183
183
184 def manifest(self):
184 def manifest(self):
185 return self._manifest
185 return self._manifest
186
186
187 def manifestctx(self):
187 def manifestctx(self):
188 return self._manifestctx
188 return self._manifestctx
189
189
190 def repo(self):
190 def repo(self):
191 return self._repo
191 return self._repo
192
192
193 def phasestr(self):
193 def phasestr(self):
194 return phases.phasenames[self.phase()]
194 return phases.phasenames[self.phase()]
195
195
196 def mutable(self):
196 def mutable(self):
197 return self.phase() > phases.public
197 return self.phase() > phases.public
198
198
199 def matchfileset(self, cwd, expr, badfn=None):
199 def matchfileset(self, cwd, expr, badfn=None):
200 return fileset.match(self, cwd, expr, badfn=badfn)
200 return fileset.match(self, cwd, expr, badfn=badfn)
201
201
202 def obsolete(self):
202 def obsolete(self):
203 """True if the changeset is obsolete"""
203 """True if the changeset is obsolete"""
204 return self.rev() in obsmod.getrevs(self._repo, b'obsolete')
204 return self.rev() in obsmod.getrevs(self._repo, b'obsolete')
205
205
206 def extinct(self):
206 def extinct(self):
207 """True if the changeset is extinct"""
207 """True if the changeset is extinct"""
208 return self.rev() in obsmod.getrevs(self._repo, b'extinct')
208 return self.rev() in obsmod.getrevs(self._repo, b'extinct')
209
209
210 def orphan(self):
210 def orphan(self):
211 """True if the changeset is not obsolete, but its ancestor is"""
211 """True if the changeset is not obsolete, but its ancestor is"""
212 return self.rev() in obsmod.getrevs(self._repo, b'orphan')
212 return self.rev() in obsmod.getrevs(self._repo, b'orphan')
213
213
214 def phasedivergent(self):
214 def phasedivergent(self):
215 """True if the changeset tries to be a successor of a public changeset
215 """True if the changeset tries to be a successor of a public changeset
216
216
217 Only non-public and non-obsolete changesets may be phase-divergent.
217 Only non-public and non-obsolete changesets may be phase-divergent.
218 """
218 """
219 return self.rev() in obsmod.getrevs(self._repo, b'phasedivergent')
219 return self.rev() in obsmod.getrevs(self._repo, b'phasedivergent')
220
220
221 def contentdivergent(self):
221 def contentdivergent(self):
222 """Is a successor of a changeset with multiple possible successor sets
222 """Is a successor of a changeset with multiple possible successor sets
223
223
224 Only non-public and non-obsolete changesets may be content-divergent.
224 Only non-public and non-obsolete changesets may be content-divergent.
225 """
225 """
226 return self.rev() in obsmod.getrevs(self._repo, b'contentdivergent')
226 return self.rev() in obsmod.getrevs(self._repo, b'contentdivergent')
227
227
228 def isunstable(self):
228 def isunstable(self):
229 """True if the changeset is either orphan, phase-divergent or
229 """True if the changeset is either orphan, phase-divergent or
230 content-divergent"""
230 content-divergent"""
231 return self.orphan() or self.phasedivergent() or self.contentdivergent()
231 return self.orphan() or self.phasedivergent() or self.contentdivergent()
232
232
233 def instabilities(self):
233 def instabilities(self):
234 """return the list of instabilities affecting this changeset.
234 """return the list of instabilities affecting this changeset.
235
235
236 Instabilities are returned as strings. possible values are:
236 Instabilities are returned as strings. possible values are:
237 - orphan,
237 - orphan,
238 - phase-divergent,
238 - phase-divergent,
239 - content-divergent.
239 - content-divergent.
240 """
240 """
241 instabilities = []
241 instabilities = []
242 if self.orphan():
242 if self.orphan():
243 instabilities.append(b'orphan')
243 instabilities.append(b'orphan')
244 if self.phasedivergent():
244 if self.phasedivergent():
245 instabilities.append(b'phase-divergent')
245 instabilities.append(b'phase-divergent')
246 if self.contentdivergent():
246 if self.contentdivergent():
247 instabilities.append(b'content-divergent')
247 instabilities.append(b'content-divergent')
248 return instabilities
248 return instabilities
249
249
250 def parents(self):
250 def parents(self):
251 """return contexts for each parent changeset"""
251 """return contexts for each parent changeset"""
252 return self._parents
252 return self._parents
253
253
254 def p1(self):
254 def p1(self):
255 return self._parents[0]
255 return self._parents[0]
256
256
257 def p2(self):
257 def p2(self):
258 parents = self._parents
258 parents = self._parents
259 if len(parents) == 2:
259 if len(parents) == 2:
260 return parents[1]
260 return parents[1]
261 return self._repo[nullrev]
261 return self._repo[nullrev]
262
262
263 def _fileinfo(self, path):
263 def _fileinfo(self, path):
264 if '_manifest' in self.__dict__:
264 if '_manifest' in self.__dict__:
265 try:
265 try:
266 return self._manifest.find(path)
266 return self._manifest.find(path)
267 except KeyError:
267 except KeyError:
268 raise error.ManifestLookupError(
268 raise error.ManifestLookupError(
269 self._node or b'None', path, _(b'not found in manifest')
269 self._node or b'None', path, _(b'not found in manifest')
270 )
270 )
271 if '_manifestdelta' in self.__dict__ or path in self.files():
271 if '_manifestdelta' in self.__dict__ or path in self.files():
272 if path in self._manifestdelta:
272 if path in self._manifestdelta:
273 return (
273 return (
274 self._manifestdelta[path],
274 self._manifestdelta[path],
275 self._manifestdelta.flags(path),
275 self._manifestdelta.flags(path),
276 )
276 )
277 mfl = self._repo.manifestlog
277 mfl = self._repo.manifestlog
278 try:
278 try:
279 node, flag = mfl[self._changeset.manifest].find(path)
279 node, flag = mfl[self._changeset.manifest].find(path)
280 except KeyError:
280 except KeyError:
281 raise error.ManifestLookupError(
281 raise error.ManifestLookupError(
282 self._node or b'None', path, _(b'not found in manifest')
282 self._node or b'None', path, _(b'not found in manifest')
283 )
283 )
284
284
285 return node, flag
285 return node, flag
286
286
287 def filenode(self, path):
287 def filenode(self, path):
288 return self._fileinfo(path)[0]
288 return self._fileinfo(path)[0]
289
289
290 def flags(self, path):
290 def flags(self, path):
291 try:
291 try:
292 return self._fileinfo(path)[1]
292 return self._fileinfo(path)[1]
293 except error.LookupError:
293 except error.LookupError:
294 return b''
294 return b''
295
295
296 @propertycache
296 @propertycache
297 def _copies(self):
297 def _copies(self):
298 return metadata.computechangesetcopies(self)
298 return metadata.computechangesetcopies(self)
299
299
300 def p1copies(self):
300 def p1copies(self):
301 return self._copies[0]
301 return self._copies[0]
302
302
303 def p2copies(self):
303 def p2copies(self):
304 return self._copies[1]
304 return self._copies[1]
305
305
306 def sub(self, path, allowcreate=True):
306 def sub(self, path, allowcreate=True):
307 '''return a subrepo for the stored revision of path, never wdir()'''
307 '''return a subrepo for the stored revision of path, never wdir()'''
308 return subrepo.subrepo(self, path, allowcreate=allowcreate)
308 return subrepo.subrepo(self, path, allowcreate=allowcreate)
309
309
310 def nullsub(self, path, pctx):
310 def nullsub(self, path, pctx):
311 return subrepo.nullsubrepo(self, path, pctx)
311 return subrepo.nullsubrepo(self, path, pctx)
312
312
313 def workingsub(self, path):
313 def workingsub(self, path):
314 """return a subrepo for the stored revision, or wdir if this is a wdir
314 """return a subrepo for the stored revision, or wdir if this is a wdir
315 context.
315 context.
316 """
316 """
317 return subrepo.subrepo(self, path, allowwdir=True)
317 return subrepo.subrepo(self, path, allowwdir=True)
318
318
319 def match(
319 def match(
320 self,
320 self,
321 pats=None,
321 pats=None,
322 include=None,
322 include=None,
323 exclude=None,
323 exclude=None,
324 default=b'glob',
324 default=b'glob',
325 listsubrepos=False,
325 listsubrepos=False,
326 badfn=None,
326 badfn=None,
327 cwd=None,
327 cwd=None,
328 ):
328 ):
329 r = self._repo
329 r = self._repo
330 if not cwd:
330 if not cwd:
331 cwd = r.getcwd()
331 cwd = r.getcwd()
332 return matchmod.match(
332 return matchmod.match(
333 r.root,
333 r.root,
334 cwd,
334 cwd,
335 pats,
335 pats,
336 include,
336 include,
337 exclude,
337 exclude,
338 default,
338 default,
339 auditor=r.nofsauditor,
339 auditor=r.nofsauditor,
340 ctx=self,
340 ctx=self,
341 listsubrepos=listsubrepos,
341 listsubrepos=listsubrepos,
342 badfn=badfn,
342 badfn=badfn,
343 )
343 )
344
344
345 def diff(
345 def diff(
346 self,
346 self,
347 ctx2=None,
347 ctx2=None,
348 match=None,
348 match=None,
349 changes=None,
349 changes=None,
350 opts=None,
350 opts=None,
351 losedatafn=None,
351 losedatafn=None,
352 pathfn=None,
352 pathfn=None,
353 copy=None,
353 copy=None,
354 copysourcematch=None,
354 copysourcematch=None,
355 hunksfilterfn=None,
355 hunksfilterfn=None,
356 ):
356 ):
357 """Returns a diff generator for the given contexts and matcher"""
357 """Returns a diff generator for the given contexts and matcher"""
358 if ctx2 is None:
358 if ctx2 is None:
359 ctx2 = self.p1()
359 ctx2 = self.p1()
360 if ctx2 is not None:
360 if ctx2 is not None:
361 ctx2 = self._repo[ctx2]
361 ctx2 = self._repo[ctx2]
362 return patch.diff(
362 return patch.diff(
363 self._repo,
363 self._repo,
364 ctx2,
364 ctx2,
365 self,
365 self,
366 match=match,
366 match=match,
367 changes=changes,
367 changes=changes,
368 opts=opts,
368 opts=opts,
369 losedatafn=losedatafn,
369 losedatafn=losedatafn,
370 pathfn=pathfn,
370 pathfn=pathfn,
371 copy=copy,
371 copy=copy,
372 copysourcematch=copysourcematch,
372 copysourcematch=copysourcematch,
373 hunksfilterfn=hunksfilterfn,
373 hunksfilterfn=hunksfilterfn,
374 )
374 )
375
375
376 def dirs(self):
376 def dirs(self):
377 return self._manifest.dirs()
377 return self._manifest.dirs()
378
378
379 def hasdir(self, dir):
379 def hasdir(self, dir):
380 return self._manifest.hasdir(dir)
380 return self._manifest.hasdir(dir)
381
381
382 def status(
382 def status(
383 self,
383 self,
384 other=None,
384 other=None,
385 match=None,
385 match=None,
386 listignored=False,
386 listignored=False,
387 listclean=False,
387 listclean=False,
388 listunknown=False,
388 listunknown=False,
389 listsubrepos=False,
389 listsubrepos=False,
390 ):
390 ):
391 """return status of files between two nodes or node and working
391 """return status of files between two nodes or node and working
392 directory.
392 directory.
393
393
394 If other is None, compare this node with working directory.
394 If other is None, compare this node with working directory.
395
395
396 ctx1.status(ctx2) returns the status of change from ctx1 to ctx2
396 ctx1.status(ctx2) returns the status of change from ctx1 to ctx2
397
397
398 Returns a mercurial.scmutils.status object.
398 Returns a mercurial.scmutils.status object.
399
399
400 Data can be accessed using either tuple notation:
400 Data can be accessed using either tuple notation:
401
401
402 (modified, added, removed, deleted, unknown, ignored, clean)
402 (modified, added, removed, deleted, unknown, ignored, clean)
403
403
404 or direct attribute access:
404 or direct attribute access:
405
405
406 s.modified, s.added, ...
406 s.modified, s.added, ...
407 """
407 """
408
408
409 ctx1 = self
409 ctx1 = self
410 ctx2 = self._repo[other]
410 ctx2 = self._repo[other]
411
411
412 # This next code block is, admittedly, fragile logic that tests for
412 # This next code block is, admittedly, fragile logic that tests for
413 # reversing the contexts and wouldn't need to exist if it weren't for
413 # reversing the contexts and wouldn't need to exist if it weren't for
414 # the fast (and common) code path of comparing the working directory
414 # the fast (and common) code path of comparing the working directory
415 # with its first parent.
415 # with its first parent.
416 #
416 #
417 # What we're aiming for here is the ability to call:
417 # What we're aiming for here is the ability to call:
418 #
418 #
419 # workingctx.status(parentctx)
419 # workingctx.status(parentctx)
420 #
420 #
421 # If we always built the manifest for each context and compared those,
421 # If we always built the manifest for each context and compared those,
422 # then we'd be done. But the special case of the above call means we
422 # then we'd be done. But the special case of the above call means we
423 # just copy the manifest of the parent.
423 # just copy the manifest of the parent.
424 reversed = False
424 reversed = False
425 if not isinstance(ctx1, changectx) and isinstance(ctx2, changectx):
425 if not isinstance(ctx1, changectx) and isinstance(ctx2, changectx):
426 reversed = True
426 reversed = True
427 ctx1, ctx2 = ctx2, ctx1
427 ctx1, ctx2 = ctx2, ctx1
428
428
429 match = self._repo.narrowmatch(match)
429 match = self._repo.narrowmatch(match)
430 match = ctx2._matchstatus(ctx1, match)
430 match = ctx2._matchstatus(ctx1, match)
431 r = scmutil.status([], [], [], [], [], [], [])
431 r = scmutil.status([], [], [], [], [], [], [])
432 r = ctx2._buildstatus(
432 r = ctx2._buildstatus(
433 ctx1, r, match, listignored, listclean, listunknown
433 ctx1, r, match, listignored, listclean, listunknown
434 )
434 )
435
435
436 if reversed:
436 if reversed:
437 # Reverse added and removed. Clear deleted, unknown and ignored as
437 # Reverse added and removed. Clear deleted, unknown and ignored as
438 # these make no sense to reverse.
438 # these make no sense to reverse.
439 r = scmutil.status(
439 r = scmutil.status(
440 r.modified, r.removed, r.added, [], [], [], r.clean
440 r.modified, r.removed, r.added, [], [], [], r.clean
441 )
441 )
442
442
443 if listsubrepos:
443 if listsubrepos:
444 for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
444 for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
445 try:
445 try:
446 rev2 = ctx2.subrev(subpath)
446 rev2 = ctx2.subrev(subpath)
447 except KeyError:
447 except KeyError:
448 # A subrepo that existed in node1 was deleted between
448 # A subrepo that existed in node1 was deleted between
449 # node1 and node2 (inclusive). Thus, ctx2's substate
449 # node1 and node2 (inclusive). Thus, ctx2's substate
450 # won't contain that subpath. The best we can do ignore it.
450 # won't contain that subpath. The best we can do ignore it.
451 rev2 = None
451 rev2 = None
452 submatch = matchmod.subdirmatcher(subpath, match)
452 submatch = matchmod.subdirmatcher(subpath, match)
453 s = sub.status(
453 s = sub.status(
454 rev2,
454 rev2,
455 match=submatch,
455 match=submatch,
456 ignored=listignored,
456 ignored=listignored,
457 clean=listclean,
457 clean=listclean,
458 unknown=listunknown,
458 unknown=listunknown,
459 listsubrepos=True,
459 listsubrepos=True,
460 )
460 )
461 for k in (
461 for k in (
462 'modified',
462 'modified',
463 'added',
463 'added',
464 'removed',
464 'removed',
465 'deleted',
465 'deleted',
466 'unknown',
466 'unknown',
467 'ignored',
467 'ignored',
468 'clean',
468 'clean',
469 ):
469 ):
470 rfiles, sfiles = getattr(r, k), getattr(s, k)
470 rfiles, sfiles = getattr(r, k), getattr(s, k)
471 rfiles.extend(b"%s/%s" % (subpath, f) for f in sfiles)
471 rfiles.extend(b"%s/%s" % (subpath, f) for f in sfiles)
472
472
473 r.modified.sort()
473 r.modified.sort()
474 r.added.sort()
474 r.added.sort()
475 r.removed.sort()
475 r.removed.sort()
476 r.deleted.sort()
476 r.deleted.sort()
477 r.unknown.sort()
477 r.unknown.sort()
478 r.ignored.sort()
478 r.ignored.sort()
479 r.clean.sort()
479 r.clean.sort()
480
480
481 return r
481 return r
482
482
483 def mergestate(self, clean=False):
483 def mergestate(self, clean=False):
484 """Get a mergestate object for this context."""
484 """Get a mergestate object for this context."""
485 raise NotImplementedError(
485 raise NotImplementedError(
486 '%s does not implement mergestate()' % self.__class__
486 '%s does not implement mergestate()' % self.__class__
487 )
487 )
488
488
489 def isempty(self):
489 def isempty(self):
490 return not (
490 return not (
491 len(self.parents()) > 1
491 len(self.parents()) > 1
492 or self.branch() != self.p1().branch()
492 or self.branch() != self.p1().branch()
493 or self.closesbranch()
493 or self.closesbranch()
494 or self.files()
494 or self.files()
495 )
495 )
496
496
497
497
498 class changectx(basectx):
498 class changectx(basectx):
499 """A changecontext object makes access to data related to a particular
499 """A changecontext object makes access to data related to a particular
500 changeset convenient. It represents a read-only context already present in
500 changeset convenient. It represents a read-only context already present in
501 the repo."""
501 the repo."""
502
502
503 def __init__(self, repo, rev, node, maybe_filtered=True):
503 def __init__(self, repo, rev, node, maybe_filtered=True):
504 super(changectx, self).__init__(repo)
504 super(changectx, self).__init__(repo)
505 self._rev = rev
505 self._rev = rev
506 self._node = node
506 self._node = node
507 # When maybe_filtered is True, the revision might be affected by
507 # When maybe_filtered is True, the revision might be affected by
508 # changelog filtering and operation through the filtered changelog must be used.
508 # changelog filtering and operation through the filtered changelog must be used.
509 #
509 #
510 # When maybe_filtered is False, the revision has already been checked
510 # When maybe_filtered is False, the revision has already been checked
511 # against filtering and is not filtered. Operation through the
511 # against filtering and is not filtered. Operation through the
512 # unfiltered changelog might be used in some case.
512 # unfiltered changelog might be used in some case.
513 self._maybe_filtered = maybe_filtered
513 self._maybe_filtered = maybe_filtered
514
514
515 def __hash__(self):
515 def __hash__(self):
516 try:
516 try:
517 return hash(self._rev)
517 return hash(self._rev)
518 except AttributeError:
518 except AttributeError:
519 return id(self)
519 return id(self)
520
520
521 def __nonzero__(self):
521 def __nonzero__(self):
522 return self._rev != nullrev
522 return self._rev != nullrev
523
523
524 __bool__ = __nonzero__
524 __bool__ = __nonzero__
525
525
526 @propertycache
526 @propertycache
527 def _changeset(self):
527 def _changeset(self):
528 if self._maybe_filtered:
528 if self._maybe_filtered:
529 repo = self._repo
529 repo = self._repo
530 else:
530 else:
531 repo = self._repo.unfiltered()
531 repo = self._repo.unfiltered()
532 return repo.changelog.changelogrevision(self.rev())
532 return repo.changelog.changelogrevision(self.rev())
533
533
534 @propertycache
534 @propertycache
535 def _manifest(self):
535 def _manifest(self):
536 return self._manifestctx.read()
536 return self._manifestctx.read()
537
537
538 @property
538 @property
539 def _manifestctx(self):
539 def _manifestctx(self):
540 return self._repo.manifestlog[self._changeset.manifest]
540 return self._repo.manifestlog[self._changeset.manifest]
541
541
542 @propertycache
542 @propertycache
543 def _manifestdelta(self):
543 def _manifestdelta(self):
544 return self._manifestctx.readdelta()
544 return self._manifestctx.readdelta()
545
545
546 @propertycache
546 @propertycache
547 def _parents(self):
547 def _parents(self):
548 repo = self._repo
548 repo = self._repo
549 if self._maybe_filtered:
549 if self._maybe_filtered:
550 cl = repo.changelog
550 cl = repo.changelog
551 else:
551 else:
552 cl = repo.unfiltered().changelog
552 cl = repo.unfiltered().changelog
553
553
554 p1, p2 = cl.parentrevs(self._rev)
554 p1, p2 = cl.parentrevs(self._rev)
555 if p2 == nullrev:
555 if p2 == nullrev:
556 return [changectx(repo, p1, cl.node(p1), maybe_filtered=False)]
556 return [changectx(repo, p1, cl.node(p1), maybe_filtered=False)]
557 return [
557 return [
558 changectx(repo, p1, cl.node(p1), maybe_filtered=False),
558 changectx(repo, p1, cl.node(p1), maybe_filtered=False),
559 changectx(repo, p2, cl.node(p2), maybe_filtered=False),
559 changectx(repo, p2, cl.node(p2), maybe_filtered=False),
560 ]
560 ]
561
561
562 def changeset(self):
562 def changeset(self):
563 c = self._changeset
563 c = self._changeset
564 return (
564 return (
565 c.manifest,
565 c.manifest,
566 c.user,
566 c.user,
567 c.date,
567 c.date,
568 c.files,
568 c.files,
569 c.description,
569 c.description,
570 c.extra,
570 c.extra,
571 )
571 )
572
572
573 def manifestnode(self):
573 def manifestnode(self):
574 return self._changeset.manifest
574 return self._changeset.manifest
575
575
576 def user(self):
576 def user(self):
577 return self._changeset.user
577 return self._changeset.user
578
578
579 def date(self):
579 def date(self):
580 return self._changeset.date
580 return self._changeset.date
581
581
582 def files(self):
582 def files(self):
583 return self._changeset.files
583 return self._changeset.files
584
584
585 def filesmodified(self):
585 def filesmodified(self):
586 modified = set(self.files())
586 modified = set(self.files())
587 modified.difference_update(self.filesadded())
587 modified.difference_update(self.filesadded())
588 modified.difference_update(self.filesremoved())
588 modified.difference_update(self.filesremoved())
589 return sorted(modified)
589 return sorted(modified)
590
590
591 def filesadded(self):
591 def filesadded(self):
592 filesadded = self._changeset.filesadded
592 filesadded = self._changeset.filesadded
593 compute_on_none = True
593 compute_on_none = True
594 if self._repo.filecopiesmode == b'changeset-sidedata':
594 if self._repo.filecopiesmode == b'changeset-sidedata':
595 compute_on_none = False
595 compute_on_none = False
596 else:
596 else:
597 source = self._repo.ui.config(b'experimental', b'copies.read-from')
597 source = self._repo.ui.config(b'experimental', b'copies.read-from')
598 if source == b'changeset-only':
598 if source == b'changeset-only':
599 compute_on_none = False
599 compute_on_none = False
600 elif source != b'compatibility':
600 elif source != b'compatibility':
601 # filelog mode, ignore any changelog content
601 # filelog mode, ignore any changelog content
602 filesadded = None
602 filesadded = None
603 if filesadded is None:
603 if filesadded is None:
604 if compute_on_none:
604 if compute_on_none:
605 filesadded = metadata.computechangesetfilesadded(self)
605 filesadded = metadata.computechangesetfilesadded(self)
606 else:
606 else:
607 filesadded = []
607 filesadded = []
608 return filesadded
608 return filesadded
609
609
610 def filesremoved(self):
610 def filesremoved(self):
611 filesremoved = self._changeset.filesremoved
611 filesremoved = self._changeset.filesremoved
612 compute_on_none = True
612 compute_on_none = True
613 if self._repo.filecopiesmode == b'changeset-sidedata':
613 if self._repo.filecopiesmode == b'changeset-sidedata':
614 compute_on_none = False
614 compute_on_none = False
615 else:
615 else:
616 source = self._repo.ui.config(b'experimental', b'copies.read-from')
616 source = self._repo.ui.config(b'experimental', b'copies.read-from')
617 if source == b'changeset-only':
617 if source == b'changeset-only':
618 compute_on_none = False
618 compute_on_none = False
619 elif source != b'compatibility':
619 elif source != b'compatibility':
620 # filelog mode, ignore any changelog content
620 # filelog mode, ignore any changelog content
621 filesremoved = None
621 filesremoved = None
622 if filesremoved is None:
622 if filesremoved is None:
623 if compute_on_none:
623 if compute_on_none:
624 filesremoved = metadata.computechangesetfilesremoved(self)
624 filesremoved = metadata.computechangesetfilesremoved(self)
625 else:
625 else:
626 filesremoved = []
626 filesremoved = []
627 return filesremoved
627 return filesremoved
628
628
629 @propertycache
629 @propertycache
630 def _copies(self):
630 def _copies(self):
631 p1copies = self._changeset.p1copies
631 p1copies = self._changeset.p1copies
632 p2copies = self._changeset.p2copies
632 p2copies = self._changeset.p2copies
633 compute_on_none = True
633 compute_on_none = True
634 if self._repo.filecopiesmode == b'changeset-sidedata':
634 if self._repo.filecopiesmode == b'changeset-sidedata':
635 compute_on_none = False
635 compute_on_none = False
636 else:
636 else:
637 source = self._repo.ui.config(b'experimental', b'copies.read-from')
637 source = self._repo.ui.config(b'experimental', b'copies.read-from')
638 # If config says to get copy metadata only from changeset, then
638 # If config says to get copy metadata only from changeset, then
639 # return that, defaulting to {} if there was no copy metadata. In
639 # return that, defaulting to {} if there was no copy metadata. In
640 # compatibility mode, we return copy data from the changeset if it
640 # compatibility mode, we return copy data from the changeset if it
641 # was recorded there, and otherwise we fall back to getting it from
641 # was recorded there, and otherwise we fall back to getting it from
642 # the filelogs (below).
642 # the filelogs (below).
643 #
643 #
644 # If we are in compatiblity mode and there is not data in the
644 # If we are in compatiblity mode and there is not data in the
645 # changeset), we get the copy metadata from the filelogs.
645 # changeset), we get the copy metadata from the filelogs.
646 #
646 #
647 # otherwise, when config said to read only from filelog, we get the
647 # otherwise, when config said to read only from filelog, we get the
648 # copy metadata from the filelogs.
648 # copy metadata from the filelogs.
649 if source == b'changeset-only':
649 if source == b'changeset-only':
650 compute_on_none = False
650 compute_on_none = False
651 elif source != b'compatibility':
651 elif source != b'compatibility':
652 # filelog mode, ignore any changelog content
652 # filelog mode, ignore any changelog content
653 p1copies = p2copies = None
653 p1copies = p2copies = None
654 if p1copies is None:
654 if p1copies is None:
655 if compute_on_none:
655 if compute_on_none:
656 p1copies, p2copies = super(changectx, self)._copies
656 p1copies, p2copies = super(changectx, self)._copies
657 else:
657 else:
658 if p1copies is None:
658 if p1copies is None:
659 p1copies = {}
659 p1copies = {}
660 if p2copies is None:
660 if p2copies is None:
661 p2copies = {}
661 p2copies = {}
662 return p1copies, p2copies
662 return p1copies, p2copies
663
663
664 def description(self):
664 def description(self):
665 return self._changeset.description
665 return self._changeset.description
666
666
667 def branch(self):
667 def branch(self):
668 return encoding.tolocal(self._changeset.extra.get(b"branch"))
668 return encoding.tolocal(self._changeset.extra.get(b"branch"))
669
669
670 def closesbranch(self):
670 def closesbranch(self):
671 return b'close' in self._changeset.extra
671 return b'close' in self._changeset.extra
672
672
673 def extra(self):
673 def extra(self):
674 """Return a dict of extra information."""
674 """Return a dict of extra information."""
675 return self._changeset.extra
675 return self._changeset.extra
676
676
677 def tags(self):
677 def tags(self):
678 """Return a list of byte tag names"""
678 """Return a list of byte tag names"""
679 return self._repo.nodetags(self._node)
679 return self._repo.nodetags(self._node)
680
680
681 def bookmarks(self):
681 def bookmarks(self):
682 """Return a list of byte bookmark names."""
682 """Return a list of byte bookmark names."""
683 return self._repo.nodebookmarks(self._node)
683 return self._repo.nodebookmarks(self._node)
684
684
685 def phase(self):
685 def phase(self):
686 return self._repo._phasecache.phase(self._repo, self._rev)
686 return self._repo._phasecache.phase(self._repo, self._rev)
687
687
688 def hidden(self):
688 def hidden(self):
689 return self._rev in repoview.filterrevs(self._repo, b'visible')
689 return self._rev in repoview.filterrevs(self._repo, b'visible')
690
690
691 def isinmemory(self):
691 def isinmemory(self):
692 return False
692 return False
693
693
694 def children(self):
694 def children(self):
695 """return list of changectx contexts for each child changeset.
695 """return list of changectx contexts for each child changeset.
696
696
697 This returns only the immediate child changesets. Use descendants() to
697 This returns only the immediate child changesets. Use descendants() to
698 recursively walk children.
698 recursively walk children.
699 """
699 """
700 c = self._repo.changelog.children(self._node)
700 c = self._repo.changelog.children(self._node)
701 return [self._repo[x] for x in c]
701 return [self._repo[x] for x in c]
702
702
703 def ancestors(self):
703 def ancestors(self):
704 for a in self._repo.changelog.ancestors([self._rev]):
704 for a in self._repo.changelog.ancestors([self._rev]):
705 yield self._repo[a]
705 yield self._repo[a]
706
706
707 def descendants(self):
707 def descendants(self):
708 """Recursively yield all children of the changeset.
708 """Recursively yield all children of the changeset.
709
709
710 For just the immediate children, use children()
710 For just the immediate children, use children()
711 """
711 """
712 for d in self._repo.changelog.descendants([self._rev]):
712 for d in self._repo.changelog.descendants([self._rev]):
713 yield self._repo[d]
713 yield self._repo[d]
714
714
715 def filectx(self, path, fileid=None, filelog=None):
715 def filectx(self, path, fileid=None, filelog=None):
716 """get a file context from this changeset"""
716 """get a file context from this changeset"""
717 if fileid is None:
717 if fileid is None:
718 fileid = self.filenode(path)
718 fileid = self.filenode(path)
719 return filectx(
719 return filectx(
720 self._repo, path, fileid=fileid, changectx=self, filelog=filelog
720 self._repo, path, fileid=fileid, changectx=self, filelog=filelog
721 )
721 )
722
722
723 def ancestor(self, c2, warn=False):
723 def ancestor(self, c2, warn=False):
724 """return the "best" ancestor context of self and c2
724 """return the "best" ancestor context of self and c2
725
725
726 If there are multiple candidates, it will show a message and check
726 If there are multiple candidates, it will show a message and check
727 merge.preferancestor configuration before falling back to the
727 merge.preferancestor configuration before falling back to the
728 revlog ancestor."""
728 revlog ancestor."""
729 # deal with workingctxs
729 # deal with workingctxs
730 n2 = c2._node
730 n2 = c2._node
731 if n2 is None:
731 if n2 is None:
732 n2 = c2._parents[0]._node
732 n2 = c2._parents[0]._node
733 cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
733 cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
734 if not cahs:
734 if not cahs:
735 anc = self._repo.nodeconstants.nullid
735 anc = self._repo.nodeconstants.nullid
736 elif len(cahs) == 1:
736 elif len(cahs) == 1:
737 anc = cahs[0]
737 anc = cahs[0]
738 else:
738 else:
739 # experimental config: merge.preferancestor
739 # experimental config: merge.preferancestor
740 for r in self._repo.ui.configlist(b'merge', b'preferancestor'):
740 for r in self._repo.ui.configlist(b'merge', b'preferancestor'):
741 try:
741 try:
742 ctx = scmutil.revsymbol(self._repo, r)
742 ctx = scmutil.revsymbol(self._repo, r)
743 except error.RepoLookupError:
743 except error.RepoLookupError:
744 continue
744 continue
745 anc = ctx.node()
745 anc = ctx.node()
746 if anc in cahs:
746 if anc in cahs:
747 break
747 break
748 else:
748 else:
749 anc = self._repo.changelog.ancestor(self._node, n2)
749 anc = self._repo.changelog.ancestor(self._node, n2)
750 if warn:
750 if warn:
751 self._repo.ui.status(
751 self._repo.ui.status(
752 (
752 (
753 _(b"note: using %s as ancestor of %s and %s\n")
753 _(b"note: using %s as ancestor of %s and %s\n")
754 % (short(anc), short(self._node), short(n2))
754 % (short(anc), short(self._node), short(n2))
755 )
755 )
756 + b''.join(
756 + b''.join(
757 _(
757 _(
758 b" alternatively, use --config "
758 b" alternatively, use --config "
759 b"merge.preferancestor=%s\n"
759 b"merge.preferancestor=%s\n"
760 )
760 )
761 % short(n)
761 % short(n)
762 for n in sorted(cahs)
762 for n in sorted(cahs)
763 if n != anc
763 if n != anc
764 )
764 )
765 )
765 )
766 return self._repo[anc]
766 return self._repo[anc]
767
767
768 def isancestorof(self, other):
768 def isancestorof(self, other):
769 """True if this changeset is an ancestor of other"""
769 """True if this changeset is an ancestor of other"""
770 return self._repo.changelog.isancestorrev(self._rev, other._rev)
770 return self._repo.changelog.isancestorrev(self._rev, other._rev)
771
771
772 def walk(self, match):
772 def walk(self, match):
773 '''Generates matching file names.'''
773 '''Generates matching file names.'''
774
774
775 # Wrap match.bad method to have message with nodeid
775 # Wrap match.bad method to have message with nodeid
776 def bad(fn, msg):
776 def bad(fn, msg):
777 # The manifest doesn't know about subrepos, so don't complain about
777 # The manifest doesn't know about subrepos, so don't complain about
778 # paths into valid subrepos.
778 # paths into valid subrepos.
779 if any(fn == s or fn.startswith(s + b'/') for s in self.substate):
779 if any(fn == s or fn.startswith(s + b'/') for s in self.substate):
780 return
780 return
781 match.bad(fn, _(b'no such file in rev %s') % self)
781 match.bad(fn, _(b'no such file in rev %s') % self)
782
782
783 m = matchmod.badmatch(self._repo.narrowmatch(match), bad)
783 m = matchmod.badmatch(self._repo.narrowmatch(match), bad)
784 return self._manifest.walk(m)
784 return self._manifest.walk(m)
785
785
786 def matches(self, match):
786 def matches(self, match):
787 return self.walk(match)
787 return self.walk(match)
788
788
789
789
790 class basefilectx(object):
790 class basefilectx(object):
791 """A filecontext object represents the common logic for its children:
791 """A filecontext object represents the common logic for its children:
792 filectx: read-only access to a filerevision that is already present
792 filectx: read-only access to a filerevision that is already present
793 in the repo,
793 in the repo,
794 workingfilectx: a filecontext that represents files from the working
794 workingfilectx: a filecontext that represents files from the working
795 directory,
795 directory,
796 memfilectx: a filecontext that represents files in-memory,
796 memfilectx: a filecontext that represents files in-memory,
797 """
797 """
798
798
799 @propertycache
799 @propertycache
800 def _filelog(self):
800 def _filelog(self):
801 return self._repo.file(self._path)
801 return self._repo.file(self._path)
802
802
803 @propertycache
803 @propertycache
804 def _changeid(self):
804 def _changeid(self):
805 if '_changectx' in self.__dict__:
805 if '_changectx' in self.__dict__:
806 return self._changectx.rev()
806 return self._changectx.rev()
807 elif '_descendantrev' in self.__dict__:
807 elif '_descendantrev' in self.__dict__:
808 # this file context was created from a revision with a known
808 # this file context was created from a revision with a known
809 # descendant, we can (lazily) correct for linkrev aliases
809 # descendant, we can (lazily) correct for linkrev aliases
810 return self._adjustlinkrev(self._descendantrev)
810 return self._adjustlinkrev(self._descendantrev)
811 else:
811 else:
812 return self._filelog.linkrev(self._filerev)
812 return self._filelog.linkrev(self._filerev)
813
813
814 @propertycache
814 @propertycache
815 def _filenode(self):
815 def _filenode(self):
816 if '_fileid' in self.__dict__:
816 if '_fileid' in self.__dict__:
817 return self._filelog.lookup(self._fileid)
817 return self._filelog.lookup(self._fileid)
818 else:
818 else:
819 return self._changectx.filenode(self._path)
819 return self._changectx.filenode(self._path)
820
820
821 @propertycache
821 @propertycache
822 def _filerev(self):
822 def _filerev(self):
823 return self._filelog.rev(self._filenode)
823 return self._filelog.rev(self._filenode)
824
824
825 @propertycache
825 @propertycache
826 def _repopath(self):
826 def _repopath(self):
827 return self._path
827 return self._path
828
828
829 def __nonzero__(self):
829 def __nonzero__(self):
830 try:
830 try:
831 self._filenode
831 self._filenode
832 return True
832 return True
833 except error.LookupError:
833 except error.LookupError:
834 # file is missing
834 # file is missing
835 return False
835 return False
836
836
837 __bool__ = __nonzero__
837 __bool__ = __nonzero__
838
838
839 def __bytes__(self):
839 def __bytes__(self):
840 try:
840 try:
841 return b"%s@%s" % (self.path(), self._changectx)
841 return b"%s@%s" % (self.path(), self._changectx)
842 except error.LookupError:
842 except error.LookupError:
843 return b"%s@???" % self.path()
843 return b"%s@???" % self.path()
844
844
845 __str__ = encoding.strmethod(__bytes__)
845 __str__ = encoding.strmethod(__bytes__)
846
846
847 def __repr__(self):
847 def __repr__(self):
848 return "<%s %s>" % (type(self).__name__, str(self))
848 return "<%s %s>" % (type(self).__name__, str(self))
849
849
850 def __hash__(self):
850 def __hash__(self):
851 try:
851 try:
852 return hash((self._path, self._filenode))
852 return hash((self._path, self._filenode))
853 except AttributeError:
853 except AttributeError:
854 return id(self)
854 return id(self)
855
855
856 def __eq__(self, other):
856 def __eq__(self, other):
857 try:
857 try:
858 return (
858 return (
859 type(self) == type(other)
859 type(self) == type(other)
860 and self._path == other._path
860 and self._path == other._path
861 and self._filenode == other._filenode
861 and self._filenode == other._filenode
862 )
862 )
863 except AttributeError:
863 except AttributeError:
864 return False
864 return False
865
865
866 def __ne__(self, other):
866 def __ne__(self, other):
867 return not (self == other)
867 return not (self == other)
868
868
869 def filerev(self):
869 def filerev(self):
870 return self._filerev
870 return self._filerev
871
871
872 def filenode(self):
872 def filenode(self):
873 return self._filenode
873 return self._filenode
874
874
875 @propertycache
875 @propertycache
876 def _flags(self):
876 def _flags(self):
877 return self._changectx.flags(self._path)
877 return self._changectx.flags(self._path)
878
878
879 def flags(self):
879 def flags(self):
880 return self._flags
880 return self._flags
881
881
882 def filelog(self):
882 def filelog(self):
883 return self._filelog
883 return self._filelog
884
884
885 def rev(self):
885 def rev(self):
886 return self._changeid
886 return self._changeid
887
887
888 def linkrev(self):
888 def linkrev(self):
889 return self._filelog.linkrev(self._filerev)
889 return self._filelog.linkrev(self._filerev)
890
890
891 def node(self):
891 def node(self):
892 return self._changectx.node()
892 return self._changectx.node()
893
893
894 def hex(self):
894 def hex(self):
895 return self._changectx.hex()
895 return self._changectx.hex()
896
896
897 def user(self):
897 def user(self):
898 return self._changectx.user()
898 return self._changectx.user()
899
899
900 def date(self):
900 def date(self):
901 return self._changectx.date()
901 return self._changectx.date()
902
902
903 def files(self):
903 def files(self):
904 return self._changectx.files()
904 return self._changectx.files()
905
905
906 def description(self):
906 def description(self):
907 return self._changectx.description()
907 return self._changectx.description()
908
908
909 def branch(self):
909 def branch(self):
910 return self._changectx.branch()
910 return self._changectx.branch()
911
911
912 def extra(self):
912 def extra(self):
913 return self._changectx.extra()
913 return self._changectx.extra()
914
914
915 def phase(self):
915 def phase(self):
916 return self._changectx.phase()
916 return self._changectx.phase()
917
917
918 def phasestr(self):
918 def phasestr(self):
919 return self._changectx.phasestr()
919 return self._changectx.phasestr()
920
920
921 def obsolete(self):
921 def obsolete(self):
922 return self._changectx.obsolete()
922 return self._changectx.obsolete()
923
923
924 def instabilities(self):
924 def instabilities(self):
925 return self._changectx.instabilities()
925 return self._changectx.instabilities()
926
926
927 def manifest(self):
927 def manifest(self):
928 return self._changectx.manifest()
928 return self._changectx.manifest()
929
929
930 def changectx(self):
930 def changectx(self):
931 return self._changectx
931 return self._changectx
932
932
933 def renamed(self):
933 def renamed(self):
934 return self._copied
934 return self._copied
935
935
936 def copysource(self):
936 def copysource(self):
937 return self._copied and self._copied[0]
937 return self._copied and self._copied[0]
938
938
939 def repo(self):
939 def repo(self):
940 return self._repo
940 return self._repo
941
941
942 def size(self):
942 def size(self):
943 return len(self.data())
943 return len(self.data())
944
944
945 def path(self):
945 def path(self):
946 return self._path
946 return self._path
947
947
948 def isbinary(self):
948 def isbinary(self):
949 try:
949 try:
950 return stringutil.binary(self.data())
950 return stringutil.binary(self.data())
951 except IOError:
951 except IOError:
952 return False
952 return False
953
953
954 def isexec(self):
954 def isexec(self):
955 return b'x' in self.flags()
955 return b'x' in self.flags()
956
956
957 def islink(self):
957 def islink(self):
958 return b'l' in self.flags()
958 return b'l' in self.flags()
959
959
960 def isabsent(self):
960 def isabsent(self):
961 """whether this filectx represents a file not in self._changectx
961 """whether this filectx represents a file not in self._changectx
962
962
963 This is mainly for merge code to detect change/delete conflicts. This is
963 This is mainly for merge code to detect change/delete conflicts. This is
964 expected to be True for all subclasses of basectx."""
964 expected to be True for all subclasses of basectx."""
965 return False
965 return False
966
966
967 _customcmp = False
967 _customcmp = False
968
968
969 def cmp(self, fctx):
969 def cmp(self, fctx):
970 """compare with other file context
970 """compare with other file context
971
971
972 returns True if different than fctx.
972 returns True if different than fctx.
973 """
973 """
974 if fctx._customcmp:
974 if fctx._customcmp:
975 return fctx.cmp(self)
975 return fctx.cmp(self)
976
976
977 if self._filenode is None:
977 if self._filenode is None:
978 raise error.ProgrammingError(
978 raise error.ProgrammingError(
979 b'filectx.cmp() must be reimplemented if not backed by revlog'
979 b'filectx.cmp() must be reimplemented if not backed by revlog'
980 )
980 )
981
981
982 if fctx._filenode is None:
982 if fctx._filenode is None:
983 if self._repo._encodefilterpats:
983 if self._repo._encodefilterpats:
984 # can't rely on size() because wdir content may be decoded
984 # can't rely on size() because wdir content may be decoded
985 return self._filelog.cmp(self._filenode, fctx.data())
985 return self._filelog.cmp(self._filenode, fctx.data())
986 if self.size() - 4 == fctx.size():
986 if self.size() - 4 == fctx.size():
987 # size() can match:
987 # size() can match:
988 # if file data starts with '\1\n', empty metadata block is
988 # if file data starts with '\1\n', empty metadata block is
989 # prepended, which adds 4 bytes to filelog.size().
989 # prepended, which adds 4 bytes to filelog.size().
990 return self._filelog.cmp(self._filenode, fctx.data())
990 return self._filelog.cmp(self._filenode, fctx.data())
991 if self.size() == fctx.size() or self.flags() == b'l':
991 if self.size() == fctx.size() or self.flags() == b'l':
992 # size() matches: need to compare content
992 # size() matches: need to compare content
993 # issue6456: Always compare symlinks because size can represent
993 # issue6456: Always compare symlinks because size can represent
994 # encrypted string for EXT-4 encryption(fscrypt).
994 # encrypted string for EXT-4 encryption(fscrypt).
995 return self._filelog.cmp(self._filenode, fctx.data())
995 return self._filelog.cmp(self._filenode, fctx.data())
996
996
997 # size() differs
997 # size() differs
998 return True
998 return True
999
999
1000 def _adjustlinkrev(self, srcrev, inclusive=False, stoprev=None):
1000 def _adjustlinkrev(self, srcrev, inclusive=False, stoprev=None):
1001 """return the first ancestor of <srcrev> introducing <fnode>
1001 """return the first ancestor of <srcrev> introducing <fnode>
1002
1002
1003 If the linkrev of the file revision does not point to an ancestor of
1003 If the linkrev of the file revision does not point to an ancestor of
1004 srcrev, we'll walk down the ancestors until we find one introducing
1004 srcrev, we'll walk down the ancestors until we find one introducing
1005 this file revision.
1005 this file revision.
1006
1006
1007 :srcrev: the changeset revision we search ancestors from
1007 :srcrev: the changeset revision we search ancestors from
1008 :inclusive: if true, the src revision will also be checked
1008 :inclusive: if true, the src revision will also be checked
1009 :stoprev: an optional revision to stop the walk at. If no introduction
1009 :stoprev: an optional revision to stop the walk at. If no introduction
1010 of this file content could be found before this floor
1010 of this file content could be found before this floor
1011 revision, the function will returns "None" and stops its
1011 revision, the function will returns "None" and stops its
1012 iteration.
1012 iteration.
1013 """
1013 """
1014 repo = self._repo
1014 repo = self._repo
1015 cl = repo.unfiltered().changelog
1015 cl = repo.unfiltered().changelog
1016 mfl = repo.manifestlog
1016 mfl = repo.manifestlog
1017 # fetch the linkrev
1017 # fetch the linkrev
1018 lkr = self.linkrev()
1018 lkr = self.linkrev()
1019 if srcrev == lkr:
1019 if srcrev == lkr:
1020 return lkr
1020 return lkr
1021 # hack to reuse ancestor computation when searching for renames
1021 # hack to reuse ancestor computation when searching for renames
1022 memberanc = getattr(self, '_ancestrycontext', None)
1022 memberanc = getattr(self, '_ancestrycontext', None)
1023 iteranc = None
1023 iteranc = None
1024 if srcrev is None:
1024 if srcrev is None:
1025 # wctx case, used by workingfilectx during mergecopy
1025 # wctx case, used by workingfilectx during mergecopy
1026 revs = [p.rev() for p in self._repo[None].parents()]
1026 revs = [p.rev() for p in self._repo[None].parents()]
1027 inclusive = True # we skipped the real (revless) source
1027 inclusive = True # we skipped the real (revless) source
1028 else:
1028 else:
1029 revs = [srcrev]
1029 revs = [srcrev]
1030 if memberanc is None:
1030 if memberanc is None:
1031 memberanc = iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
1031 memberanc = iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
1032 # check if this linkrev is an ancestor of srcrev
1032 # check if this linkrev is an ancestor of srcrev
1033 if lkr not in memberanc:
1033 if lkr not in memberanc:
1034 if iteranc is None:
1034 if iteranc is None:
1035 iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
1035 iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
1036 fnode = self._filenode
1036 fnode = self._filenode
1037 path = self._path
1037 path = self._path
1038 for a in iteranc:
1038 for a in iteranc:
1039 if stoprev is not None and a < stoprev:
1039 if stoprev is not None and a < stoprev:
1040 return None
1040 return None
1041 ac = cl.read(a) # get changeset data (we avoid object creation)
1041 ac = cl.read(a) # get changeset data (we avoid object creation)
1042 if path in ac[3]: # checking the 'files' field.
1042 if path in ac[3]: # checking the 'files' field.
1043 # The file has been touched, check if the content is
1043 # The file has been touched, check if the content is
1044 # similar to the one we search for.
1044 # similar to the one we search for.
1045 if fnode == mfl[ac[0]].readfast().get(path):
1045 if fnode == mfl[ac[0]].readfast().get(path):
1046 return a
1046 return a
1047 # In theory, we should never get out of that loop without a result.
1047 # In theory, we should never get out of that loop without a result.
1048 # But if manifest uses a buggy file revision (not children of the
1048 # But if manifest uses a buggy file revision (not children of the
1049 # one it replaces) we could. Such a buggy situation will likely
1049 # one it replaces) we could. Such a buggy situation will likely
1050 # result is crash somewhere else at to some point.
1050 # result is crash somewhere else at to some point.
1051 return lkr
1051 return lkr
1052
1052
1053 def isintroducedafter(self, changelogrev):
1053 def isintroducedafter(self, changelogrev):
1054 """True if a filectx has been introduced after a given floor revision"""
1054 """True if a filectx has been introduced after a given floor revision"""
1055 if self.linkrev() >= changelogrev:
1055 if self.linkrev() >= changelogrev:
1056 return True
1056 return True
1057 introrev = self._introrev(stoprev=changelogrev)
1057 introrev = self._introrev(stoprev=changelogrev)
1058 if introrev is None:
1058 if introrev is None:
1059 return False
1059 return False
1060 return introrev >= changelogrev
1060 return introrev >= changelogrev
1061
1061
1062 def introrev(self):
1062 def introrev(self):
1063 """return the rev of the changeset which introduced this file revision
1063 """return the rev of the changeset which introduced this file revision
1064
1064
1065 This method is different from linkrev because it take into account the
1065 This method is different from linkrev because it take into account the
1066 changeset the filectx was created from. It ensures the returned
1066 changeset the filectx was created from. It ensures the returned
1067 revision is one of its ancestors. This prevents bugs from
1067 revision is one of its ancestors. This prevents bugs from
1068 'linkrev-shadowing' when a file revision is used by multiple
1068 'linkrev-shadowing' when a file revision is used by multiple
1069 changesets.
1069 changesets.
1070 """
1070 """
1071 return self._introrev()
1071 return self._introrev()
1072
1072
1073 def _introrev(self, stoprev=None):
1073 def _introrev(self, stoprev=None):
1074 """
1074 """
1075 Same as `introrev` but, with an extra argument to limit changelog
1075 Same as `introrev` but, with an extra argument to limit changelog
1076 iteration range in some internal usecase.
1076 iteration range in some internal usecase.
1077
1077
1078 If `stoprev` is set, the `introrev` will not be searched past that
1078 If `stoprev` is set, the `introrev` will not be searched past that
1079 `stoprev` revision and "None" might be returned. This is useful to
1079 `stoprev` revision and "None" might be returned. This is useful to
1080 limit the iteration range.
1080 limit the iteration range.
1081 """
1081 """
1082 toprev = None
1082 toprev = None
1083 attrs = vars(self)
1083 attrs = vars(self)
1084 if '_changeid' in attrs:
1084 if '_changeid' in attrs:
1085 # We have a cached value already
1085 # We have a cached value already
1086 toprev = self._changeid
1086 toprev = self._changeid
1087 elif '_changectx' in attrs:
1087 elif '_changectx' in attrs:
1088 # We know which changelog entry we are coming from
1088 # We know which changelog entry we are coming from
1089 toprev = self._changectx.rev()
1089 toprev = self._changectx.rev()
1090
1090
1091 if toprev is not None:
1091 if toprev is not None:
1092 return self._adjustlinkrev(toprev, inclusive=True, stoprev=stoprev)
1092 return self._adjustlinkrev(toprev, inclusive=True, stoprev=stoprev)
1093 elif '_descendantrev' in attrs:
1093 elif '_descendantrev' in attrs:
1094 introrev = self._adjustlinkrev(self._descendantrev, stoprev=stoprev)
1094 introrev = self._adjustlinkrev(self._descendantrev, stoprev=stoprev)
1095 # be nice and cache the result of the computation
1095 # be nice and cache the result of the computation
1096 if introrev is not None:
1096 if introrev is not None:
1097 self._changeid = introrev
1097 self._changeid = introrev
1098 return introrev
1098 return introrev
1099 else:
1099 else:
1100 return self.linkrev()
1100 return self.linkrev()
1101
1101
1102 def introfilectx(self):
1102 def introfilectx(self):
1103 """Return filectx having identical contents, but pointing to the
1103 """Return filectx having identical contents, but pointing to the
1104 changeset revision where this filectx was introduced"""
1104 changeset revision where this filectx was introduced"""
1105 introrev = self.introrev()
1105 introrev = self.introrev()
1106 if self.rev() == introrev:
1106 if self.rev() == introrev:
1107 return self
1107 return self
1108 return self.filectx(self.filenode(), changeid=introrev)
1108 return self.filectx(self.filenode(), changeid=introrev)
1109
1109
1110 def _parentfilectx(self, path, fileid, filelog):
1110 def _parentfilectx(self, path, fileid, filelog):
1111 """create parent filectx keeping ancestry info for _adjustlinkrev()"""
1111 """create parent filectx keeping ancestry info for _adjustlinkrev()"""
1112 fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
1112 fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
1113 if '_changeid' in vars(self) or '_changectx' in vars(self):
1113 if '_changeid' in vars(self) or '_changectx' in vars(self):
1114 # If self is associated with a changeset (probably explicitly
1114 # If self is associated with a changeset (probably explicitly
1115 # fed), ensure the created filectx is associated with a
1115 # fed), ensure the created filectx is associated with a
1116 # changeset that is an ancestor of self.changectx.
1116 # changeset that is an ancestor of self.changectx.
1117 # This lets us later use _adjustlinkrev to get a correct link.
1117 # This lets us later use _adjustlinkrev to get a correct link.
1118 fctx._descendantrev = self.rev()
1118 fctx._descendantrev = self.rev()
1119 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
1119 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
1120 elif '_descendantrev' in vars(self):
1120 elif '_descendantrev' in vars(self):
1121 # Otherwise propagate _descendantrev if we have one associated.
1121 # Otherwise propagate _descendantrev if we have one associated.
1122 fctx._descendantrev = self._descendantrev
1122 fctx._descendantrev = self._descendantrev
1123 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
1123 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
1124 return fctx
1124 return fctx
1125
1125
1126 def parents(self):
1126 def parents(self):
1127 _path = self._path
1127 _path = self._path
1128 fl = self._filelog
1128 fl = self._filelog
1129 parents = self._filelog.parents(self._filenode)
1129 parents = self._filelog.parents(self._filenode)
1130 pl = [
1130 pl = [
1131 (_path, node, fl)
1131 (_path, node, fl)
1132 for node in parents
1132 for node in parents
1133 if node != self._repo.nodeconstants.nullid
1133 if node != self._repo.nodeconstants.nullid
1134 ]
1134 ]
1135
1135
1136 r = fl.renamed(self._filenode)
1136 r = fl.renamed(self._filenode)
1137 if r:
1137 if r:
1138 # - In the simple rename case, both parent are nullid, pl is empty.
1138 # - In the simple rename case, both parent are nullid, pl is empty.
1139 # - In case of merge, only one of the parent is null id and should
1139 # - In case of merge, only one of the parent is null id and should
1140 # be replaced with the rename information. This parent is -always-
1140 # be replaced with the rename information. This parent is -always-
1141 # the first one.
1141 # the first one.
1142 #
1142 #
1143 # As null id have always been filtered out in the previous list
1143 # As null id have always been filtered out in the previous list
1144 # comprehension, inserting to 0 will always result in "replacing
1144 # comprehension, inserting to 0 will always result in "replacing
1145 # first nullid parent with rename information.
1145 # first nullid parent with rename information.
1146 pl.insert(0, (r[0], r[1], self._repo.file(r[0])))
1146 pl.insert(0, (r[0], r[1], self._repo.file(r[0])))
1147
1147
1148 return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]
1148 return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]
1149
1149
1150 def p1(self):
1150 def p1(self):
1151 return self.parents()[0]
1151 return self.parents()[0]
1152
1152
1153 def p2(self):
1153 def p2(self):
1154 p = self.parents()
1154 p = self.parents()
1155 if len(p) == 2:
1155 if len(p) == 2:
1156 return p[1]
1156 return p[1]
1157 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
1157 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
1158
1158
    def annotate(self, follow=False, skiprevs=None, diffopts=None):
        """Returns a list of annotateline objects for each line in the file

        - line.fctx is the filectx of the node where that line was last changed
        - line.lineno is the line number at the first appearance in the managed
          file
        - line.text is the data on that line (including newline character)
        """
        # cache filelog lookups; the parents() callback below may hit the
        # same path many times while dagop.annotate walks history
        getlog = util.lrucachefunc(lambda x: self._repo.file(x))

        def parents(f):
            # Cut _descendantrev here to mitigate the penalty of lazy linkrev
            # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
            # from the topmost introrev (= srcrev) down to p.linkrev() if it
            # isn't an ancestor of the srcrev.
            f._changeid
            pl = f.parents()

            # Don't return renamed parents if we aren't following.
            if not follow:
                pl = [p for p in pl if p.path() == f.path()]

            # renamed filectx won't have a filelog yet, so set it
            # from the cache to save time
            for p in pl:
                if not '_filelog' in p.__dict__:
                    p._filelog = getlog(p.path())

            return pl

        # use linkrev to find the first changeset where self appeared
        base = self.introfilectx()
        if getattr(base, '_ancestrycontext', None) is None:
            # it is safe to use an unfiltered repository here because we are
            # walking ancestors only.
            cl = self._repo.unfiltered().changelog
            if base.rev() is None:
                # wctx is not inclusive, but works because _ancestrycontext
                # is used to test filelog revisions
                ac = cl.ancestors(
                    [p.rev() for p in base.parents()], inclusive=True
                )
            else:
                ac = cl.ancestors([base.rev()], inclusive=True)
            base._ancestrycontext = ac

        return dagop.annotate(
            base, parents, skiprevs=skiprevs, diffopts=diffopts
        )
1208
1208
1209 def ancestors(self, followfirst=False):
1209 def ancestors(self, followfirst=False):
1210 visit = {}
1210 visit = {}
1211 c = self
1211 c = self
1212 if followfirst:
1212 if followfirst:
1213 cut = 1
1213 cut = 1
1214 else:
1214 else:
1215 cut = None
1215 cut = None
1216
1216
1217 while True:
1217 while True:
1218 for parent in c.parents()[:cut]:
1218 for parent in c.parents()[:cut]:
1219 visit[(parent.linkrev(), parent.filenode())] = parent
1219 visit[(parent.linkrev(), parent.filenode())] = parent
1220 if not visit:
1220 if not visit:
1221 break
1221 break
1222 c = visit.pop(max(visit))
1222 c = visit.pop(max(visit))
1223 yield c
1223 yield c
1224
1224
1225 def decodeddata(self):
1225 def decodeddata(self):
1226 """Returns `data()` after running repository decoding filters.
1226 """Returns `data()` after running repository decoding filters.
1227
1227
1228 This is often equivalent to how the data would be expressed on disk.
1228 This is often equivalent to how the data would be expressed on disk.
1229 """
1229 """
1230 return self._repo.wwritedata(self.path(), self.data())
1230 return self._repo.wwritedata(self.path(), self.data())
1231
1231
1232
1232
class filectx(basefilectx):
    """A filecontext object makes access to data related to a particular
    filerevision convenient."""

    def __init__(
        self,
        repo,
        path,
        changeid=None,
        fileid=None,
        filelog=None,
        changectx=None,
    ):
        """changeid must be a revision number, if specified.
        fileid can be a file revision or node."""
        self._repo = repo
        self._path = path

        # at least one way of pinning the file revision must be provided
        assert (
            changeid is not None or fileid is not None or changectx is not None
        ), b"bad args: changeid=%r, fileid=%r, changectx=%r" % (
            changeid,
            fileid,
            changectx,
        )

        if filelog is not None:
            self._filelog = filelog

        # only pre-populate the caches we were actually given; the rest is
        # derived lazily via propertycache
        if changeid is not None:
            self._changeid = changeid
        if changectx is not None:
            self._changectx = changectx
        if fileid is not None:
            self._fileid = fileid

    @propertycache
    def _changectx(self):
        try:
            return self._repo[self._changeid]
        except error.FilteredRepoLookupError:
            # Linkrev may point to any revision in the repository.  When the
            # repository is filtered this may lead to `filectx` trying to build
            # `changectx` for filtered revision. In such case we fallback to
            # creating `changectx` on the unfiltered version of the reposition.
            # This fallback should not be an issue because `changectx` from
            # `filectx` are not used in complex operations that care about
            # filtering.
            #
            # This fallback is a cheap and dirty fix that prevent several
            # crashes. It does not ensure the behavior is correct. However the
            # behavior was not correct before filtering either and "incorrect
            # behavior" is seen as better as "crash"
            #
            # Linkrevs have several serious troubles with filtering that are
            # complicated to solve. Proper handling of the issue here should be
            # considered when solving linkrev issue are on the table.
            return self._repo.unfiltered()[self._changeid]

    def filectx(self, fileid, changeid=None):
        """opens an arbitrary revision of the file without
        opening a new filelog"""
        return filectx(
            self._repo,
            self._path,
            fileid=fileid,
            filelog=self._filelog,
            changeid=changeid,
        )

    def rawdata(self):
        # raw revlog payload, bypassing flag processing
        return self._filelog.rawdata(self._filenode)

    def rawflags(self):
        """low-level revlog flags"""
        return self._filelog.flags(self._filerev)

    def data(self):
        """Return the file content, honoring the censor policy.

        Raises error.Abort for a censored node unless censor.policy is
        b"ignore", in which case empty content is returned.
        """
        try:
            return self._filelog.read(self._filenode)
        except error.CensoredNodeError:
            if self._repo.ui.config(b"censor", b"policy") == b"ignore":
                return b""
            raise error.Abort(
                _(b"censored node: %s") % short(self._filenode),
                hint=_(b"set censor.policy to ignore errors"),
            )

    def size(self):
        # size as recorded by the filelog (may differ from len(data()) for
        # renamed/censored revisions)
        return self._filelog.size(self._filerev)

    @propertycache
    def _copied(self):
        """check if file was actually renamed in this changeset revision

        If rename logged in file revision, we report copy for changeset only
        if file revisions linkrev points back to the changeset in question
        or both changeset parents contain different file revisions.
        """

        renamed = self._filelog.renamed(self._filenode)
        if not renamed:
            return None

        if self.rev() == self.linkrev():
            # this changeset introduced the file revision: report the copy
            return renamed

        # otherwise only report the copy when neither parent already has this
        # exact file revision (i.e. the rename is really new here)
        name = self.path()
        fnode = self._filenode
        for p in self._changectx.parents():
            try:
                if fnode == p.filenode(name):
                    return None
            except error.LookupError:
                pass
        return renamed

    def children(self):
        # hard for renames
        c = self._filelog.children(self._filenode)
        return [
            filectx(self._repo, self._path, fileid=x, filelog=self._filelog)
            for x in c
        ]
1357
1357
1358
1358
class committablectx(basectx):
    """A committablectx object provides common functionality for a context that
    wants the ability to commit, e.g. workingctx or memctx."""

    def __init__(
        self,
        repo,
        text=b"",
        user=None,
        date=None,
        extra=None,
        changes=None,
        branch=None,
    ):
        super(committablectx, self).__init__(repo)
        # an uncommitted context has no revision number or node yet
        self._rev = None
        self._node = None
        self._text = text
        # only pre-seed the propertycaches we were actually given
        if date:
            self._date = dateutil.parsedate(date)
        if user:
            self._user = user
        if changes:
            self._status = changes

        self._extra = {}
        if extra:
            self._extra = extra.copy()
        if branch is not None:
            self._extra[b'branch'] = encoding.fromlocal(branch)
        if not self._extra.get(b'branch'):
            self._extra[b'branch'] = b'default'

    def __bytes__(self):
        return bytes(self._parents[0]) + b"+"

    def hex(self):
        """Return the hex pseudo-id of an uncommitted context.

        Fix: this method previously evaluated
        ``self._repo.nodeconstants.wdirhex`` without returning it, so it
        always produced None; the value is now returned, matching the
        workingctx override.
        """
        return self._repo.nodeconstants.wdirhex

    __str__ = encoding.strmethod(__bytes__)

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    @propertycache
    def _status(self):
        # default when no explicit `changes` was supplied at construction
        return self._repo.status()

    @propertycache
    def _user(self):
        return self._repo.ui.username()

    @propertycache
    def _date(self):
        ui = self._repo.ui
        # devel.default-date lets tests pin the commit date
        date = ui.configdate(b'devel', b'default-date')
        if date is None:
            date = dateutil.makedate()
        return date

    def subrev(self, subpath):
        # an uncommitted context has no recorded subrepo revisions
        return None

    def manifestnode(self):
        return None

    def user(self):
        return self._user or self._repo.ui.username()

    def date(self):
        return self._date

    def description(self):
        return self._text

    def files(self):
        """Return the sorted list of files touched by this context."""
        return sorted(
            self._status.modified + self._status.added + self._status.removed
        )

    def modified(self):
        return self._status.modified

    def added(self):
        return self._status.added

    def removed(self):
        return self._status.removed

    def deleted(self):
        return self._status.deleted

    # for an uncommitted context the "commit-time" file lists are simply the
    # current status lists
    filesmodified = modified
    filesadded = added
    filesremoved = removed

    def branch(self):
        return encoding.tolocal(self._extra[b'branch'])

    def closesbranch(self):
        return b'close' in self._extra

    def extra(self):
        return self._extra

    def isinmemory(self):
        return False

    def tags(self):
        return []

    def bookmarks(self):
        # an uncommitted context inherits the bookmarks of all its parents
        b = []
        for p in self.parents():
            b.extend(p.bookmarks())
        return b

    def phase(self):
        # never commit in a lower phase than any parent
        phase = phases.newcommitphase(self._repo.ui)
        for p in self.parents():
            phase = max(phase, p.phase())
        return phase

    def hidden(self):
        return False

    def children(self):
        return []

    def flags(self, path):
        """Return the flags (b'l', b'x', ...) for path, b'' when unknown."""
        if '_manifest' in self.__dict__:
            # a manifest was already built: trust it
            try:
                return self._manifest.flags(path)
            except KeyError:
                return b''

        try:
            return self._flagfunc(path)
        except OSError:
            return b''

    def ancestor(self, c2):
        """return the "best" ancestor context of self and c2"""
        return self._parents[0].ancestor(c2)  # punt on two parents for now

    def ancestors(self):
        # parents first, then everything reachable from them
        for p in self._parents:
            yield p
        for a in self._repo.changelog.ancestors(
            [p.rev() for p in self._parents]
        ):
            yield self._repo[a]

    def markcommitted(self, node):
        """Perform post-commit cleanup necessary after committing this ctx

        Specifically, this updates backing stores this working context
        wraps to reflect the fact that the changes reflected by this
        workingctx have been committed. For example, it marks
        modified and added files as normal in the dirstate.

        """
        # intentionally a no-op here; subclasses with backing stores
        # (e.g. workingctx) override this

    def dirty(self, missing=False, merge=True, branch=True):
        # base committable contexts are never considered dirty
        return False
1526
1526
1527
1527
class workingctx(committablectx):
    """A workingctx object makes access to data related to
    the current working directory convenient.
    date - any valid date string or (unixtime, offset), or None.
    user - username string, or None.
    extra - a dictionary of extra values, or None.
    changes - a list of file lists as returned by localrepo.status()
    or None to use the repository status.
    """

    def __init__(
        self, repo, text=b"", user=None, date=None, extra=None, changes=None
    ):
        # unless the caller pinned a branch through `extra`, inherit the
        # branch currently recorded in the dirstate
        branch = None
        if not extra or b'branch' not in extra:
            try:
                branch = repo.dirstate.branch()
            except UnicodeDecodeError:
                raise error.Abort(_(b'branch name not in UTF-8!'))
        super(workingctx, self).__init__(
            repo, text, user, date, extra, changes, branch=branch
        )
1550
1550
1551 def __iter__(self):
1551 def __iter__(self):
1552 d = self._repo.dirstate
1552 d = self._repo.dirstate
1553 for f in d:
1553 for f in d:
1554 if d[f] != b'r':
1554 if d[f] != b'r':
1555 yield f
1555 yield f
1556
1556
1557 def __contains__(self, key):
1557 def __contains__(self, key):
1558 return self._repo.dirstate[key] not in b"?r"
1558 return self._repo.dirstate[key] not in b"?r"
1559
1559
1560 def hex(self):
1560 def hex(self):
1561 return self._repo.nodeconstants.wdirhex
1561 return self._repo.nodeconstants.wdirhex
1562
1562
1563 @propertycache
1563 @propertycache
1564 def _parents(self):
1564 def _parents(self):
1565 p = self._repo.dirstate.parents()
1565 p = self._repo.dirstate.parents()
1566 if p[1] == self._repo.nodeconstants.nullid:
1566 if p[1] == self._repo.nodeconstants.nullid:
1567 p = p[:-1]
1567 p = p[:-1]
1568 # use unfiltered repo to delay/avoid loading obsmarkers
1568 # use unfiltered repo to delay/avoid loading obsmarkers
1569 unfi = self._repo.unfiltered()
1569 unfi = self._repo.unfiltered()
1570 return [
1570 return [
1571 changectx(
1571 changectx(
1572 self._repo, unfi.changelog.rev(n), n, maybe_filtered=False
1572 self._repo, unfi.changelog.rev(n), n, maybe_filtered=False
1573 )
1573 )
1574 for n in p
1574 for n in p
1575 ]
1575 ]
1576
1576
    def setparents(self, p1node, p2node=None):
        """Set the dirstate parents and fix up copy records accordingly.

        ``p2node`` defaults to the null id (single-parent working dir).
        """
        if p2node is None:
            p2node = self._repo.nodeconstants.nullid
        dirstate = self._repo.dirstate
        with dirstate.parentchange():
            copies = dirstate.setparents(p1node, p2node)
            pctx = self._repo[p1node]
            if copies:
                # Adjust copy records, the dirstate cannot do it, it
                # requires access to parents manifests. Preserve them
                # only for entries added to first parent.
                for f in copies:
                    if f not in pctx and copies[f] in pctx:
                        dirstate.copy(copies[f], f)
            if p2node == self._repo.nodeconstants.nullid:
                # back to a single parent: drop copy records where neither
                # the destination nor the source exists in the new parent
                for f, s in sorted(dirstate.copies().items()):
                    if f not in pctx and s not in pctx:
                        dirstate.copy(None, f)
1595
1595
    def _fileinfo(self, path):
        # populate __dict__['_manifest'] as workingctx has no _manifestdelta
        # (accessing the attribute is the side effect we need here)
        self._manifest
        return super(workingctx, self)._fileinfo(path)
1600
1600
    def _buildflagfunc(self):
        # Create a fallback function for getting file flags when the
        # filesystem doesn't support them

        copiesget = self._repo.dirstate.copies().get
        parents = self.parents()
        if len(parents) < 2:
            # when we have one parent, it's easy: copy from parent
            man = parents[0].manifest()

            def func(f):
                # follow a pending copy back to its source before looking up
                f = copiesget(f, f)
                return man.flags(f)

        else:
            # merges are tricky: we try to reconstruct the unstored
            # result from the merge (issue1802)
            p1, p2 = parents
            pa = p1.ancestor(p2)
            m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()

            def func(f):
                f = copiesget(f, f)  # may be wrong for merges with copies
                fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
                # standard 3-way merge of the flag value
                if fl1 == fl2:
                    return fl1
                if fl1 == fla:
                    return fl2
                if fl2 == fla:
                    return fl1
                return b''  # punt for conflicts

        return func
1634
1634
1635 @propertycache
1635 @propertycache
1636 def _flagfunc(self):
1636 def _flagfunc(self):
1637 return self._repo.dirstate.flagfunc(self._buildflagfunc)
1637 return self._repo.dirstate.flagfunc(self._buildflagfunc)
1638
1638
1639 def flags(self, path):
1639 def flags(self, path):
1640 try:
1640 try:
1641 return self._flagfunc(path)
1641 return self._flagfunc(path)
1642 except OSError:
1642 except OSError:
1643 return b''
1643 return b''
1644
1644
1645 def filectx(self, path, filelog=None):
1645 def filectx(self, path, filelog=None):
1646 """get a file context from the working directory"""
1646 """get a file context from the working directory"""
1647 return workingfilectx(
1647 return workingfilectx(
1648 self._repo, path, workingctx=self, filelog=filelog
1648 self._repo, path, workingctx=self, filelog=filelog
1649 )
1649 )
1650
1650
1651 def dirty(self, missing=False, merge=True, branch=True):
1651 def dirty(self, missing=False, merge=True, branch=True):
1652 """check whether a working directory is modified"""
1652 """check whether a working directory is modified"""
1653 # check subrepos first
1653 # check subrepos first
1654 for s in sorted(self.substate):
1654 for s in sorted(self.substate):
1655 if self.sub(s).dirty(missing=missing):
1655 if self.sub(s).dirty(missing=missing):
1656 return True
1656 return True
1657 # check current working dir
1657 # check current working dir
1658 return (
1658 return (
1659 (merge and self.p2())
1659 (merge and self.p2())
1660 or (branch and self.branch() != self.p1().branch())
1660 or (branch and self.branch() != self.p1().branch())
1661 or self.modified()
1661 or self.modified()
1662 or self.added()
1662 or self.added()
1663 or self.removed()
1663 or self.removed()
1664 or (missing and self.deleted())
1664 or (missing and self.deleted())
1665 )
1665 )
1666
1666
    def add(self, list, prefix=b""):
        """Start tracking the given paths in the dirstate.

        Emits a warning for each path that does not exist, is neither a
        regular file nor a symlink, or is already tracked.  Returns the list
        of rejected paths.
        """
        with self._repo.wlock():
            ui, ds = self._repo.ui, self._repo.dirstate
            uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
            rejected = []
            lstat = self._repo.wvfs.lstat
            for f in list:
                # ds.pathto() returns an absolute file when this is invoked from
                # the keyword extension.  That gets flagged as non-portable on
                # Windows, since it contains the drive letter and colon.
                scmutil.checkportable(ui, os.path.join(prefix, f))
                try:
                    st = lstat(f)
                except OSError:
                    ui.warn(_(b"%s does not exist!\n") % uipath(f))
                    rejected.append(f)
                    continue
                # warn (but do not reject) about very large files
                limit = ui.configbytes(b'ui', b'large-file-limit')
                if limit != 0 and st.st_size > limit:
                    ui.warn(
                        _(
                            b"%s: up to %d MB of RAM may be required "
                            b"to manage this file\n"
                            b"(use 'hg revert %s' to cancel the "
                            b"pending addition)\n"
                        )
                        % (f, 3 * st.st_size // 1000000, uipath(f))
                    )
                if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
                    ui.warn(
                        _(
                            b"%s not added: only files and symlinks "
                            b"supported currently\n"
                        )
                        % uipath(f)
                    )
                    rejected.append(f)
                elif not ds.set_tracked(f):
                    ui.warn(_(b"%s already tracked!\n") % uipath(f))
            return rejected
1707
1707
1708 def forget(self, files, prefix=b""):
1708 def forget(self, files, prefix=b""):
1709 with self._repo.wlock():
1709 with self._repo.wlock():
1710 ds = self._repo.dirstate
1710 ds = self._repo.dirstate
1711 uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
1711 uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
1712 rejected = []
1712 rejected = []
1713 for f in files:
1713 for f in files:
1714 if not ds.set_untracked(f):
1714 if not ds.set_untracked(f):
1715 self._repo.ui.warn(_(b"%s not tracked!\n") % uipath(f))
1715 self._repo.ui.warn(_(b"%s not tracked!\n") % uipath(f))
1716 rejected.append(f)
1716 rejected.append(f)
1717 return rejected
1717 return rejected
1718
1718
1719 def copy(self, source, dest):
1719 def copy(self, source, dest):
1720 try:
1720 try:
1721 st = self._repo.wvfs.lstat(dest)
1721 st = self._repo.wvfs.lstat(dest)
1722 except OSError as err:
1722 except OSError as err:
1723 if err.errno != errno.ENOENT:
1723 if err.errno != errno.ENOENT:
1724 raise
1724 raise
1725 self._repo.ui.warn(
1725 self._repo.ui.warn(
1726 _(b"%s does not exist!\n") % self._repo.dirstate.pathto(dest)
1726 _(b"%s does not exist!\n") % self._repo.dirstate.pathto(dest)
1727 )
1727 )
1728 return
1728 return
1729 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1729 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1730 self._repo.ui.warn(
1730 self._repo.ui.warn(
1731 _(b"copy failed: %s is not a file or a symbolic link\n")
1731 _(b"copy failed: %s is not a file or a symbolic link\n")
1732 % self._repo.dirstate.pathto(dest)
1732 % self._repo.dirstate.pathto(dest)
1733 )
1733 )
1734 else:
1734 else:
1735 with self._repo.wlock():
1735 with self._repo.wlock():
1736 ds = self._repo.dirstate
1736 ds = self._repo.dirstate
1737 ds.set_tracked(dest)
1737 ds.set_tracked(dest)
1738 ds.copy(source, dest)
1738 ds.copy(source, dest)
1739
1739
1740 def match(
1740 def match(
1741 self,
1741 self,
1742 pats=None,
1742 pats=None,
1743 include=None,
1743 include=None,
1744 exclude=None,
1744 exclude=None,
1745 default=b'glob',
1745 default=b'glob',
1746 listsubrepos=False,
1746 listsubrepos=False,
1747 badfn=None,
1747 badfn=None,
1748 cwd=None,
1748 cwd=None,
1749 ):
1749 ):
1750 r = self._repo
1750 r = self._repo
1751 if not cwd:
1751 if not cwd:
1752 cwd = r.getcwd()
1752 cwd = r.getcwd()
1753
1753
1754 # Only a case insensitive filesystem needs magic to translate user input
1754 # Only a case insensitive filesystem needs magic to translate user input
1755 # to actual case in the filesystem.
1755 # to actual case in the filesystem.
1756 icasefs = not util.fscasesensitive(r.root)
1756 icasefs = not util.fscasesensitive(r.root)
1757 return matchmod.match(
1757 return matchmod.match(
1758 r.root,
1758 r.root,
1759 cwd,
1759 cwd,
1760 pats,
1760 pats,
1761 include,
1761 include,
1762 exclude,
1762 exclude,
1763 default,
1763 default,
1764 auditor=r.auditor,
1764 auditor=r.auditor,
1765 ctx=self,
1765 ctx=self,
1766 listsubrepos=listsubrepos,
1766 listsubrepos=listsubrepos,
1767 badfn=badfn,
1767 badfn=badfn,
1768 icasefs=icasefs,
1768 icasefs=icasefs,
1769 )
1769 )
1770
1770
1771 def _filtersuspectsymlink(self, files):
1771 def _filtersuspectsymlink(self, files):
1772 if not files or self._repo.dirstate._checklink:
1772 if not files or self._repo.dirstate._checklink:
1773 return files
1773 return files
1774
1774
1775 # Symlink placeholders may get non-symlink-like contents
1775 # Symlink placeholders may get non-symlink-like contents
1776 # via user error or dereferencing by NFS or Samba servers,
1776 # via user error or dereferencing by NFS or Samba servers,
1777 # so we filter out any placeholders that don't look like a
1777 # so we filter out any placeholders that don't look like a
1778 # symlink
1778 # symlink
1779 sane = []
1779 sane = []
1780 for f in files:
1780 for f in files:
1781 if self.flags(f) == b'l':
1781 if self.flags(f) == b'l':
1782 d = self[f].data()
1782 d = self[f].data()
1783 if (
1783 if (
1784 d == b''
1784 d == b''
1785 or len(d) >= 1024
1785 or len(d) >= 1024
1786 or b'\n' in d
1786 or b'\n' in d
1787 or stringutil.binary(d)
1787 or stringutil.binary(d)
1788 ):
1788 ):
1789 self._repo.ui.debug(
1789 self._repo.ui.debug(
1790 b'ignoring suspect symlink placeholder "%s"\n' % f
1790 b'ignoring suspect symlink placeholder "%s"\n' % f
1791 )
1791 )
1792 continue
1792 continue
1793 sane.append(f)
1793 sane.append(f)
1794 return sane
1794 return sane
1795
1795
    def _checklookup(self, files):
        """Fully compare files the dirstate could not classify cheaply.

        ``files`` are the "lookup" candidates returned by the dirstate
        status. Returns a ``(modified, deleted, fixup)`` triple of lists:
        files whose content/flags actually differ from p1, files that
        became unreadable meanwhile, and files that turned out clean
        (candidates for a later dirstate fixup).
        """
        # check for any possibly clean files
        if not files:
            return [], [], []

        modified = []
        deleted = []
        fixup = []
        pctx = self._parents[0]
        # do a full compare of any files that might have changed
        for f in sorted(files):
            try:
                # This will return True for a file that got replaced by a
                # directory in the interim, but fixing that is pretty hard.
                if (
                    f not in pctx
                    or self.flags(f) != pctx.flags(f)
                    or pctx[f].cmp(self[f])
                ):
                    modified.append(f)
                else:
                    fixup.append(f)
            except (IOError, OSError):
                # A file become inaccessible in between? Mark it as deleted,
                # matching dirstate behavior (issue5584).
                # The dirstate has more complex behavior around whether a
                # missing file matches a directory, etc, but we don't need to
                # bother with that: if f has made it to this point, we're sure
                # it's in the dirstate.
                deleted.append(f)

        return modified, deleted, fixup
1828
1828
    def _poststatusfixup(self, status, fixup):
        """update dirstate for files that are actually clean

        ``fixup`` is the list of files a full comparison proved clean;
        their dirstate entries are refreshed so future status calls can
        skip the expensive compare. Registered post-status hooks
        (``repo.postdsstatus()``) are also run here, under the wlock.

        The lock is taken non-blocking: updating the dirstate is an
        optimization only, so on contention we simply skip it.
        """
        poststatus = self._repo.postdsstatus()
        if fixup or poststatus or self._repo.dirstate._dirty:
            try:
                oldid = self._repo.dirstate.identity()

                # updating the dirstate is optional
                # so we don't wait on the lock
                # wlock can invalidate the dirstate, so cache normal _after_
                # taking the lock
                with self._repo.wlock(False):
                    dirstate = self._repo.dirstate
                    if dirstate.identity() == oldid:
                        if fixup:
                            # during a parent change the plain "clean"
                            # marking is not available; record the file
                            # as tracked in both p1 and the working copy
                            if dirstate.pendingparentchange():
                                normal = lambda f: dirstate.update_file(
                                    f, p1_tracked=True, wc_tracked=True
                                )
                            else:
                                normal = dirstate.set_clean
                            for f in fixup:
                                normal(f)
                            # write changes out explicitly, because nesting
                            # wlock at runtime may prevent 'wlock.release()'
                            # after this block from doing so for subsequent
                            # changing files
                            tr = self._repo.currenttransaction()
                            self._repo.dirstate.write(tr)

                        if poststatus:
                            for ps in poststatus:
                                ps(self, status)
                    else:
                        # in this case, writing changes out breaks
                        # consistency, because .hg/dirstate was
                        # already changed simultaneously after last
                        # caching (see also issue5584 for detail)
                        self._repo.ui.debug(
                            b'skip updating dirstate: identity mismatch\n'
                        )
            except error.LockError:
                # couldn't grab the wlock; skip the optional fixup
                pass
            finally:
                # Even if the wlock couldn't be grabbed, clear out the list.
                self._repo.clearpostdsstatus()
1875
1875
    def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
        '''Gets the status from the dirstate -- internal use only.

        Files the dirstate could only classify as "maybe changed" are
        resolved via a full compare (_checklookup), and entries proven
        clean are written back through _poststatusfixup. With an
        always-matcher the result is also cached on ``self._status``.
        '''
        subrepos = []
        if b'.hgsub' in self:
            subrepos = sorted(self.substate)
        # cmp: files needing a full content compare; s: the raw status
        cmp, s = self._repo.dirstate.status(
            match, subrepos, ignored=ignored, clean=clean, unknown=unknown
        )

        # check for any possibly clean files
        fixup = []
        if cmp:
            modified2, deleted2, fixup = self._checklookup(cmp)
            s.modified.extend(modified2)
            s.deleted.extend(deleted2)

        if fixup and clean:
            s.clean.extend(fixup)

        self._poststatusfixup(s, fixup)

        if match.always():
            # cache for performance
            if s.unknown or s.ignored or s.clean:
                # "_status" is cached with list*=False in the normal route
                self._status = scmutil.status(
                    s.modified, s.added, s.removed, s.deleted, [], [], []
                )
            else:
                self._status = s

        return s
1908
1908
1909 @propertycache
1909 @propertycache
1910 def _copies(self):
1910 def _copies(self):
1911 p1copies = {}
1911 p1copies = {}
1912 p2copies = {}
1912 p2copies = {}
1913 parents = self._repo.dirstate.parents()
1913 parents = self._repo.dirstate.parents()
1914 p1manifest = self._repo[parents[0]].manifest()
1914 p1manifest = self._repo[parents[0]].manifest()
1915 p2manifest = self._repo[parents[1]].manifest()
1915 p2manifest = self._repo[parents[1]].manifest()
1916 changedset = set(self.added()) | set(self.modified())
1916 changedset = set(self.added()) | set(self.modified())
1917 narrowmatch = self._repo.narrowmatch()
1917 narrowmatch = self._repo.narrowmatch()
1918 for dst, src in self._repo.dirstate.copies().items():
1918 for dst, src in self._repo.dirstate.copies().items():
1919 if dst not in changedset or not narrowmatch(dst):
1919 if dst not in changedset or not narrowmatch(dst):
1920 continue
1920 continue
1921 if src in p1manifest:
1921 if src in p1manifest:
1922 p1copies[dst] = src
1922 p1copies[dst] = src
1923 elif src in p2manifest:
1923 elif src in p2manifest:
1924 p2copies[dst] = src
1924 p2copies[dst] = src
1925 return p1copies, p2copies
1925 return p1copies, p2copies
1926
1926
    @propertycache
    def _manifest(self):
        """generate a manifest corresponding to the values in self._status

        This reuses the file nodeids from the parent, but uses special
        node identifiers for added and modified files. This is used by
        manifest merge to see that files are different and by the update
        logic to avoid deleting newly added files.
        """
        return self._buildstatusmanifest(self._status)
1937
1937
1938 def _buildstatusmanifest(self, status):
1938 def _buildstatusmanifest(self, status):
1939 """Builds a manifest that includes the given status results."""
1939 """Builds a manifest that includes the given status results."""
1940 parents = self.parents()
1940 parents = self.parents()
1941
1941
1942 man = parents[0].manifest().copy()
1942 man = parents[0].manifest().copy()
1943
1943
1944 ff = self._flagfunc
1944 ff = self._flagfunc
1945 for i, l in (
1945 for i, l in (
1946 (self._repo.nodeconstants.addednodeid, status.added),
1946 (self._repo.nodeconstants.addednodeid, status.added),
1947 (self._repo.nodeconstants.modifiednodeid, status.modified),
1947 (self._repo.nodeconstants.modifiednodeid, status.modified),
1948 ):
1948 ):
1949 for f in l:
1949 for f in l:
1950 man[f] = i
1950 man[f] = i
1951 try:
1951 try:
1952 man.setflag(f, ff(f))
1952 man.setflag(f, ff(f))
1953 except OSError:
1953 except OSError:
1954 pass
1954 pass
1955
1955
1956 for f in status.deleted + status.removed:
1956 for f in status.deleted + status.removed:
1957 if f in man:
1957 if f in man:
1958 del man[f]
1958 del man[f]
1959
1959
1960 return man
1960 return man
1961
1961
1962 def _buildstatus(
1962 def _buildstatus(
1963 self, other, s, match, listignored, listclean, listunknown
1963 self, other, s, match, listignored, listclean, listunknown
1964 ):
1964 ):
1965 """build a status with respect to another context
1965 """build a status with respect to another context
1966
1966
1967 This includes logic for maintaining the fast path of status when
1967 This includes logic for maintaining the fast path of status when
1968 comparing the working directory against its parent, which is to skip
1968 comparing the working directory against its parent, which is to skip
1969 building a new manifest if self (working directory) is not comparing
1969 building a new manifest if self (working directory) is not comparing
1970 against its parent (repo['.']).
1970 against its parent (repo['.']).
1971 """
1971 """
1972 s = self._dirstatestatus(match, listignored, listclean, listunknown)
1972 s = self._dirstatestatus(match, listignored, listclean, listunknown)
1973 # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
1973 # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
1974 # might have accidentally ended up with the entire contents of the file
1974 # might have accidentally ended up with the entire contents of the file
1975 # they are supposed to be linking to.
1975 # they are supposed to be linking to.
1976 s.modified[:] = self._filtersuspectsymlink(s.modified)
1976 s.modified[:] = self._filtersuspectsymlink(s.modified)
1977 if other != self._repo[b'.']:
1977 if other != self._repo[b'.']:
1978 s = super(workingctx, self)._buildstatus(
1978 s = super(workingctx, self)._buildstatus(
1979 other, s, match, listignored, listclean, listunknown
1979 other, s, match, listignored, listclean, listunknown
1980 )
1980 )
1981 return s
1981 return s
1982
1982
1983 def _matchstatus(self, other, match):
1983 def _matchstatus(self, other, match):
1984 """override the match method with a filter for directory patterns
1984 """override the match method with a filter for directory patterns
1985
1985
1986 We use inheritance to customize the match.bad method only in cases of
1986 We use inheritance to customize the match.bad method only in cases of
1987 workingctx since it belongs only to the working directory when
1987 workingctx since it belongs only to the working directory when
1988 comparing against the parent changeset.
1988 comparing against the parent changeset.
1989
1989
1990 If we aren't comparing against the working directory's parent, then we
1990 If we aren't comparing against the working directory's parent, then we
1991 just use the default match object sent to us.
1991 just use the default match object sent to us.
1992 """
1992 """
1993 if other != self._repo[b'.']:
1993 if other != self._repo[b'.']:
1994
1994
1995 def bad(f, msg):
1995 def bad(f, msg):
1996 # 'f' may be a directory pattern from 'match.files()',
1996 # 'f' may be a directory pattern from 'match.files()',
1997 # so 'f not in ctx1' is not enough
1997 # so 'f not in ctx1' is not enough
1998 if f not in other and not other.hasdir(f):
1998 if f not in other and not other.hasdir(f):
1999 self._repo.ui.warn(
1999 self._repo.ui.warn(
2000 b'%s: %s\n' % (self._repo.dirstate.pathto(f), msg)
2000 b'%s: %s\n' % (self._repo.dirstate.pathto(f), msg)
2001 )
2001 )
2002
2002
2003 match.bad = bad
2003 match.bad = bad
2004 return match
2004 return match
2005
2005
2006 def walk(self, match):
2006 def walk(self, match):
2007 '''Generates matching file names.'''
2007 '''Generates matching file names.'''
2008 return sorted(
2008 return sorted(
2009 self._repo.dirstate.walk(
2009 self._repo.dirstate.walk(
2010 self._repo.narrowmatch(match),
2010 self._repo.narrowmatch(match),
2011 subrepos=sorted(self.substate),
2011 subrepos=sorted(self.substate),
2012 unknown=True,
2012 unknown=True,
2013 ignored=False,
2013 ignored=False,
2014 )
2014 )
2015 )
2015 )
2016
2016
2017 def matches(self, match):
2017 def matches(self, match):
2018 match = self._repo.narrowmatch(match)
2018 match = self._repo.narrowmatch(match)
2019 ds = self._repo.dirstate
2019 ds = self._repo.dirstate
2020 return sorted(f for f in ds.matches(match) if ds[f] != b'r')
2020 return sorted(f for f in ds.matches(match) if ds[f] != b'r')
2021
2021
    def markcommitted(self, node):
        """Update the dirstate after this working context was committed
        as ``node``.

        Modified/added files are recorded as tracked in both the new
        parent and the working copy (via ``update_file``), removed files
        are dropped, and the dirstate parent is moved to ``node``.
        """
        with self._repo.dirstate.parentchange():
            for f in self.modified() + self.added():
                self._repo.dirstate.update_file(
                    f, p1_tracked=True, wc_tracked=True
                )
            for f in self.removed():
                self._repo.dirstate.drop(f)
            self._repo.dirstate.setparents(node)
        self._repo._quick_access_changeid_invalidate()

        sparse.aftercommit(self._repo, node)

        # write changes out explicitly, because nesting wlock at
        # runtime may prevent 'wlock.release()' in 'repo.commit()'
        # from immediately doing so for subsequent changing files
        self._repo.dirstate.write(self._repo.currenttransaction())
2037
2039
2038 def mergestate(self, clean=False):
2040 def mergestate(self, clean=False):
2039 if clean:
2041 if clean:
2040 return mergestatemod.mergestate.clean(self._repo)
2042 return mergestatemod.mergestate.clean(self._repo)
2041 return mergestatemod.mergestate.read(self._repo)
2043 return mergestatemod.mergestate.read(self._repo)
2042
2044
2043
2045
class committablefilectx(basefilectx):
    """A committablefilectx provides common functionality for a file context
    that wants the ability to commit, e.g. workingfilectx or memfilectx."""

    def __init__(self, repo, path, filelog=None, ctx=None):
        # repo/path identify the file; filelog and ctx pre-populate
        # attributes that would otherwise be computed lazily.
        self._repo = repo
        self._path = path
        self._changeid = None
        self._filerev = self._filenode = None

        if filelog is not None:
            self._filelog = filelog
        if ctx:
            self._changectx = ctx

    def __nonzero__(self):
        # a committable file context always counts as existing
        return True

    __bool__ = __nonzero__

    def linkrev(self):
        # linked to self._changectx no matter if file is modified or not
        return self.rev()

    def renamed(self):
        """Return ``(source path, source filenode)`` if this file was
        copied, or None otherwise.

        The filenode is looked up in the first parent's manifest;
        nullid is used when the source is absent from that manifest.
        """
        path = self.copysource()
        if not path:
            return None
        return (
            path,
            self._changectx._parents[0]._manifest.get(
                path, self._repo.nodeconstants.nullid
            ),
        )

    def parents(self):
        '''return parent filectxs, following copies if necessary'''

        def filenode(ctx, path):
            # node of ``path`` in ``ctx``'s manifest, nullid if absent
            return ctx._manifest.get(path, self._repo.nodeconstants.nullid)

        path = self._path
        fl = self._filelog
        pcl = self._changectx._parents
        renamed = self.renamed()

        if renamed:
            # follow the copy: first parent is the copy source
            # (its filelog is unknown here, hence None)
            pl = [renamed + (None,)]
        else:
            pl = [(path, filenode(pcl[0], path), fl)]

        for pc in pcl[1:]:
            pl.append((path, filenode(pc, path), fl))

        # drop parents in which the file does not exist (nullid node)
        return [
            self._parentfilectx(p, fileid=n, filelog=l)
            for p, n, l in pl
            if n != self._repo.nodeconstants.nullid
        ]

    def children(self):
        # an uncommitted file context has no descendants
        return []
2106
2108
2107
2109
class workingfilectx(committablefilectx):
    """A workingfilectx object makes access to data related to a particular
    file in the working directory convenient."""

    def __init__(self, repo, path, filelog=None, workingctx=None):
        super(workingfilectx, self).__init__(repo, path, filelog, workingctx)

    @propertycache
    def _changectx(self):
        # lazily build the working directory change context
        return workingctx(self._repo)

    def data(self):
        """Return the file's current content from the working directory."""
        return self._repo.wread(self._path)

    def copysource(self):
        """Return the copy source recorded in the dirstate, if any."""
        return self._repo.dirstate.copied(self._path)

    def size(self):
        """Return the file size from lstat (symlinks not followed)."""
        return self._repo.wvfs.lstat(self._path).st_size

    def lstat(self):
        """Return the lstat result for the file in the working directory."""
        return self._repo.wvfs.lstat(self._path)

    def date(self):
        """Return ``(mtime, tz)`` for the on-disk file; fall back to the
        change context's timestamp when the file is missing."""
        t, tz = self._changectx.date()
        try:
            return (self._repo.wvfs.lstat(self._path)[stat.ST_MTIME], tz)
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
            return (t, tz)

    def exists(self):
        # follows symlinks
        return self._repo.wvfs.exists(self._path)

    def lexists(self):
        # does not follow symlinks
        return self._repo.wvfs.lexists(self._path)

    def audit(self):
        """Run the path auditor on this file's path."""
        return self._repo.wvfs.audit(self._path)

    def cmp(self, fctx):
        """compare with other file context

        returns True if different than fctx.
        """
        # fctx should be a filectx (not a workingfilectx)
        # invert comparison to reuse the same code path
        return fctx.cmp(self)

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        # whether to clean up now-empty parent directories is a config knob
        rmdir = self._repo.ui.configbool(b'experimental', b'removeemptydirs')
        self._repo.wvfs.unlinkpath(
            self._path, ignoremissing=ignoremissing, rmdir=rmdir
        )

    def write(self, data, flags, backgroundclose=False, **kwargs):
        """wraps repo.wwrite"""
        return self._repo.wwrite(
            self._path, data, flags, backgroundclose=backgroundclose, **kwargs
        )

    def markcopied(self, src):
        """marks this file a copy of `src`"""
        self._repo.dirstate.copy(src, self._path)

    def clearunknown(self):
        """Removes conflicting items in the working directory so that
        ``write()`` can be called successfully.
        """
        wvfs = self._repo.wvfs
        f = self._path
        wvfs.audit(f)
        if self._repo.ui.configbool(
            b'experimental', b'merge.checkpathconflicts'
        ):
            # remove files under the directory as they should already be
            # warned and backed up
            if wvfs.isdir(f) and not wvfs.islink(f):
                wvfs.rmtree(f, forcibly=True)
            # unlink the nearest ancestor that is a file/symlink, if any
            for p in reversed(list(pathutil.finddirs(f))):
                if wvfs.isfileorlink(p):
                    wvfs.unlink(p)
                    break
        else:
            # don't remove files if path conflicts are not processed
            if wvfs.isdir(f) and not wvfs.islink(f):
                wvfs.removedirs(f)

    def setflags(self, l, x):
        """Set the symlink (``l``) and executable (``x``) flags on disk."""
        self._repo.wvfs.setflags(self._path, l, x)
2200
2202
2201
2203
class overlayworkingctx(committablectx):
    """Wraps another mutable context with a write-back cache that can be
    converted into a commit context.

    self._cache[path] maps to a dict with keys: {
        'exists': bool?
        'date': date?
        'data': str?
        'flags': str?
        'copied': str? (path or None)
    }
    If `exists` is True, `flags` must be non-None and 'date' is non-None. If it
    is `False`, the file was deleted.
    """

    def __init__(self, repo):
        super(overlayworkingctx, self).__init__(repo)
        # ``clean()`` initializes ``self._cache`` and ``self._mergestate``.
        self.clean()

    def setbase(self, wrappedctx):
        """Set the underlying (read-only) context this overlay wraps."""
        self._wrappedctx = wrappedctx
        self._parents = [wrappedctx]
        # Drop old manifest cache as it is now out of date.
        # This is necessary when, e.g., rebasing several nodes with one
        # ``overlayworkingctx`` (e.g. with --collapse).
        util.clearcachedproperty(self, b'_manifest')

    def setparents(self, p1node, p2node=None):
        # p1 must stay the wrapped context; only p2 may vary.
        if p2node is None:
            p2node = self._repo.nodeconstants.nullid
        assert p1node == self._wrappedctx.node()
        self._parents = [self._wrappedctx, self._repo.unfiltered()[p2node]]

    def data(self, path):
        """Return the file contents for ``path``, preferring cached data."""
        if self.isdirty(path):
            if self._cache[path][b'exists']:
                if self._cache[path][b'data'] is not None:
                    return self._cache[path][b'data']
                else:
                    # Must fallback here, too, because we only set flags.
                    return self._wrappedctx[path].data()
            else:
                raise error.ProgrammingError(
                    b"No such file or directory: %s" % path
                )
        else:
            return self._wrappedctx[path].data()

    @propertycache
    def _manifest(self):
        # Start from p1's manifest and overlay the cached adds/mods/removes.
        parents = self.parents()
        man = parents[0].manifest().copy()

        flag = self._flagfunc
        for path in self.added():
            man[path] = self._repo.nodeconstants.addednodeid
            man.setflag(path, flag(path))
        for path in self.modified():
            man[path] = self._repo.nodeconstants.modifiednodeid
            man.setflag(path, flag(path))
        for path in self.removed():
            del man[path]
        return man

    @propertycache
    def _flagfunc(self):
        def f(path):
            return self._cache[path][b'flags']

        return f

    def files(self):
        """Return all paths touched by this overlay, sorted."""
        return sorted(self.added() + self.modified() + self.removed())

    def modified(self):
        # Exists in the cache AND in the parent -> modification.
        return [
            f
            for f in self._cache.keys()
            if self._cache[f][b'exists'] and self._existsinparent(f)
        ]

    def added(self):
        # Exists in the cache but NOT in the parent -> addition.
        return [
            f
            for f in self._cache.keys()
            if self._cache[f][b'exists'] and not self._existsinparent(f)
        ]

    def removed(self):
        # Deleted in the cache but present in the parent -> removal.
        return [
            f
            for f in self._cache.keys()
            if not self._cache[f][b'exists'] and self._existsinparent(f)
        ]

    def p1copies(self):
        """Return {dest: source} copy records, filtered by narrowmatch."""
        copies = {}
        narrowmatch = self._repo.narrowmatch()
        for f in self._cache.keys():
            if not narrowmatch(f):
                continue
            copies.pop(f, None)  # delete if it exists
            source = self._cache[f][b'copied']
            if source:
                copies[f] = source
        return copies

    def p2copies(self):
        # NOTE(review): intentionally identical to p1copies() here; the cache
        # records a single copy source per path — confirm against callers.
        copies = {}
        narrowmatch = self._repo.narrowmatch()
        for f in self._cache.keys():
            if not narrowmatch(f):
                continue
            copies.pop(f, None)  # delete if it exists
            source = self._cache[f][b'copied']
            if source:
                copies[f] = source
        return copies

    def isinmemory(self):
        return True

    def filedate(self, path):
        if self.isdirty(path):
            return self._cache[path][b'date']
        else:
            return self._wrappedctx[path].date()

    def markcopied(self, path, origin):
        """Record that ``path`` was copied from ``origin``."""
        self._markdirty(
            path,
            exists=True,
            date=self.filedate(path),
            flags=self.flags(path),
            copied=origin,
        )

    def copydata(self, path):
        if self.isdirty(path):
            return self._cache[path][b'copied']
        else:
            return None

    def flags(self, path):
        if self.isdirty(path):
            if self._cache[path][b'exists']:
                return self._cache[path][b'flags']
            else:
                raise error.ProgrammingError(
                    b"No such file or directory: %s" % path
                )
        else:
            return self._wrappedctx[path].flags()

    def __contains__(self, key):
        if key in self._cache:
            return self._cache[key][b'exists']
        return key in self.p1()

    def _existsinparent(self, path):
        try:
            # ``commitctx` raises a ``ManifestLookupError`` if a path does not
            # exist, unlike ``workingctx``, which returns a ``workingfilectx``
            # with an ``exists()`` function.
            self._wrappedctx[path]
            return True
        except error.ManifestLookupError:
            return False

    def _auditconflicts(self, path):
        """Replicates conflict checks done by wvfs.write().

        Since we never write to the filesystem and never call `applyupdates` in
        IMM, we'll never check that a path is actually writable -- e.g., because
        it adds `a/foo`, but `a` is actually a file in the other commit.
        """

        def fail(path, component):
            # p1() is the base and we're receiving "writes" for p2()'s
            # files.
            if b'l' in self.p1()[component].flags():
                raise error.Abort(
                    b"error: %s conflicts with symlink %s "
                    b"in %d." % (path, component, self.p1().rev())
                )
            else:
                raise error.Abort(
                    b"error: '%s' conflicts with file '%s' in "
                    b"%d." % (path, component, self.p1().rev())
                )

        # Test that each new directory to be created to write this path from p2
        # is not a file in p1.
        components = path.split(b'/')
        for i in pycompat.xrange(len(components)):
            component = b"/".join(components[0:i])
            if component in self:
                fail(path, component)

        # Test the other direction -- that this path from p2 isn't a directory
        # in p1 (test that p1 doesn't have any paths matching `path/*`).
        match = self.match([path], default=b'path')
        mfiles = list(self.p1().manifest().walk(match))
        if len(mfiles) > 0:
            if len(mfiles) == 1 and mfiles[0] == path:
                return
            # omit the files which are deleted in current IMM wctx
            mfiles = [m for m in mfiles if m in self]
            if not mfiles:
                return
            raise error.Abort(
                b"error: file '%s' cannot be written because "
                b" '%s/' is a directory in %s (containing %d "
                b"entries: %s)"
                % (path, path, self.p1(), len(mfiles), b', '.join(mfiles))
            )

    def write(self, path, data, flags=b'', **kwargs):
        """Record new contents for ``path`` in the in-memory cache."""
        if data is None:
            raise error.ProgrammingError(b"data must be non-None")
        self._auditconflicts(path)
        self._markdirty(
            path, exists=True, data=data, date=dateutil.makedate(), flags=flags
        )

    def setflags(self, path, l, x):
        # 'l' (symlink) wins over 'x' (executable) when both are requested.
        flag = b''
        if l:
            flag = b'l'
        elif x:
            flag = b'x'
        self._markdirty(path, exists=True, date=dateutil.makedate(), flags=flag)

    def remove(self, path):
        self._markdirty(path, exists=False)

    def exists(self, path):
        """exists behaves like `lexists`, but needs to follow symlinks and
        return False if they are broken.
        """
        if self.isdirty(path):
            # If this path exists and is a symlink, "follow" it by calling
            # exists on the destination path.
            if (
                self._cache[path][b'exists']
                and b'l' in self._cache[path][b'flags']
            ):
                return self.exists(self._cache[path][b'data'].strip())
            else:
                return self._cache[path][b'exists']

        return self._existsinparent(path)

    def lexists(self, path):
        """lexists returns True if the path exists"""
        if self.isdirty(path):
            return self._cache[path][b'exists']

        return self._existsinparent(path)

    def size(self, path):
        if self.isdirty(path):
            if self._cache[path][b'exists']:
                return len(self._cache[path][b'data'])
            else:
                raise error.ProgrammingError(
                    b"No such file or directory: %s" % path
                )
        return self._wrappedctx[path].size()

    def tomemctx(
        self,
        text,
        branch=None,
        extra=None,
        date=None,
        parents=None,
        user=None,
        editor=None,
    ):
        """Converts this ``overlayworkingctx`` into a ``memctx`` ready to be
        committed.

        ``text`` is the commit message.
        ``parents`` (optional) are rev numbers.
        """
        # Default parents to the wrapped context if not passed.
        if parents is None:
            parents = self.parents()
            if len(parents) == 1:
                parents = (parents[0], None)

        # ``parents`` is passed as rev numbers; convert to ``commitctxs``.
        if parents[1] is None:
            parents = (self._repo[parents[0]], None)
        else:
            parents = (self._repo[parents[0]], self._repo[parents[1]])

        files = self.files()

        def getfile(repo, memctx, path):
            if self._cache[path][b'exists']:
                return memfilectx(
                    repo,
                    memctx,
                    path,
                    self._cache[path][b'data'],
                    b'l' in self._cache[path][b'flags'],
                    b'x' in self._cache[path][b'flags'],
                    self._cache[path][b'copied'],
                )
            else:
                # Returning None, but including the path in `files`, is
                # necessary for memctx to register a deletion.
                return None

        if branch is None:
            branch = self._wrappedctx.branch()

        return memctx(
            self._repo,
            parents,
            text,
            files,
            getfile,
            date=date,
            extra=extra,
            user=user,
            branch=branch,
            editor=editor,
        )

    def tomemctx_for_amend(self, precursor):
        """Build a memctx that amends ``precursor`` (message/branch/date/user
        are taken from the precursor; ``amend_source`` records its hash)."""
        extra = precursor.extra().copy()
        extra[b'amend_source'] = precursor.hex()
        return self.tomemctx(
            text=precursor.description(),
            branch=precursor.branch(),
            extra=extra,
            date=precursor.date(),
            user=precursor.user(),
        )

    def isdirty(self, path):
        return path in self._cache

    def clean(self):
        """Discard all cached writes and any in-memory merge state."""
        self._mergestate = None
        self._cache = {}

    def _compact(self):
        """Removes keys from the cache that are actually clean, by comparing
        them with the underlying context.

        This can occur during the merge process, e.g. by passing --tool :local
        to resolve a conflict.
        """
        keys = []
        # This won't be perfect, but can help performance significantly when
        # using things like remotefilelog.
        scmutil.prefetchfiles(
            self.repo(),
            [
                (
                    self.p1().rev(),
                    scmutil.matchfiles(self.repo(), self._cache.keys()),
                )
            ],
        )

        for path in self._cache.keys():
            cache = self._cache[path]
            try:
                underlying = self._wrappedctx[path]
                if (
                    underlying.data() == cache[b'data']
                    and underlying.flags() == cache[b'flags']
                ):
                    keys.append(path)
            except error.ManifestLookupError:
                # Path not in the underlying manifest (created).
                continue

        for path in keys:
            del self._cache[path]
        return keys

    def _markdirty(
        self, path, exists, data=None, date=None, flags=b'', copied=None
    ):
        # data not provided, let's see if we already have some; if not, let's
        # grab it from our underlying context, so that we always have data if
        # the file is marked as existing.
        if exists and data is None:
            oldentry = self._cache.get(path) or {}
            data = oldentry.get(b'data')
            if data is None:
                data = self._wrappedctx[path].data()

        self._cache[path] = {
            b'exists': exists,
            b'data': data,
            b'date': date,
            b'flags': flags,
            b'copied': copied,
        }
        util.clearcachedproperty(self, b'_manifest')

    def filectx(self, path, filelog=None):
        return overlayworkingfilectx(
            self._repo, path, parent=self, filelog=filelog
        )

    def mergestate(self, clean=False):
        # Lazily create (or reset, when ``clean``) the in-memory merge state.
        if clean or self._mergestate is None:
            self._mergestate = mergestatemod.memmergestate(self._repo)
        return self._mergestate
2619
2621
2620
2622
class overlayworkingfilectx(committablefilectx):
    """Wrap a ``workingfilectx`` but intercepts all writes into an in-memory
    cache, which can be flushed through later by calling ``flush()``."""

    def __init__(self, repo, path, filelog=None, parent=None):
        super(overlayworkingfilectx, self).__init__(repo, path, filelog, parent)
        self._repo = repo
        # ``parent`` is the owning overlayworkingctx; every operation below
        # delegates to it with this file's path.
        self._parent = parent
        self._path = path

    def cmp(self, fctx):
        # True when contents differ (i.e. "not equal"), mirroring filecmp.
        return self.data() != fctx.data()

    def changectx(self):
        return self._parent

    def data(self):
        return self._parent.data(self._path)

    def date(self):
        return self._parent.filedate(self._path)

    def exists(self):
        return self.lexists()

    def lexists(self):
        return self._parent.exists(self._path)

    def copysource(self):
        return self._parent.copydata(self._path)

    def size(self):
        return self._parent.size(self._path)

    def markcopied(self, origin):
        self._parent.markcopied(self._path, origin)

    def audit(self):
        # No filesystem to audit for an in-memory file.
        pass

    def flags(self):
        return self._parent.flags(self._path)

    def setflags(self, islink, isexec):
        return self._parent.setflags(self._path, islink, isexec)

    def write(self, data, flags, backgroundclose=False, **kwargs):
        # ``backgroundclose`` is accepted for interface parity but has no
        # meaning for an in-memory write.
        return self._parent.write(self._path, data, flags, **kwargs)

    def remove(self, ignoremissing=False):
        return self._parent.remove(self._path)

    def clearunknown(self):
        pass
2675
2677
2676
2678
class workingcommitctx(workingctx):
    """A workingcommitctx object makes access to data related to
    the revision being committed convenient.

    This hides changes in the working directory, if they aren't
    committed in this context.
    """

    def __init__(
        self, repo, changes, text=b"", user=None, date=None, extra=None
    ):
        super(workingcommitctx, self).__init__(
            repo, text, user, date, extra, changes
        )

    def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
        """Return matched files only in ``self._status``

        Uncommitted files appear "clean" via this context, even if
        they aren't actually so in the working directory.
        """
        if clean:
            # Everything in the manifest that is not being committed is
            # reported clean.
            clean = [f for f in self._manifest if f not in self._changedset]
        else:
            clean = []
        # Positional fields: modified, added, removed, deleted, unknown,
        # ignored, clean — the middle three are always empty here.
        return scmutil.status(
            [f for f in self._status.modified if match(f)],
            [f for f in self._status.added if match(f)],
            [f for f in self._status.removed if match(f)],
            [],
            [],
            [],
            clean,
        )

    @propertycache
    def _changedset(self):
        """Return the set of files changed in this context"""
        changed = set(self._status.modified)
        changed.update(self._status.added)
        changed.update(self._status.removed)
        return changed
2719
2721
2720
2722
def makecachingfilectxfn(func):
    """Create a filectxfn that caches based on the path.

    We can't use util.cachefunc because it uses all arguments as the cache
    key and this creates a cycle since the arguments include the repo and
    memctx.
    """
    cache = {}

    def getfilectx(repo, memctx, path):
        # Only ``path`` participates in the cache key; repo/memctx are
        # assumed constant for the lifetime of this closure.
        if path not in cache:
            cache[path] = func(repo, memctx, path)
        return cache[path]

    return getfilectx
2736
2738
2737
2739
def memfilefromctx(ctx):
    """Given a context return a memfilectx for ctx[path]

    This is a convenience method for building a memctx based on another
    context.
    """

    def getfilectx(repo, memctx, path):
        fctx = ctx[path]
        copysource = fctx.copysource()
        return memfilectx(
            repo,
            memctx,
            path,
            fctx.data(),
            islink=fctx.islink(),
            isexec=fctx.isexec(),
            copysource=copysource,
        )

    return getfilectx
2759
2761
2760
2762
def memfilefrompatch(patchstore):
    """Given a patch (e.g. patchstore object) return a memfilectx

    This is a convenience method for building a memctx based on a patchstore.
    """

    def getfilectx(repo, memctx, path):
        data, mode, copysource = patchstore.getfile(path)
        if data is None:
            # ``None`` data signals a deletion; memctx interprets a None
            # return as "file removed".
            return None
        islink, isexec = mode
        return memfilectx(
            repo,
            memctx,
            path,
            data,
            islink=islink,
            isexec=isexec,
            copysource=copysource,
        )

    return getfilectx
2783
2785
2784
2786
class memctx(committablectx):
    """Use memctx to perform in-memory commits via localrepo.commitctx().

    Revision information is supplied at initialization time while
    related files data and is made available through a callback
    mechanism.  'repo' is the current localrepo, 'parents' is a
    sequence of two parent revisions identifiers (pass None for every
    missing parent), 'text' is the commit message and 'files' lists
    names of files touched by the revision (normalized and relative to
    repository root).

    filectxfn(repo, memctx, path) is a callable receiving the
    repository, the current memctx object and the normalized path of
    requested file, relative to repository root. It is fired by the
    commit function for every file in 'files', but calls order is
    undefined. If the file is available in the revision being
    committed (updated or added), filectxfn returns a memfilectx
    object. If the file was removed, filectxfn return None for recent
    Mercurial. Moved files are represented by marking the source file
    removed and the new file added with copy information (see
    memfilectx).

    user receives the committer name and defaults to current
    repository username, date is the commit date in any format
    supported by dateutil.parsedate() and defaults to current date, extra
    is a dictionary of metadata or is left empty.
    """

    # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
    # Extensions that need to retain compatibility across Mercurial 3.1 can use
    # this field to determine what to do in filectxfn.
    _returnnoneformissingfiles = True

    def __init__(
        self,
        repo,
        parents,
        text,
        files,
        filectxfn,
        user=None,
        date=None,
        extra=None,
        branch=None,
        editor=None,
    ):
        super(memctx, self).__init__(
            repo, text, user, date, extra, branch=branch
        )
        # Not yet committed, so no revision number or node id.
        self._rev = None
        self._node = None
        # Replace a None parent with the null node id so both slots are
        # always valid lookup keys.
        parents = [(p or self._repo.nodeconstants.nullid) for p in parents]
        p1, p2 = parents
        self._parents = [self._repo[p] for p in (p1, p2)]
        # Normalize the touched-file list: deduplicate and sort.
        files = sorted(set(files))
        self._files = files
        self.substate = {}

        # Accept three forms of file source: a patch store, a context-like
        # mapping, or an already-callable filectxfn.
        if isinstance(filectxfn, patch.filestore):
            filectxfn = memfilefrompatch(filectxfn)
        elif not callable(filectxfn):
            # if store is not callable, wrap it in a function
            filectxfn = memfilefromctx(filectxfn)

        # memoizing increases performance for e.g. vcs convert scenarios.
        self._filectxfn = makecachingfilectxfn(filectxfn)

        if editor:
            self._text = editor(self._repo, self, [])
            self._repo.savecommitmessage(self._text)

    def filectx(self, path, filelog=None):
        """get a file context from the working directory

        Returns None if file doesn't exist and should be removed."""
        return self._filectxfn(self._repo, self, path)

    def commit(self):
        """commit context to the repo"""
        return self._repo.commitctx(self)

    @propertycache
    def _manifest(self):
        """generate a manifest based on the return values of filectxfn"""

        # keep this simple for now; just worry about p1
        pctx = self._parents[0]
        man = pctx.manifest().copy()

        # Modified/added entries get placeholder node ids; the real ids
        # are only known once the files are actually committed.
        for f in self._status.modified:
            man[f] = self._repo.nodeconstants.modifiednodeid

        for f in self._status.added:
            man[f] = self._repo.nodeconstants.addednodeid

        for f in self._status.removed:
            if f in man:
                del man[f]

        return man

    @propertycache
    def _status(self):
        """Calculate exact status from ``files`` specified at construction"""
        man1 = self.p1().manifest()
        p2 = self._parents[1]
        # "1 < len(self._parents)" can't be used for checking
        # existence of the 2nd parent, because "memctx._parents" is
        # explicitly initialized by the list, of which length is 2.
        if p2.rev() != nullrev:
            man2 = p2.manifest()
            managing = lambda f: f in man1 or f in man2
        else:
            managing = lambda f: f in man1

        # Classify each touched file: unknown to both parents -> added;
        # filectxfn returns a context -> modified; returns None -> removed.
        modified, added, removed = [], [], []
        for f in self._files:
            if not managing(f):
                added.append(f)
            elif self[f]:
                modified.append(f)
            else:
                removed.append(f)

        return scmutil.status(modified, added, removed, [], [], [], [])

    def parents(self):
        # Hide the synthetic null second parent from callers.
        if self._parents[1].rev() == nullrev:
            return [self._parents[0]]
        return self._parents
2915
2917
2916
2918
class memfilectx(committablefilectx):
    """An in-memory file waiting to be committed.

    See memctx and committablefilectx for more details.
    """

    def __init__(
        self,
        repo,
        changectx,
        path,
        data,
        islink=False,
        isexec=False,
        copysource=None,
    ):
        """
        path is the normalized file path relative to repository root.
        data is the file content as a string.
        islink is True if the file is a symbolic link.
        isexec is True if the file is executable.
        copied is the source file path if current file was copied in the
        revision being committed, or None."""
        super(memfilectx, self).__init__(repo, path, None, changectx)
        self._data = data
        # Encode link/exec status the same way manifests do: b'l' for a
        # symlink, b'x' for an executable, empty otherwise.
        self._flags = b'l' if islink else (b'x' if isexec else b'')
        self._copysource = copysource

    def copysource(self):
        """Return the recorded copy source path, or None."""
        return self._copysource

    def cmp(self, fctx):
        """Return True if this file's content differs from fctx's."""
        return self.data() != fctx.data()

    def data(self):
        """Return the in-memory file content."""
        return self._data

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        # need to figure out what to do here
        del self._changectx[self._path]

    def write(self, data, flags, **kwargs):
        """wraps repo.wwrite"""
        self._data = data
2967
2969
2968
2970
class metadataonlyctx(committablectx):
    """Like memctx but it's reusing the manifest of different commit.
    Intended to be used by lightweight operations that are creating
    metadata-only changes.

    Revision information is supplied at initialization time.  'repo' is the
    current localrepo, 'ctx' is original revision which manifest we're reusing
    'parents' is a sequence of two parent revisions identifiers (pass None for
    every missing parent), 'text' is the commit.

    user receives the committer name and defaults to current repository
    username, date is the commit date in any format supported by
    dateutil.parsedate() and defaults to current date, extra is a dictionary of
    metadata or is left empty.
    """

    def __init__(
        self,
        repo,
        originalctx,
        parents=None,
        text=None,
        user=None,
        date=None,
        extra=None,
        editor=None,
    ):
        # Default to the original changeset's description when no new
        # commit message is supplied.
        if text is None:
            text = originalctx.description()
        super(metadataonlyctx, self).__init__(repo, text, user, date, extra)
        # Not yet committed: no revision number or node id.
        self._rev = None
        self._node = None
        self._originalctx = originalctx
        self._manifestnode = originalctx.manifestnode()
        if parents is None:
            parents = originalctx.parents()
        else:
            parents = [repo[p] for p in parents if p is not None]
        # Copy before padding so a caller-supplied list is never mutated,
        # then pad with the null revision so both parent slots are filled.
        parents = parents[:]
        while len(parents) < 2:
            parents.append(repo[nullrev])
        p1, p2 = self._parents = parents

        # sanity check to ensure that the reused manifest parents are
        # manifests of our commit parents
        mp1, mp2 = self.manifestctx().parents
        if p1 != self._repo.nodeconstants.nullid and p1.manifestnode() != mp1:
            raise RuntimeError(
                r"can't reuse the manifest: its p1 "
                r"doesn't match the new ctx p1"
            )
        if p2 != self._repo.nodeconstants.nullid and p2.manifestnode() != mp2:
            raise RuntimeError(
                r"can't reuse the manifest: "
                r"its p2 doesn't match the new ctx p2"
            )

        self._files = originalctx.files()
        self.substate = {}

        if editor:
            self._text = editor(self._repo, self, [])
            self._repo.savecommitmessage(self._text)

    def manifestnode(self):
        """Return the node id of the reused manifest."""
        return self._manifestnode

    @property
    def _manifestctx(self):
        return self._repo.manifestlog[self._manifestnode]

    def filectx(self, path, filelog=None):
        # File contents are unchanged; delegate to the original context.
        return self._originalctx.filectx(path, filelog=filelog)

    def commit(self):
        """commit context to the repo"""
        return self._repo.commitctx(self)

    @property
    def _manifest(self):
        return self._originalctx.manifest()

    @propertycache
    def _status(self):
        """Calculate exact status from ``files`` specified in the ``origctx``
        and parents manifests.
        """
        man1 = self.p1().manifest()
        p2 = self._parents[1]
        # "1 < len(self._parents)" can't be used for checking
        # existence of the 2nd parent, because "metadataonlyctx._parents" is
        # explicitly initialized by the list, of which length is 2.
        if p2.rev() != nullrev:
            man2 = p2.manifest()
            managing = lambda f: f in man1 or f in man2
        else:
            managing = lambda f: f in man1

        # Classify each touched file relative to the (possibly new)
        # parents: unknown -> added, present here -> modified, else removed.
        modified, added, removed = [], [], []
        for f in self._files:
            if not managing(f):
                added.append(f)
            elif f in self:
                modified.append(f)
            else:
                removed.append(f)

        return scmutil.status(modified, added, removed, [], [], [], [])
3077
3079
3078
3080
class arbitraryfilectx(object):
    """Expose filectx-like operations for a file at an arbitrary location
    on disk, possibly not inside the working directory.
    """

    def __init__(self, path, repo=None):
        # Repo is optional because contrib/simplemerge uses this class.
        self._repo = repo
        self._path = path

    def cmp(self, fctx):
        """Return True if this file's content differs from fctx's."""
        # filecmp follows symlinks whereas `cmp` should not, so skip the fast
        # path if either side is a symlink.
        no_symlinks = b'l' not in self.flags() and b'l' not in fctx.flags()
        if no_symlinks and self._repo and isinstance(fctx, workingfilectx):
            # Fast path for merges when both sides are disk-backed.  Note
            # that filecmp returns True for identical files, the opposite
            # of our cmp convention (True means different).
            return not filecmp.cmp(self.path(), self._repo.wjoin(fctx.path()))
        return self.data() != fctx.data()

    def path(self):
        """Return the on-disk path wrapped by this context."""
        return self._path

    def flags(self):
        # Plain disk files never carry manifest flags.
        return b''

    def data(self):
        return util.readfile(self._path)

    def decodeddata(self):
        with open(self._path, b"rb") as fp:
            return fp.read()

    def remove(self):
        util.unlink(self._path)

    def write(self, data, flags, **kwargs):
        assert not flags
        with open(self._path, b"wb") as fp:
            fp.write(data)
General Comments 0
You need to be logged in to leave comments. Login now