##// END OF EJS Templates
context: add missing manifest invalidation after write in overlayworkingctx...
Augie Fackler -
r47144:bc3f3b59 default
parent child Browse files
Show More
@@ -1,3110 +1,3111 b''
1 # context.py - changeset and file context objects for mercurial
1 # context.py - changeset and file context objects for mercurial
2 #
2 #
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import errno
10 import errno
11 import filecmp
11 import filecmp
12 import os
12 import os
13 import stat
13 import stat
14
14
15 from .i18n import _
15 from .i18n import _
16 from .node import (
16 from .node import (
17 addednodeid,
17 addednodeid,
18 hex,
18 hex,
19 modifiednodeid,
19 modifiednodeid,
20 nullid,
20 nullid,
21 nullrev,
21 nullrev,
22 short,
22 short,
23 wdirfilenodeids,
23 wdirfilenodeids,
24 wdirhex,
24 wdirhex,
25 )
25 )
26 from .pycompat import (
26 from .pycompat import (
27 getattr,
27 getattr,
28 open,
28 open,
29 )
29 )
30 from . import (
30 from . import (
31 dagop,
31 dagop,
32 encoding,
32 encoding,
33 error,
33 error,
34 fileset,
34 fileset,
35 match as matchmod,
35 match as matchmod,
36 mergestate as mergestatemod,
36 mergestate as mergestatemod,
37 metadata,
37 metadata,
38 obsolete as obsmod,
38 obsolete as obsmod,
39 patch,
39 patch,
40 pathutil,
40 pathutil,
41 phases,
41 phases,
42 pycompat,
42 pycompat,
43 repoview,
43 repoview,
44 scmutil,
44 scmutil,
45 sparse,
45 sparse,
46 subrepo,
46 subrepo,
47 subrepoutil,
47 subrepoutil,
48 util,
48 util,
49 )
49 )
50 from .utils import (
50 from .utils import (
51 dateutil,
51 dateutil,
52 stringutil,
52 stringutil,
53 )
53 )
54
54
55 propertycache = util.propertycache
55 propertycache = util.propertycache
56
56
57
57
class basectx(object):
    """A basectx object represents the common logic for its children:
    changectx: read-only context that is already present in the repo,
    workingctx: a context that represents the working directory and can
    be committed,
    memctx: a context that represents changes in-memory and can also
    be committed."""

    def __init__(self, repo):
        # The repository this context belongs to; subclasses are expected
        # to also provide _rev/_node/_manifest as appropriate.
        self._repo = repo

    def __bytes__(self):
        # Short (12-hex-digit) form of the node hash.
        return short(self.node())

    # str() mirrors bytes() through the encoding helper (py2/py3 bridge).
    __str__ = encoding.strmethod(__bytes__)

    def __repr__(self):
        return "<%s %s>" % (type(self).__name__, str(self))

    def __eq__(self, other):
        # Contexts are equal when they are of the same concrete type and
        # point at the same revision.
        try:
            return type(self) == type(other) and self._rev == other._rev
        except AttributeError:
            return False

    def __ne__(self, other):
        return not (self == other)

    def __contains__(self, key):
        """True if file path *key* exists in this context's manifest."""
        return key in self._manifest

    def __getitem__(self, key):
        """Return a file context for file path *key*."""
        return self.filectx(key)

    def __iter__(self):
        """Iterate over the file paths in this context's manifest."""
        return iter(self._manifest)

    def _buildstatusmanifest(self, status):
        """Builds a manifest that includes the given status results, if this is
        a working copy context. For non-working copy contexts, it just returns
        the normal manifest."""
        return self.manifest()

    def _matchstatus(self, other, match):
        """This internal method provides a way for child objects to override the
        match operator.
        """
        return match

    def _buildstatus(
        self, other, s, match, listignored, listclean, listunknown
    ):
        """build a status with respect to another context"""
        # Load earliest manifest first for caching reasons. More specifically,
        # if you have revisions 1000 and 1001, 1001 is probably stored as a
        # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
        # 1000 and cache it so that when you read 1001, we just need to apply a
        # delta to what's in the cache. So that's one full reconstruction + one
        # delta application.
        mf2 = None
        if self.rev() is not None and self.rev() < other.rev():
            mf2 = self._buildstatusmanifest(s)
        mf1 = other._buildstatusmanifest(s)
        if mf2 is None:
            mf2 = self._buildstatusmanifest(s)

        modified, added = [], []
        removed = []
        clean = []
        deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
        deletedset = set(deleted)
        d = mf1.diff(mf2, match=match, clean=listclean)
        for fn, value in pycompat.iteritems(d):
            if fn in deletedset:
                continue
            if value is None:
                clean.append(fn)
                continue
            (node1, flag1), (node2, flag2) = value
            if node1 is None:
                added.append(fn)
            elif node2 is None:
                removed.append(fn)
            elif flag1 != flag2:
                modified.append(fn)
            elif node2 not in wdirfilenodeids:
                # When comparing files between two commits, we save time by
                # not comparing the file contents when the nodeids differ.
                # Note that this means we incorrectly report a reverted change
                # to a file as a modification.
                modified.append(fn)
            elif self[fn].cmp(other[fn]):
                modified.append(fn)
            else:
                clean.append(fn)

        if removed:
            # need to filter files if they are already reported as removed
            unknown = [
                fn
                for fn in unknown
                if fn not in mf1 and (not match or match(fn))
            ]
            ignored = [
                fn
                for fn in ignored
                if fn not in mf1 and (not match or match(fn))
            ]
            # if they're deleted, don't report them as removed
            removed = [fn for fn in removed if fn not in deletedset]

        return scmutil.status(
            modified, added, removed, deleted, unknown, ignored, clean
        )

    @propertycache
    def substate(self):
        # Parsed .hgsub/.hgsubstate information for this context.
        return subrepoutil.state(self, self._repo.ui)

    def subrev(self, subpath):
        """Return the recorded revision of the subrepo at *subpath*."""
        return self.substate[subpath][1]

    def rev(self):
        """Return the revision number of this context."""
        return self._rev

    def node(self):
        """Return the binary node id of this context."""
        return self._node

    def hex(self):
        """Return the full hex form of the node id."""
        return hex(self.node())

    def manifest(self):
        return self._manifest

    def manifestctx(self):
        return self._manifestctx

    def repo(self):
        return self._repo

    def phasestr(self):
        """Return the name of this changeset's phase (per phases.phasenames)."""
        return phases.phasenames[self.phase()]

    def mutable(self):
        """True when the phase is anything above public."""
        return self.phase() > phases.public

    def matchfileset(self, cwd, expr, badfn=None):
        """Return a matcher for fileset expression *expr* against this ctx."""
        return fileset.match(self, cwd, expr, badfn=badfn)

    def obsolete(self):
        """True if the changeset is obsolete"""
        return self.rev() in obsmod.getrevs(self._repo, b'obsolete')

    def extinct(self):
        """True if the changeset is extinct"""
        return self.rev() in obsmod.getrevs(self._repo, b'extinct')

    def orphan(self):
        """True if the changeset is not obsolete, but its ancestor is"""
        return self.rev() in obsmod.getrevs(self._repo, b'orphan')

    def phasedivergent(self):
        """True if the changeset tries to be a successor of a public changeset

        Only non-public and non-obsolete changesets may be phase-divergent.
        """
        return self.rev() in obsmod.getrevs(self._repo, b'phasedivergent')

    def contentdivergent(self):
        """Is a successor of a changeset with multiple possible successor sets

        Only non-public and non-obsolete changesets may be content-divergent.
        """
        return self.rev() in obsmod.getrevs(self._repo, b'contentdivergent')

    def isunstable(self):
        """True if the changeset is either orphan, phase-divergent or
        content-divergent"""
        return self.orphan() or self.phasedivergent() or self.contentdivergent()

    def instabilities(self):
        """return the list of instabilities affecting this changeset.

        Instabilities are returned as strings. possible values are:
        - orphan,
        - phase-divergent,
        - content-divergent.
        """
        instabilities = []
        if self.orphan():
            instabilities.append(b'orphan')
        if self.phasedivergent():
            instabilities.append(b'phase-divergent')
        if self.contentdivergent():
            instabilities.append(b'content-divergent')
        return instabilities

    def parents(self):
        """return contexts for each parent changeset"""
        return self._parents

    def p1(self):
        """Return the first parent context."""
        return self._parents[0]

    def p2(self):
        """Return the second parent context, or the null context if none."""
        parents = self._parents
        if len(parents) == 2:
            return parents[1]
        return self._repo[nullrev]

    def _fileinfo(self, path):
        """Return (filenode, flags) for *path*, raising ManifestLookupError
        when the file is not present in this context's manifest.

        Tries progressively cheaper sources: an already-loaded manifest, a
        cached manifest delta, and finally a full manifest read.
        """
        if '_manifest' in self.__dict__:
            try:
                return self._manifest.find(path)
            except KeyError:
                raise error.ManifestLookupError(
                    self._node or b'None', path, _(b'not found in manifest')
                )
        if '_manifestdelta' in self.__dict__ or path in self.files():
            if path in self._manifestdelta:
                return (
                    self._manifestdelta[path],
                    self._manifestdelta.flags(path),
                )
        mfl = self._repo.manifestlog
        try:
            node, flag = mfl[self._changeset.manifest].find(path)
        except KeyError:
            raise error.ManifestLookupError(
                self._node or b'None', path, _(b'not found in manifest')
            )

        return node, flag

    def filenode(self, path):
        """Return the filelog node for *path* in this context."""
        return self._fileinfo(path)[0]

    def flags(self, path):
        """Return the flags (b'l', b'x', ...) for *path*, or b'' if absent."""
        try:
            return self._fileinfo(path)[1]
        except error.LookupError:
            return b''

    @propertycache
    def _copies(self):
        return metadata.computechangesetcopies(self)

    def p1copies(self):
        """Return the {dest: source} copy map relative to the first parent."""
        return self._copies[0]

    def p2copies(self):
        """Return the {dest: source} copy map relative to the second parent."""
        return self._copies[1]

    def sub(self, path, allowcreate=True):
        '''return a subrepo for the stored revision of path, never wdir()'''
        return subrepo.subrepo(self, path, allowcreate=allowcreate)

    def nullsub(self, path, pctx):
        return subrepo.nullsubrepo(self, path, pctx)

    def workingsub(self, path):
        """return a subrepo for the stored revision, or wdir if this is a wdir
        context.
        """
        return subrepo.subrepo(self, path, allowwdir=True)

    def match(
        self,
        pats=None,
        include=None,
        exclude=None,
        default=b'glob',
        listsubrepos=False,
        badfn=None,
        cwd=None,
    ):
        """Build a matcher for *pats* rooted at this repository."""
        r = self._repo
        if not cwd:
            cwd = r.getcwd()
        return matchmod.match(
            r.root,
            cwd,
            pats,
            include,
            exclude,
            default,
            auditor=r.nofsauditor,
            ctx=self,
            listsubrepos=listsubrepos,
            badfn=badfn,
        )

    def diff(
        self,
        ctx2=None,
        match=None,
        changes=None,
        opts=None,
        losedatafn=None,
        pathfn=None,
        copy=None,
        copysourcematch=None,
        hunksfilterfn=None,
    ):
        """Returns a diff generator for the given contexts and matcher"""
        # Default to diffing against the first parent when ctx2 is omitted.
        if ctx2 is None:
            ctx2 = self.p1()
        if ctx2 is not None:
            ctx2 = self._repo[ctx2]
        return patch.diff(
            self._repo,
            ctx2,
            self,
            match=match,
            changes=changes,
            opts=opts,
            losedatafn=losedatafn,
            pathfn=pathfn,
            copy=copy,
            copysourcematch=copysourcematch,
            hunksfilterfn=hunksfilterfn,
        )

    def dirs(self):
        """Return the directories present in this context's manifest."""
        return self._manifest.dirs()

    def hasdir(self, dir):
        """True if *dir* is a directory in this context's manifest."""
        return self._manifest.hasdir(dir)

    def status(
        self,
        other=None,
        match=None,
        listignored=False,
        listclean=False,
        listunknown=False,
        listsubrepos=False,
    ):
        """return status of files between two nodes or node and working
        directory.

        If other is None, compare this node with working directory.

        ctx1.status(ctx2) returns the status of change from ctx1 to ctx2

        Returns a mercurial.scmutils.status object.

        Data can be accessed using either tuple notation:

        (modified, added, removed, deleted, unknown, ignored, clean)

        or direct attribute access:

        s.modified, s.added, ...
        """

        ctx1 = self
        ctx2 = self._repo[other]

        # This next code block is, admittedly, fragile logic that tests for
        # reversing the contexts and wouldn't need to exist if it weren't for
        # the fast (and common) code path of comparing the working directory
        # with its first parent.
        #
        # What we're aiming for here is the ability to call:
        #
        # workingctx.status(parentctx)
        #
        # If we always built the manifest for each context and compared those,
        # then we'd be done. But the special case of the above call means we
        # just copy the manifest of the parent.
        reversed = False
        if not isinstance(ctx1, changectx) and isinstance(ctx2, changectx):
            reversed = True
            ctx1, ctx2 = ctx2, ctx1

        match = self._repo.narrowmatch(match)
        match = ctx2._matchstatus(ctx1, match)
        r = scmutil.status([], [], [], [], [], [], [])
        r = ctx2._buildstatus(
            ctx1, r, match, listignored, listclean, listunknown
        )

        if reversed:
            # Reverse added and removed. Clear deleted, unknown and ignored as
            # these make no sense to reverse.
            r = scmutil.status(
                r.modified, r.removed, r.added, [], [], [], r.clean
            )

        if listsubrepos:
            for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
                try:
                    rev2 = ctx2.subrev(subpath)
                except KeyError:
                    # A subrepo that existed in node1 was deleted between
                    # node1 and node2 (inclusive). Thus, ctx2's substate
                    # won't contain that subpath. The best we can do ignore it.
                    rev2 = None
                submatch = matchmod.subdirmatcher(subpath, match)
                s = sub.status(
                    rev2,
                    match=submatch,
                    ignored=listignored,
                    clean=listclean,
                    unknown=listunknown,
                    listsubrepos=True,
                )
                for k in (
                    'modified',
                    'added',
                    'removed',
                    'deleted',
                    'unknown',
                    'ignored',
                    'clean',
                ):
                    rfiles, sfiles = getattr(r, k), getattr(s, k)
                    rfiles.extend(b"%s/%s" % (subpath, f) for f in sfiles)

        r.modified.sort()
        r.added.sort()
        r.removed.sort()
        r.deleted.sort()
        r.unknown.sort()
        r.ignored.sort()
        r.clean.sort()

        return r

    def mergestate(self, clean=False):
        """Get a mergestate object for this context."""
        raise NotImplementedError(
            '%s does not implement mergestate()' % self.__class__
        )

    def isempty(self):
        """True if committing this context would create an empty changeset:
        a single parent, same branch as p1, not closing a branch, and no
        touched files."""
        return not (
            len(self.parents()) > 1
            or self.branch() != self.p1().branch()
            or self.closesbranch()
            or self.files()
        )
501
501
502
502
503 class changectx(basectx):
503 class changectx(basectx):
504 """A changecontext object makes access to data related to a particular
504 """A changecontext object makes access to data related to a particular
505 changeset convenient. It represents a read-only context already present in
505 changeset convenient. It represents a read-only context already present in
506 the repo."""
506 the repo."""
507
507
    def __init__(self, repo, rev, node, maybe_filtered=True):
        """Initialize a read-only context for revision *rev* / node *node*."""
        super(changectx, self).__init__(repo)
        self._rev = rev
        self._node = node
        # When maybe_filtered is True, the revision might be affected by
        # changelog filtering and operation through the filtered changelog must be used.
        #
        # When maybe_filtered is False, the revision has already been checked
        # against filtering and is not filtered. Operation through the
        # unfiltered changelog might be used in some case.
        self._maybe_filtered = maybe_filtered
519
519
520 def __hash__(self):
520 def __hash__(self):
521 try:
521 try:
522 return hash(self._rev)
522 return hash(self._rev)
523 except AttributeError:
523 except AttributeError:
524 return id(self)
524 return id(self)
525
525
    def __nonzero__(self):
        # A context is truthy unless it is the null revision.
        return self._rev != nullrev

    # Python 3 spelling of the truthiness protocol.
    __bool__ = __nonzero__
530
530
    @propertycache
    def _changeset(self):
        """Cached changelogrevision entry for this revision.

        Uses the unfiltered changelog when the revision is already known to
        be unaffected by changelog filtering.
        """
        if self._maybe_filtered:
            repo = self._repo
        else:
            repo = self._repo.unfiltered()
        return repo.changelog.changelogrevision(self.rev())
538
538
    @propertycache
    def _manifest(self):
        # Fully-read manifest for this changeset (cached after first access).
        return self._manifestctx.read()
542
542
    @property
    def _manifestctx(self):
        # Manifest context looked up by this changeset's manifest node.
        return self._repo.manifestlog[self._changeset.manifest]
546
546
    @propertycache
    def _manifestdelta(self):
        # Manifest delta against the parent; cheaper than a full read when
        # only the files touched by this changeset are needed.
        return self._manifestctx.readdelta()
550
550
    @propertycache
    def _parents(self):
        """Cached list of parent changectx objects (one or two entries).

        Parents returned by the changelog are known-unfiltered, so the child
        contexts are created with maybe_filtered=False.
        """
        repo = self._repo
        if self._maybe_filtered:
            cl = repo.changelog
        else:
            cl = repo.unfiltered().changelog

        p1, p2 = cl.parentrevs(self._rev)
        if p2 == nullrev:
            return [changectx(repo, p1, cl.node(p1), maybe_filtered=False)]
        return [
            changectx(repo, p1, cl.node(p1), maybe_filtered=False),
            changectx(repo, p2, cl.node(p2), maybe_filtered=False),
        ]
566
566
    def changeset(self):
        """Return the raw changeset data as a 6-tuple:
        (manifest, user, date, files, description, extra)."""
        c = self._changeset
        return (
            c.manifest,
            c.user,
            c.date,
            c.files,
            c.description,
            c.extra,
        )
577
577
    def manifestnode(self):
        """Return the manifest node recorded in the changeset."""
        return self._changeset.manifest

    def user(self):
        """Return the committer of the changeset."""
        return self._changeset.user

    def date(self):
        """Return the date of the changeset."""
        return self._changeset.date

    def files(self):
        """Return the list of files touched by the changeset."""
        return self._changeset.files
589
589
590 def filesmodified(self):
590 def filesmodified(self):
591 modified = set(self.files())
591 modified = set(self.files())
592 modified.difference_update(self.filesadded())
592 modified.difference_update(self.filesadded())
593 modified.difference_update(self.filesremoved())
593 modified.difference_update(self.filesremoved())
594 return sorted(modified)
594 return sorted(modified)
595
595
    def filesadded(self):
        """Return the list of files added by this changeset.

        Depending on the repository's copy-tracing configuration, the value
        comes from changeset (sidedata) storage, is recomputed from filelogs,
        or is an empty list when neither source applies.
        """
        filesadded = self._changeset.filesadded
        compute_on_none = True
        if self._repo.filecopiesmode == b'changeset-sidedata':
            compute_on_none = False
        else:
            source = self._repo.ui.config(b'experimental', b'copies.read-from')
            if source == b'changeset-only':
                compute_on_none = False
            elif source != b'compatibility':
                # filelog mode, ignore any changelog content
                filesadded = None
        if filesadded is None:
            if compute_on_none:
                filesadded = metadata.computechangesetfilesadded(self)
            else:
                filesadded = []
        return filesadded
614
614
def filesremoved(self):
    """Return the list of files removed by this changeset.

    Mirrors filesadded(): prefer the value stored in the changeset when
    configuration allows it, otherwise recompute from the manifests or
    fall back to an empty list.
    """
    filesremoved = self._changeset.filesremoved
    compute_on_none = True
    if self._repo.filecopiesmode == b'changeset-sidedata':
        # sidedata is authoritative; never recompute
        compute_on_none = False
    else:
        source = self._repo.ui.config(b'experimental', b'copies.read-from')
        if source == b'changeset-only':
            compute_on_none = False
        elif source != b'compatibility':
            # filelog mode, ignore any changelog content
            filesremoved = None
    if filesremoved is None:
        if compute_on_none:
            filesremoved = metadata.computechangesetfilesremoved(self)
        else:
            filesremoved = []
    return filesremoved
@propertycache
def _copies(self):
    """Return the (p1copies, p2copies) dicts of copy/rename metadata.

    The data source depends on the repo's copy-tracing configuration:
    changeset sidedata, changeset-only, compatibility, or filelog mode.
    """
    p1copies = self._changeset.p1copies
    p2copies = self._changeset.p2copies
    compute_on_none = True
    if self._repo.filecopiesmode == b'changeset-sidedata':
        compute_on_none = False
    else:
        source = self._repo.ui.config(b'experimental', b'copies.read-from')
        # If config says to get copy metadata only from changeset, then
        # return that, defaulting to {} if there was no copy metadata. In
        # compatibility mode, we return copy data from the changeset if it
        # was recorded there, and otherwise we fall back to getting it from
        # the filelogs (below).
        #
        # If we are in compatibility mode and there is no data in the
        # changeset, we get the copy metadata from the filelogs.
        #
        # otherwise, when config said to read only from filelog, we get the
        # copy metadata from the filelogs.
        if source == b'changeset-only':
            compute_on_none = False
        elif source != b'compatibility':
            # filelog mode, ignore any changelog content
            p1copies = p2copies = None
    if p1copies is None:
        if compute_on_none:
            p1copies, p2copies = super(changectx, self)._copies
        else:
            if p1copies is None:
                p1copies = {}
            if p2copies is None:
                p2copies = {}
    return p1copies, p2copies
def description(self):
    """Return the commit message of this changeset."""
    return self._changeset.description


def branch(self):
    """Return the branch name, converted to the local encoding."""
    return encoding.tolocal(self._changeset.extra.get(b"branch"))


def closesbranch(self):
    """True if this changeset closes its branch."""
    return b'close' in self._changeset.extra


def extra(self):
    """Return a dict of extra information."""
    return self._changeset.extra


def tags(self):
    """Return a list of byte tag names"""
    return self._repo.nodetags(self._node)


def bookmarks(self):
    """Return a list of byte bookmark names."""
    return self._repo.nodebookmarks(self._node)


def phase(self):
    """Return the phase (public/draft/secret) of this changeset."""
    return self._repo._phasecache.phase(self._repo, self._rev)


def hidden(self):
    """True if this revision is filtered out of the 'visible' view."""
    return self._rev in repoview.filterrevs(self._repo, b'visible')


def isinmemory(self):
    """This context is backed by the store, not by in-memory state."""
    return False
def children(self):
    """return list of changectx contexts for each child changeset.

    This returns only the immediate child changesets. Use descendants() to
    recursively walk children.
    """
    c = self._repo.changelog.children(self._node)
    return [self._repo[x] for x in c]


def ancestors(self):
    """Yield a changectx for every ancestor of this changeset."""
    for a in self._repo.changelog.ancestors([self._rev]):
        yield self._repo[a]


def descendants(self):
    """Recursively yield all children of the changeset.

    For just the immediate children, use children()
    """
    for d in self._repo.changelog.descendants([self._rev]):
        yield self._repo[d]


def filectx(self, path, fileid=None, filelog=None):
    """get a file context from this changeset"""
    if fileid is None:
        fileid = self.filenode(path)
    return filectx(
        self._repo, path, fileid=fileid, changectx=self, filelog=filelog
    )
def ancestor(self, c2, warn=False):
    """return the "best" ancestor context of self and c2

    If there are multiple candidates, it will show a message and check
    merge.preferancestor configuration before falling back to the
    revlog ancestor."""
    # deal with workingctxs
    n2 = c2._node
    if n2 is None:
        n2 = c2._parents[0]._node
    cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
    if not cahs:
        anc = nullid
    elif len(cahs) == 1:
        anc = cahs[0]
    else:
        # experimental config: merge.preferancestor
        for r in self._repo.ui.configlist(b'merge', b'preferancestor'):
            try:
                ctx = scmutil.revsymbol(self._repo, r)
            except error.RepoLookupError:
                continue
            anc = ctx.node()
            if anc in cahs:
                break
        else:
            # no configured preference matched: use the revlog ancestor
            anc = self._repo.changelog.ancestor(self._node, n2)
            if warn:
                self._repo.ui.status(
                    (
                        _(b"note: using %s as ancestor of %s and %s\n")
                        % (short(anc), short(self._node), short(n2))
                    )
                    + b''.join(
                        _(
                            b" alternatively, use --config "
                            b"merge.preferancestor=%s\n"
                        )
                        % short(n)
                        for n in sorted(cahs)
                        if n != anc
                    )
                )
    return self._repo[anc]
def isancestorof(self, other):
    """True if this changeset is an ancestor of other"""
    return self._repo.changelog.isancestorrev(self._rev, other._rev)


def walk(self, match):
    '''Generates matching file names.'''

    # Wrap match.bad method to have message with nodeid
    def bad(fn, msg):
        # The manifest doesn't know about subrepos, so don't complain about
        # paths into valid subrepos.
        if any(fn == s or fn.startswith(s + b'/') for s in self.substate):
            return
        match.bad(fn, _(b'no such file in rev %s') % self)

    m = matchmod.badmatch(self._repo.narrowmatch(match), bad)
    return self._manifest.walk(m)


def matches(self, match):
    """Alias of walk(): yield file names matching *match*."""
    return self.walk(match)
class basefilectx(object):
    """A filecontext object represents the common logic for its children:
    filectx: read-only access to a filerevision that is already present
             in the repo,
    workingfilectx: a filecontext that represents files from the working
                    directory,
    memfilectx: a filecontext that represents files in-memory,
    """
@propertycache
def _filelog(self):
    # The filelog (revlog) storing this file's history.
    return self._repo.file(self._path)


@propertycache
def _changeid(self):
    # The changelog revision this file context is associated with.
    if '_changectx' in self.__dict__:
        return self._changectx.rev()
    elif '_descendantrev' in self.__dict__:
        # this file context was created from a revision with a known
        # descendant, we can (lazily) correct for linkrev aliases
        return self._adjustlinkrev(self._descendantrev)
    else:
        return self._filelog.linkrev(self._filerev)


@propertycache
def _filenode(self):
    # Resolve the file node either from an explicit file id or via the
    # associated changeset's manifest.
    if '_fileid' in self.__dict__:
        return self._filelog.lookup(self._fileid)
    else:
        return self._changectx.filenode(self._path)


@propertycache
def _filerev(self):
    # Filelog revision number for this file node.
    return self._filelog.rev(self._filenode)


@propertycache
def _repopath(self):
    # Repo-relative path; identical to _path for store-backed contexts.
    return self._path
def __nonzero__(self):
    """Truthiness: True when the file revision actually exists."""
    try:
        self._filenode
        return True
    except error.LookupError:
        # file is missing
        return False


__bool__ = __nonzero__


def __bytes__(self):
    try:
        return b"%s@%s" % (self.path(), self._changectx)
    except error.LookupError:
        return b"%s@???" % self.path()


__str__ = encoding.strmethod(__bytes__)


def __repr__(self):
    return "<%s %s>" % (type(self).__name__, str(self))


def __hash__(self):
    try:
        return hash((self._path, self._filenode))
    except AttributeError:
        # unresolved filenode: fall back to identity hashing
        return id(self)


def __eq__(self, other):
    try:
        return (
            type(self) == type(other)
            and self._path == other._path
            and self._filenode == other._filenode
        )
    except AttributeError:
        return False


def __ne__(self, other):
    return not (self == other)
def filerev(self):
    """Return the filelog revision number of this file revision."""
    return self._filerev


def filenode(self):
    """Return the filelog node of this file revision."""
    return self._filenode
@propertycache
def _flags(self):
    # Flags ('l' symlink / 'x' exec) as recorded in the manifest.
    return self._changectx.flags(self._path)
def flags(self):
    """Return the manifest flags ('l'/'x') of this file revision."""
    return self._flags


def filelog(self):
    """Return the filelog storing this file's history."""
    return self._filelog


def rev(self):
    """Return the changelog revision associated with this file context."""
    return self._changeid


def linkrev(self):
    """Return the raw linkrev stored in the filelog (may be aliased)."""
    return self._filelog.linkrev(self._filerev)


def node(self):
    """Return the node of the associated changeset."""
    return self._changectx.node()


def hex(self):
    """Return the hex node of the associated changeset."""
    return self._changectx.hex()


def user(self):
    """Return the committer of the associated changeset."""
    return self._changectx.user()


def date(self):
    """Return the date of the associated changeset."""
    return self._changectx.date()


def files(self):
    """Return the files touched by the associated changeset."""
    return self._changectx.files()


def description(self):
    """Return the commit message of the associated changeset."""
    return self._changectx.description()


def branch(self):
    """Return the branch of the associated changeset."""
    return self._changectx.branch()


def extra(self):
    """Return the extras dict of the associated changeset."""
    return self._changectx.extra()


def phase(self):
    """Return the phase of the associated changeset."""
    return self._changectx.phase()


def phasestr(self):
    """Return the phase name of the associated changeset."""
    return self._changectx.phasestr()
def obsolete(self):
    """True if the associated changeset is obsolete."""
    return self._changectx.obsolete()


def instabilities(self):
    """Return the instabilities of the associated changeset."""
    return self._changectx.instabilities()


def manifest(self):
    """Return the manifest of the associated changeset."""
    return self._changectx.manifest()


def changectx(self):
    """Return the changeset context this file context belongs to."""
    return self._changectx


def renamed(self):
    """Return the (source path, source node) copy info, or a falsy value."""
    return self._copied


def copysource(self):
    """Return the copy source path, or a falsy value if not copied."""
    return self._copied and self._copied[0]


def repo(self):
    """Return the repository this context belongs to."""
    return self._repo


def size(self):
    """Return the length in bytes of this file revision's data."""
    return len(self.data())


def path(self):
    """Return the repo-relative path of this file."""
    return self._path


def isbinary(self):
    """True if the file data looks binary; False when data is unreadable."""
    try:
        return stringutil.binary(self.data())
    except IOError:
        return False


def isexec(self):
    """True if the file carries the executable flag."""
    return b'x' in self.flags()


def islink(self):
    """True if the file is a symlink."""
    return b'l' in self.flags()


def isabsent(self):
    """whether this filectx represents a file not in self._changectx

    This is mainly for merge code to detect change/delete conflicts. This is
    expected to be True for all subclasses of basectx."""
    return False


_customcmp = False
def cmp(self, fctx):
    """compare with other file context

    returns True if different than fctx.
    """
    if fctx._customcmp:
        # the other side knows how to compare (e.g. largefiles); defer
        return fctx.cmp(self)

    if self._filenode is None:
        raise error.ProgrammingError(
            b'filectx.cmp() must be reimplemented if not backed by revlog'
        )

    if fctx._filenode is None:
        if self._repo._encodefilterpats:
            # can't rely on size() because wdir content may be decoded
            return self._filelog.cmp(self._filenode, fctx.data())
        if self.size() - 4 == fctx.size():
            # size() can match:
            # if file data starts with '\1\n', empty metadata block is
            # prepended, which adds 4 bytes to filelog.size().
            return self._filelog.cmp(self._filenode, fctx.data())
    if self.size() == fctx.size():
        # size() matches: need to compare content
        return self._filelog.cmp(self._filenode, fctx.data())

    # size() differs
    return True
def _adjustlinkrev(self, srcrev, inclusive=False, stoprev=None):
    """return the first ancestor of <srcrev> introducing <fnode>

    If the linkrev of the file revision does not point to an ancestor of
    srcrev, we'll walk down the ancestors until we find one introducing
    this file revision.

    :srcrev: the changeset revision we search ancestors from
    :inclusive: if true, the src revision will also be checked
    :stoprev: an optional revision to stop the walk at. If no introduction
              of this file content could be found before this floor
              revision, the function will returns "None" and stops its
              iteration.
    """
    repo = self._repo
    cl = repo.unfiltered().changelog
    mfl = repo.manifestlog
    # fetch the linkrev
    lkr = self.linkrev()
    if srcrev == lkr:
        return lkr
    # hack to reuse ancestor computation when searching for renames
    memberanc = getattr(self, '_ancestrycontext', None)
    iteranc = None
    if srcrev is None:
        # wctx case, used by workingfilectx during mergecopy
        revs = [p.rev() for p in self._repo[None].parents()]
        inclusive = True  # we skipped the real (revless) source
    else:
        revs = [srcrev]
    if memberanc is None:
        memberanc = iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
    # check if this linkrev is an ancestor of srcrev
    if lkr not in memberanc:
        if iteranc is None:
            iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
        fnode = self._filenode
        path = self._path
        for a in iteranc:
            if stoprev is not None and a < stoprev:
                return None
            ac = cl.read(a)  # get changeset data (we avoid object creation)
            if path in ac[3]:  # checking the 'files' field.
                # The file has been touched, check if the content is
                # similar to the one we search for.
                if fnode == mfl[ac[0]].readfast().get(path):
                    return a
        # In theory, we should never get out of that loop without a result.
        # But if manifest uses a buggy file revision (not children of the
        # one it replaces) we could. Such a buggy situation will likely
        # result is crash somewhere else at to some point.
    return lkr
def isintroducedafter(self, changelogrev):
    """True if a filectx has been introduced after a given floor revision"""
    if self.linkrev() >= changelogrev:
        # fast path: the raw linkrev already satisfies the floor
        return True
    introrev = self._introrev(stoprev=changelogrev)
    if introrev is None:
        return False
    return introrev >= changelogrev


def introrev(self):
    """return the rev of the changeset which introduced this file revision

    This method is different from linkrev because it take into account the
    changeset the filectx was created from. It ensures the returned
    revision is one of its ancestors. This prevents bugs from
    'linkrev-shadowing' when a file revision is used by multiple
    changesets.
    """
    return self._introrev()
def _introrev(self, stoprev=None):
    """
    Same as `introrev` but, with an extra argument to limit changelog
    iteration range in some internal usecase.

    If `stoprev` is set, the `introrev` will not be searched past that
    `stoprev` revision and "None" might be returned. This is useful to
    limit the iteration range.
    """
    toprev = None
    attrs = vars(self)
    if '_changeid' in attrs:
        # We have a cached value already
        toprev = self._changeid
    elif '_changectx' in attrs:
        # We know which changelog entry we are coming from
        toprev = self._changectx.rev()

    if toprev is not None:
        return self._adjustlinkrev(toprev, inclusive=True, stoprev=stoprev)
    elif '_descendantrev' in attrs:
        introrev = self._adjustlinkrev(self._descendantrev, stoprev=stoprev)
        # be nice and cache the result of the computation
        if introrev is not None:
            self._changeid = introrev
        return introrev
    else:
        return self.linkrev()
def introfilectx(self):
    """Return filectx having identical contents, but pointing to the
    changeset revision where this filectx was introduced"""
    introrev = self.introrev()
    if self.rev() == introrev:
        return self
    return self.filectx(self.filenode(), changeid=introrev)


def _parentfilectx(self, path, fileid, filelog):
    """create parent filectx keeping ancestry info for _adjustlinkrev()"""
    fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
    if '_changeid' in vars(self) or '_changectx' in vars(self):
        # If self is associated with a changeset (probably explicitly
        # fed), ensure the created filectx is associated with a
        # changeset that is an ancestor of self.changectx.
        # This lets us later use _adjustlinkrev to get a correct link.
        fctx._descendantrev = self.rev()
        fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
    elif '_descendantrev' in vars(self):
        # Otherwise propagate _descendantrev if we have one associated.
        fctx._descendantrev = self._descendantrev
        fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
    return fctx
def parents(self):
    """Return the parent file contexts, folding in rename metadata."""
    _path = self._path
    fl = self._filelog
    parents = self._filelog.parents(self._filenode)
    pl = [(_path, node, fl) for node in parents if node != nullid]

    r = fl.renamed(self._filenode)
    if r:
        # - In the simple rename case, both parent are nullid, pl is empty.
        # - In case of merge, only one of the parent is null id and should
        # be replaced with the rename information. This parent is -always-
        # the first one.
        #
        # As null id have always been filtered out in the previous list
        # comprehension, inserting to 0 will always result in "replacing
        # first nullid parent with rename information.
        pl.insert(0, (r[0], r[1], self._repo.file(r[0])))

    return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]


def p1(self):
    """Return the first parent file context."""
    return self.parents()[0]


def p2(self):
    """Return the second parent file context (a null filectx if none)."""
    p = self.parents()
    if len(p) == 2:
        return p[1]
    return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
1157
1158 def annotate(self, follow=False, skiprevs=None, diffopts=None):
1158 def annotate(self, follow=False, skiprevs=None, diffopts=None):
1159 """Returns a list of annotateline objects for each line in the file
1159 """Returns a list of annotateline objects for each line in the file
1160
1160
1161 - line.fctx is the filectx of the node where that line was last changed
1161 - line.fctx is the filectx of the node where that line was last changed
1162 - line.lineno is the line number at the first appearance in the managed
1162 - line.lineno is the line number at the first appearance in the managed
1163 file
1163 file
1164 - line.text is the data on that line (including newline character)
1164 - line.text is the data on that line (including newline character)
1165 """
1165 """
1166 getlog = util.lrucachefunc(lambda x: self._repo.file(x))
1166 getlog = util.lrucachefunc(lambda x: self._repo.file(x))
1167
1167
1168 def parents(f):
1168 def parents(f):
1169 # Cut _descendantrev here to mitigate the penalty of lazy linkrev
1169 # Cut _descendantrev here to mitigate the penalty of lazy linkrev
1170 # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
1170 # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
1171 # from the topmost introrev (= srcrev) down to p.linkrev() if it
1171 # from the topmost introrev (= srcrev) down to p.linkrev() if it
1172 # isn't an ancestor of the srcrev.
1172 # isn't an ancestor of the srcrev.
1173 f._changeid
1173 f._changeid
1174 pl = f.parents()
1174 pl = f.parents()
1175
1175
1176 # Don't return renamed parents if we aren't following.
1176 # Don't return renamed parents if we aren't following.
1177 if not follow:
1177 if not follow:
1178 pl = [p for p in pl if p.path() == f.path()]
1178 pl = [p for p in pl if p.path() == f.path()]
1179
1179
1180 # renamed filectx won't have a filelog yet, so set it
1180 # renamed filectx won't have a filelog yet, so set it
1181 # from the cache to save time
1181 # from the cache to save time
1182 for p in pl:
1182 for p in pl:
1183 if not '_filelog' in p.__dict__:
1183 if not '_filelog' in p.__dict__:
1184 p._filelog = getlog(p.path())
1184 p._filelog = getlog(p.path())
1185
1185
1186 return pl
1186 return pl
1187
1187
1188 # use linkrev to find the first changeset where self appeared
1188 # use linkrev to find the first changeset where self appeared
1189 base = self.introfilectx()
1189 base = self.introfilectx()
1190 if getattr(base, '_ancestrycontext', None) is None:
1190 if getattr(base, '_ancestrycontext', None) is None:
1191 # it is safe to use an unfiltered repository here because we are
1191 # it is safe to use an unfiltered repository here because we are
1192 # walking ancestors only.
1192 # walking ancestors only.
1193 cl = self._repo.unfiltered().changelog
1193 cl = self._repo.unfiltered().changelog
1194 if base.rev() is None:
1194 if base.rev() is None:
1195 # wctx is not inclusive, but works because _ancestrycontext
1195 # wctx is not inclusive, but works because _ancestrycontext
1196 # is used to test filelog revisions
1196 # is used to test filelog revisions
1197 ac = cl.ancestors(
1197 ac = cl.ancestors(
1198 [p.rev() for p in base.parents()], inclusive=True
1198 [p.rev() for p in base.parents()], inclusive=True
1199 )
1199 )
1200 else:
1200 else:
1201 ac = cl.ancestors([base.rev()], inclusive=True)
1201 ac = cl.ancestors([base.rev()], inclusive=True)
1202 base._ancestrycontext = ac
1202 base._ancestrycontext = ac
1203
1203
1204 return dagop.annotate(
1204 return dagop.annotate(
1205 base, parents, skiprevs=skiprevs, diffopts=diffopts
1205 base, parents, skiprevs=skiprevs, diffopts=diffopts
1206 )
1206 )
1207
1207
1208 def ancestors(self, followfirst=False):
1208 def ancestors(self, followfirst=False):
1209 visit = {}
1209 visit = {}
1210 c = self
1210 c = self
1211 if followfirst:
1211 if followfirst:
1212 cut = 1
1212 cut = 1
1213 else:
1213 else:
1214 cut = None
1214 cut = None
1215
1215
1216 while True:
1216 while True:
1217 for parent in c.parents()[:cut]:
1217 for parent in c.parents()[:cut]:
1218 visit[(parent.linkrev(), parent.filenode())] = parent
1218 visit[(parent.linkrev(), parent.filenode())] = parent
1219 if not visit:
1219 if not visit:
1220 break
1220 break
1221 c = visit.pop(max(visit))
1221 c = visit.pop(max(visit))
1222 yield c
1222 yield c
1223
1223
1224 def decodeddata(self):
1224 def decodeddata(self):
1225 """Returns `data()` after running repository decoding filters.
1225 """Returns `data()` after running repository decoding filters.
1226
1226
1227 This is often equivalent to how the data would be expressed on disk.
1227 This is often equivalent to how the data would be expressed on disk.
1228 """
1228 """
1229 return self._repo.wwritedata(self.path(), self.data())
1229 return self._repo.wwritedata(self.path(), self.data())
1230
1230
1231
1231
class filectx(basefilectx):
    """A filecontext object makes access to data related to a particular
    filerevision convenient."""

    def __init__(
        self,
        repo,
        path,
        changeid=None,
        fileid=None,
        filelog=None,
        changectx=None,
    ):
        """changeid must be a revision number, if specified.
        fileid can be a file revision or node."""
        self._repo = repo
        self._path = path

        assert (
            changeid is not None or fileid is not None or changectx is not None
        ), b"bad args: changeid=%r, fileid=%r, changectx=%r" % (
            changeid,
            fileid,
            changectx,
        )

        # Only pin what the caller supplied; anything left unset is
        # computed lazily by the corresponding propertycache.
        if filelog is not None:
            self._filelog = filelog
        if changeid is not None:
            self._changeid = changeid
        if changectx is not None:
            self._changectx = changectx
        if fileid is not None:
            self._fileid = fileid

    @propertycache
    def _changectx(self):
        try:
            return self._repo[self._changeid]
        except error.FilteredRepoLookupError:
            # Linkrev may point to any revision in the repository. When the
            # repository is filtered this may lead to `filectx` trying to
            # build `changectx` for a filtered revision. In such a case we
            # fall back to creating `changectx` on the unfiltered version of
            # the repository. This fallback should not be an issue because
            # `changectx` from `filectx` are not used in complex operations
            # that care about filtering.
            #
            # This fallback is a cheap and dirty fix that prevents several
            # crashes. It does not ensure the behavior is correct. However
            # the behavior was not correct before filtering either, and
            # "incorrect behavior" is seen as better than "crash".
            #
            # Linkrevs have several serious troubles with filtering that are
            # complicated to solve. Proper handling of the issue here should
            # be considered when solving linkrev issues are on the table.
            return self._repo.unfiltered()[self._changeid]

    def filectx(self, fileid, changeid=None):
        """opens an arbitrary revision of the file without
        opening a new filelog"""
        return filectx(
            self._repo,
            self._path,
            fileid=fileid,
            filelog=self._filelog,
            changeid=changeid,
        )

    def rawdata(self):
        """Raw revlog data for this file revision (no filters applied)."""
        return self._filelog.rawdata(self._filenode)

    def rawflags(self):
        """low-level revlog flags"""
        return self._filelog.flags(self._filerev)

    def data(self):
        """File content, honoring the censor policy on censored nodes."""
        try:
            return self._filelog.read(self._filenode)
        except error.CensoredNodeError:
            if self._repo.ui.config(b"censor", b"policy") == b"ignore":
                return b""
            raise error.Abort(
                _(b"censored node: %s") % short(self._filenode),
                hint=_(b"set censor.policy to ignore errors"),
            )

    def size(self):
        """Size of this file revision as recorded by the filelog."""
        return self._filelog.size(self._filerev)

    @propertycache
    def _copied(self):
        """check if file was actually renamed in this changeset revision

        If rename logged in file revision, we report copy for changeset only
        if file revisions linkrev points back to the changeset in question
        or both changeset parents contain different file revisions.
        """
        renamed = self._filelog.renamed(self._filenode)
        if not renamed:
            return None

        # fast path: the file revision was introduced by this changeset
        if self.rev() == self.linkrev():
            return renamed

        name = self.path()
        fnode = self._filenode
        for p in self._changectx.parents():
            try:
                if fnode == p.filenode(name):
                    # an identical file revision exists in a parent, so the
                    # copy is not new in this changeset
                    return None
            except error.LookupError:
                pass
        return renamed

    def children(self):
        # hard for renames
        nodes = self._filelog.children(self._filenode)
        return [
            filectx(self._repo, self._path, fileid=n, filelog=self._filelog)
            for n in nodes
        ]
1356
1356
1357
1357
class committablectx(basectx):
    """A committablectx object provides common functionality for a context that
    wants the ability to commit, e.g. workingctx or memctx."""

    def __init__(
        self,
        repo,
        text=b"",
        user=None,
        date=None,
        extra=None,
        changes=None,
        branch=None,
    ):
        super(committablectx, self).__init__(repo)
        # an uncommitted context has neither a revision number nor a node
        self._rev = None
        self._node = None
        self._text = text
        # only pin what was explicitly given; the rest is provided lazily
        # by the propertycaches below
        if date:
            self._date = dateutil.parsedate(date)
        if user:
            self._user = user
        if changes:
            self._status = changes

        self._extra = extra.copy() if extra else {}
        if branch is not None:
            self._extra[b'branch'] = encoding.fromlocal(branch)
        if not self._extra.get(b'branch'):
            self._extra[b'branch'] = b'default'

    def __bytes__(self):
        return bytes(self._parents[0]) + b"+"

    __str__ = encoding.strmethod(__bytes__)

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    @propertycache
    def _status(self):
        # default to the full repository status when not supplied
        return self._repo.status()

    @propertycache
    def _user(self):
        return self._repo.ui.username()

    @propertycache
    def _date(self):
        # devel.default-date lets tests force a deterministic date
        forced = self._repo.ui.configdate(b'devel', b'default-date')
        return forced if forced is not None else dateutil.makedate()

    def subrev(self, subpath):
        return None

    def manifestnode(self):
        return None

    def user(self):
        return self._user or self._repo.ui.username()

    def date(self):
        return self._date

    def description(self):
        return self._text

    def files(self):
        """Sorted list of files touched (modified, added or removed)."""
        st = self._status
        return sorted(st.modified + st.added + st.removed)

    def modified(self):
        return self._status.modified

    def added(self):
        return self._status.added

    def removed(self):
        return self._status.removed

    def deleted(self):
        return self._status.deleted

    filesmodified = modified
    filesadded = added
    filesremoved = removed

    def branch(self):
        return encoding.tolocal(self._extra[b'branch'])

    def closesbranch(self):
        return b'close' in self._extra

    def extra(self):
        return self._extra

    def isinmemory(self):
        return False

    def tags(self):
        return []

    def bookmarks(self):
        """Union of the parents' bookmarks."""
        marks = []
        for p in self.parents():
            marks.extend(p.bookmarks())
        return marks

    def phase(self):
        """Highest phase among the configured commit phase and the parents'."""
        result = phases.newcommitphase(self._repo.ui)
        for p in self.parents():
            result = max(result, p.phase())
        return result

    def hidden(self):
        return False

    def children(self):
        return []

    def flags(self, path):
        """Flags for path, preferring a cached manifest over the flag func."""
        if '_manifest' in self.__dict__:
            try:
                return self._manifest.flags(path)
            except KeyError:
                return b''

        try:
            return self._flagfunc(path)
        except OSError:
            return b''

    def ancestor(self, c2):
        """return the "best" ancestor context of self and c2"""
        return self._parents[0].ancestor(c2)  # punt on two parents for now

    def ancestors(self):
        """Yield the parents, then every other changelog ancestor."""
        for p in self._parents:
            yield p
        for a in self._repo.changelog.ancestors(
            [p.rev() for p in self._parents]
        ):
            yield self._repo[a]

    def markcommitted(self, node):
        """Perform post-commit cleanup necessary after committing this ctx

        Specifically, this updates backing stores this working context
        wraps to reflect the fact that the changes reflected by this
        workingctx have been committed. For example, it marks
        modified and added files as normal in the dirstate.

        """

    def dirty(self, missing=False, merge=True, branch=True):
        return False
1522
1522
1523
1523
1524 class workingctx(committablectx):
1524 class workingctx(committablectx):
1525 """A workingctx object makes access to data related to
1525 """A workingctx object makes access to data related to
1526 the current working directory convenient.
1526 the current working directory convenient.
1527 date - any valid date string or (unixtime, offset), or None.
1527 date - any valid date string or (unixtime, offset), or None.
1528 user - username string, or None.
1528 user - username string, or None.
1529 extra - a dictionary of extra values, or None.
1529 extra - a dictionary of extra values, or None.
1530 changes - a list of file lists as returned by localrepo.status()
1530 changes - a list of file lists as returned by localrepo.status()
1531 or None to use the repository status.
1531 or None to use the repository status.
1532 """
1532 """
1533
1533
1534 def __init__(
1534 def __init__(
1535 self, repo, text=b"", user=None, date=None, extra=None, changes=None
1535 self, repo, text=b"", user=None, date=None, extra=None, changes=None
1536 ):
1536 ):
1537 branch = None
1537 branch = None
1538 if not extra or b'branch' not in extra:
1538 if not extra or b'branch' not in extra:
1539 try:
1539 try:
1540 branch = repo.dirstate.branch()
1540 branch = repo.dirstate.branch()
1541 except UnicodeDecodeError:
1541 except UnicodeDecodeError:
1542 raise error.Abort(_(b'branch name not in UTF-8!'))
1542 raise error.Abort(_(b'branch name not in UTF-8!'))
1543 super(workingctx, self).__init__(
1543 super(workingctx, self).__init__(
1544 repo, text, user, date, extra, changes, branch=branch
1544 repo, text, user, date, extra, changes, branch=branch
1545 )
1545 )
1546
1546
1547 def __iter__(self):
1547 def __iter__(self):
1548 d = self._repo.dirstate
1548 d = self._repo.dirstate
1549 for f in d:
1549 for f in d:
1550 if d[f] != b'r':
1550 if d[f] != b'r':
1551 yield f
1551 yield f
1552
1552
1553 def __contains__(self, key):
1553 def __contains__(self, key):
1554 return self._repo.dirstate[key] not in b"?r"
1554 return self._repo.dirstate[key] not in b"?r"
1555
1555
1556 def hex(self):
1556 def hex(self):
1557 return wdirhex
1557 return wdirhex
1558
1558
1559 @propertycache
1559 @propertycache
1560 def _parents(self):
1560 def _parents(self):
1561 p = self._repo.dirstate.parents()
1561 p = self._repo.dirstate.parents()
1562 if p[1] == nullid:
1562 if p[1] == nullid:
1563 p = p[:-1]
1563 p = p[:-1]
1564 # use unfiltered repo to delay/avoid loading obsmarkers
1564 # use unfiltered repo to delay/avoid loading obsmarkers
1565 unfi = self._repo.unfiltered()
1565 unfi = self._repo.unfiltered()
1566 return [
1566 return [
1567 changectx(
1567 changectx(
1568 self._repo, unfi.changelog.rev(n), n, maybe_filtered=False
1568 self._repo, unfi.changelog.rev(n), n, maybe_filtered=False
1569 )
1569 )
1570 for n in p
1570 for n in p
1571 ]
1571 ]
1572
1572
1573 def setparents(self, p1node, p2node=nullid):
1573 def setparents(self, p1node, p2node=nullid):
1574 dirstate = self._repo.dirstate
1574 dirstate = self._repo.dirstate
1575 with dirstate.parentchange():
1575 with dirstate.parentchange():
1576 copies = dirstate.setparents(p1node, p2node)
1576 copies = dirstate.setparents(p1node, p2node)
1577 pctx = self._repo[p1node]
1577 pctx = self._repo[p1node]
1578 if copies:
1578 if copies:
1579 # Adjust copy records, the dirstate cannot do it, it
1579 # Adjust copy records, the dirstate cannot do it, it
1580 # requires access to parents manifests. Preserve them
1580 # requires access to parents manifests. Preserve them
1581 # only for entries added to first parent.
1581 # only for entries added to first parent.
1582 for f in copies:
1582 for f in copies:
1583 if f not in pctx and copies[f] in pctx:
1583 if f not in pctx and copies[f] in pctx:
1584 dirstate.copy(copies[f], f)
1584 dirstate.copy(copies[f], f)
1585 if p2node == nullid:
1585 if p2node == nullid:
1586 for f, s in sorted(dirstate.copies().items()):
1586 for f, s in sorted(dirstate.copies().items()):
1587 if f not in pctx and s not in pctx:
1587 if f not in pctx and s not in pctx:
1588 dirstate.copy(None, f)
1588 dirstate.copy(None, f)
1589
1589
1590 def _fileinfo(self, path):
1590 def _fileinfo(self, path):
1591 # populate __dict__['_manifest'] as workingctx has no _manifestdelta
1591 # populate __dict__['_manifest'] as workingctx has no _manifestdelta
1592 self._manifest
1592 self._manifest
1593 return super(workingctx, self)._fileinfo(path)
1593 return super(workingctx, self)._fileinfo(path)
1594
1594
1595 def _buildflagfunc(self):
1595 def _buildflagfunc(self):
1596 # Create a fallback function for getting file flags when the
1596 # Create a fallback function for getting file flags when the
1597 # filesystem doesn't support them
1597 # filesystem doesn't support them
1598
1598
1599 copiesget = self._repo.dirstate.copies().get
1599 copiesget = self._repo.dirstate.copies().get
1600 parents = self.parents()
1600 parents = self.parents()
1601 if len(parents) < 2:
1601 if len(parents) < 2:
1602 # when we have one parent, it's easy: copy from parent
1602 # when we have one parent, it's easy: copy from parent
1603 man = parents[0].manifest()
1603 man = parents[0].manifest()
1604
1604
1605 def func(f):
1605 def func(f):
1606 f = copiesget(f, f)
1606 f = copiesget(f, f)
1607 return man.flags(f)
1607 return man.flags(f)
1608
1608
1609 else:
1609 else:
1610 # merges are tricky: we try to reconstruct the unstored
1610 # merges are tricky: we try to reconstruct the unstored
1611 # result from the merge (issue1802)
1611 # result from the merge (issue1802)
1612 p1, p2 = parents
1612 p1, p2 = parents
1613 pa = p1.ancestor(p2)
1613 pa = p1.ancestor(p2)
1614 m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()
1614 m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()
1615
1615
1616 def func(f):
1616 def func(f):
1617 f = copiesget(f, f) # may be wrong for merges with copies
1617 f = copiesget(f, f) # may be wrong for merges with copies
1618 fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
1618 fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
1619 if fl1 == fl2:
1619 if fl1 == fl2:
1620 return fl1
1620 return fl1
1621 if fl1 == fla:
1621 if fl1 == fla:
1622 return fl2
1622 return fl2
1623 if fl2 == fla:
1623 if fl2 == fla:
1624 return fl1
1624 return fl1
1625 return b'' # punt for conflicts
1625 return b'' # punt for conflicts
1626
1626
1627 return func
1627 return func
1628
1628
1629 @propertycache
1629 @propertycache
1630 def _flagfunc(self):
1630 def _flagfunc(self):
1631 return self._repo.dirstate.flagfunc(self._buildflagfunc)
1631 return self._repo.dirstate.flagfunc(self._buildflagfunc)
1632
1632
1633 def flags(self, path):
1633 def flags(self, path):
1634 try:
1634 try:
1635 return self._flagfunc(path)
1635 return self._flagfunc(path)
1636 except OSError:
1636 except OSError:
1637 return b''
1637 return b''
1638
1638
1639 def filectx(self, path, filelog=None):
1639 def filectx(self, path, filelog=None):
1640 """get a file context from the working directory"""
1640 """get a file context from the working directory"""
1641 return workingfilectx(
1641 return workingfilectx(
1642 self._repo, path, workingctx=self, filelog=filelog
1642 self._repo, path, workingctx=self, filelog=filelog
1643 )
1643 )
1644
1644
1645 def dirty(self, missing=False, merge=True, branch=True):
1645 def dirty(self, missing=False, merge=True, branch=True):
1646 """check whether a working directory is modified"""
1646 """check whether a working directory is modified"""
1647 # check subrepos first
1647 # check subrepos first
1648 for s in sorted(self.substate):
1648 for s in sorted(self.substate):
1649 if self.sub(s).dirty(missing=missing):
1649 if self.sub(s).dirty(missing=missing):
1650 return True
1650 return True
1651 # check current working dir
1651 # check current working dir
1652 return (
1652 return (
1653 (merge and self.p2())
1653 (merge and self.p2())
1654 or (branch and self.branch() != self.p1().branch())
1654 or (branch and self.branch() != self.p1().branch())
1655 or self.modified()
1655 or self.modified()
1656 or self.added()
1656 or self.added()
1657 or self.removed()
1657 or self.removed()
1658 or (missing and self.deleted())
1658 or (missing and self.deleted())
1659 )
1659 )
1660
1660
1661 def add(self, list, prefix=b""):
1661 def add(self, list, prefix=b""):
1662 with self._repo.wlock():
1662 with self._repo.wlock():
1663 ui, ds = self._repo.ui, self._repo.dirstate
1663 ui, ds = self._repo.ui, self._repo.dirstate
1664 uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
1664 uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
1665 rejected = []
1665 rejected = []
1666 lstat = self._repo.wvfs.lstat
1666 lstat = self._repo.wvfs.lstat
1667 for f in list:
1667 for f in list:
1668 # ds.pathto() returns an absolute file when this is invoked from
1668 # ds.pathto() returns an absolute file when this is invoked from
1669 # the keyword extension. That gets flagged as non-portable on
1669 # the keyword extension. That gets flagged as non-portable on
1670 # Windows, since it contains the drive letter and colon.
1670 # Windows, since it contains the drive letter and colon.
1671 scmutil.checkportable(ui, os.path.join(prefix, f))
1671 scmutil.checkportable(ui, os.path.join(prefix, f))
1672 try:
1672 try:
1673 st = lstat(f)
1673 st = lstat(f)
1674 except OSError:
1674 except OSError:
1675 ui.warn(_(b"%s does not exist!\n") % uipath(f))
1675 ui.warn(_(b"%s does not exist!\n") % uipath(f))
1676 rejected.append(f)
1676 rejected.append(f)
1677 continue
1677 continue
1678 limit = ui.configbytes(b'ui', b'large-file-limit')
1678 limit = ui.configbytes(b'ui', b'large-file-limit')
1679 if limit != 0 and st.st_size > limit:
1679 if limit != 0 and st.st_size > limit:
1680 ui.warn(
1680 ui.warn(
1681 _(
1681 _(
1682 b"%s: up to %d MB of RAM may be required "
1682 b"%s: up to %d MB of RAM may be required "
1683 b"to manage this file\n"
1683 b"to manage this file\n"
1684 b"(use 'hg revert %s' to cancel the "
1684 b"(use 'hg revert %s' to cancel the "
1685 b"pending addition)\n"
1685 b"pending addition)\n"
1686 )
1686 )
1687 % (f, 3 * st.st_size // 1000000, uipath(f))
1687 % (f, 3 * st.st_size // 1000000, uipath(f))
1688 )
1688 )
1689 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1689 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1690 ui.warn(
1690 ui.warn(
1691 _(
1691 _(
1692 b"%s not added: only files and symlinks "
1692 b"%s not added: only files and symlinks "
1693 b"supported currently\n"
1693 b"supported currently\n"
1694 )
1694 )
1695 % uipath(f)
1695 % uipath(f)
1696 )
1696 )
1697 rejected.append(f)
1697 rejected.append(f)
1698 elif ds[f] in b'amn':
1698 elif ds[f] in b'amn':
1699 ui.warn(_(b"%s already tracked!\n") % uipath(f))
1699 ui.warn(_(b"%s already tracked!\n") % uipath(f))
1700 elif ds[f] == b'r':
1700 elif ds[f] == b'r':
1701 ds.normallookup(f)
1701 ds.normallookup(f)
1702 else:
1702 else:
1703 ds.add(f)
1703 ds.add(f)
1704 return rejected
1704 return rejected
1705
1705
1706 def forget(self, files, prefix=b""):
1706 def forget(self, files, prefix=b""):
1707 with self._repo.wlock():
1707 with self._repo.wlock():
1708 ds = self._repo.dirstate
1708 ds = self._repo.dirstate
1709 uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
1709 uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
1710 rejected = []
1710 rejected = []
1711 for f in files:
1711 for f in files:
1712 if f not in ds:
1712 if f not in ds:
1713 self._repo.ui.warn(_(b"%s not tracked!\n") % uipath(f))
1713 self._repo.ui.warn(_(b"%s not tracked!\n") % uipath(f))
1714 rejected.append(f)
1714 rejected.append(f)
1715 elif ds[f] != b'a':
1715 elif ds[f] != b'a':
1716 ds.remove(f)
1716 ds.remove(f)
1717 else:
1717 else:
1718 ds.drop(f)
1718 ds.drop(f)
1719 return rejected
1719 return rejected
1720
1720
1721 def copy(self, source, dest):
1721 def copy(self, source, dest):
1722 try:
1722 try:
1723 st = self._repo.wvfs.lstat(dest)
1723 st = self._repo.wvfs.lstat(dest)
1724 except OSError as err:
1724 except OSError as err:
1725 if err.errno != errno.ENOENT:
1725 if err.errno != errno.ENOENT:
1726 raise
1726 raise
1727 self._repo.ui.warn(
1727 self._repo.ui.warn(
1728 _(b"%s does not exist!\n") % self._repo.dirstate.pathto(dest)
1728 _(b"%s does not exist!\n") % self._repo.dirstate.pathto(dest)
1729 )
1729 )
1730 return
1730 return
1731 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1731 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1732 self._repo.ui.warn(
1732 self._repo.ui.warn(
1733 _(b"copy failed: %s is not a file or a symbolic link\n")
1733 _(b"copy failed: %s is not a file or a symbolic link\n")
1734 % self._repo.dirstate.pathto(dest)
1734 % self._repo.dirstate.pathto(dest)
1735 )
1735 )
1736 else:
1736 else:
1737 with self._repo.wlock():
1737 with self._repo.wlock():
1738 ds = self._repo.dirstate
1738 ds = self._repo.dirstate
1739 if ds[dest] in b'?':
1739 if ds[dest] in b'?':
1740 ds.add(dest)
1740 ds.add(dest)
1741 elif ds[dest] in b'r':
1741 elif ds[dest] in b'r':
1742 ds.normallookup(dest)
1742 ds.normallookup(dest)
1743 ds.copy(source, dest)
1743 ds.copy(source, dest)
1744
1744
1745 def match(
1745 def match(
1746 self,
1746 self,
1747 pats=None,
1747 pats=None,
1748 include=None,
1748 include=None,
1749 exclude=None,
1749 exclude=None,
1750 default=b'glob',
1750 default=b'glob',
1751 listsubrepos=False,
1751 listsubrepos=False,
1752 badfn=None,
1752 badfn=None,
1753 cwd=None,
1753 cwd=None,
1754 ):
1754 ):
1755 r = self._repo
1755 r = self._repo
1756 if not cwd:
1756 if not cwd:
1757 cwd = r.getcwd()
1757 cwd = r.getcwd()
1758
1758
1759 # Only a case insensitive filesystem needs magic to translate user input
1759 # Only a case insensitive filesystem needs magic to translate user input
1760 # to actual case in the filesystem.
1760 # to actual case in the filesystem.
1761 icasefs = not util.fscasesensitive(r.root)
1761 icasefs = not util.fscasesensitive(r.root)
1762 return matchmod.match(
1762 return matchmod.match(
1763 r.root,
1763 r.root,
1764 cwd,
1764 cwd,
1765 pats,
1765 pats,
1766 include,
1766 include,
1767 exclude,
1767 exclude,
1768 default,
1768 default,
1769 auditor=r.auditor,
1769 auditor=r.auditor,
1770 ctx=self,
1770 ctx=self,
1771 listsubrepos=listsubrepos,
1771 listsubrepos=listsubrepos,
1772 badfn=badfn,
1772 badfn=badfn,
1773 icasefs=icasefs,
1773 icasefs=icasefs,
1774 )
1774 )
1775
1775
1776 def _filtersuspectsymlink(self, files):
1776 def _filtersuspectsymlink(self, files):
1777 if not files or self._repo.dirstate._checklink:
1777 if not files or self._repo.dirstate._checklink:
1778 return files
1778 return files
1779
1779
1780 # Symlink placeholders may get non-symlink-like contents
1780 # Symlink placeholders may get non-symlink-like contents
1781 # via user error or dereferencing by NFS or Samba servers,
1781 # via user error or dereferencing by NFS or Samba servers,
1782 # so we filter out any placeholders that don't look like a
1782 # so we filter out any placeholders that don't look like a
1783 # symlink
1783 # symlink
1784 sane = []
1784 sane = []
1785 for f in files:
1785 for f in files:
1786 if self.flags(f) == b'l':
1786 if self.flags(f) == b'l':
1787 d = self[f].data()
1787 d = self[f].data()
1788 if (
1788 if (
1789 d == b''
1789 d == b''
1790 or len(d) >= 1024
1790 or len(d) >= 1024
1791 or b'\n' in d
1791 or b'\n' in d
1792 or stringutil.binary(d)
1792 or stringutil.binary(d)
1793 ):
1793 ):
1794 self._repo.ui.debug(
1794 self._repo.ui.debug(
1795 b'ignoring suspect symlink placeholder "%s"\n' % f
1795 b'ignoring suspect symlink placeholder "%s"\n' % f
1796 )
1796 )
1797 continue
1797 continue
1798 sane.append(f)
1798 sane.append(f)
1799 return sane
1799 return sane
1800
1800
    def _checklookup(self, files):
        """Content-compare files the dirstate could not classify.

        ``files`` are paths whose status was ambiguous from stat data
        alone.  Returns ``(modified, deleted, fixup)``, where ``fixup``
        lists files that turned out to be clean and whose dirstate
        entries may therefore be refreshed by the caller.
        """
        # check for any possibly clean files
        if not files:
            return [], [], []

        modified = []
        deleted = []
        fixup = []
        pctx = self._parents[0]
        # do a full compare of any files that might have changed
        for f in sorted(files):
            try:
                # This will return True for a file that got replaced by a
                # directory in the interim, but fixing that is pretty hard.
                if (
                    f not in pctx
                    or self.flags(f) != pctx.flags(f)
                    or pctx[f].cmp(self[f])
                ):
                    modified.append(f)
                else:
                    fixup.append(f)
            except (IOError, OSError):
                # A file become inaccessible in between? Mark it as deleted,
                # matching dirstate behavior (issue5584).
                # The dirstate has more complex behavior around whether a
                # missing file matches a directory, etc, but we don't need to
                # bother with that: if f has made it to this point, we're sure
                # it's in the dirstate.
                deleted.append(f)

        return modified, deleted, fixup
1833
1833
    def _poststatusfixup(self, status, fixup):
        """update dirstate for files that are actually clean

        ``fixup`` lists files proven clean by ``_checklookup``; their
        dirstate entries are refreshed so the next status call can skip
        the content comparison.  Any registered post-dirstate-status
        callbacks are run as well.  The wlock is taken non-blocking
        because this write-back is only an optimization.
        """
        poststatus = self._repo.postdsstatus()
        if fixup or poststatus:
            try:
                # snapshot the dirstate file identity so we can detect a
                # concurrent rewrite between status and this write-back
                oldid = self._repo.dirstate.identity()

                # updating the dirstate is optional
                # so we don't wait on the lock
                # wlock can invalidate the dirstate, so cache normal _after_
                # taking the lock
                with self._repo.wlock(False):
                    if self._repo.dirstate.identity() == oldid:
                        if fixup:
                            normal = self._repo.dirstate.normal
                            for f in fixup:
                                normal(f)
                            # write changes out explicitly, because nesting
                            # wlock at runtime may prevent 'wlock.release()'
                            # after this block from doing so for subsequent
                            # changing files
                            tr = self._repo.currenttransaction()
                            self._repo.dirstate.write(tr)

                        if poststatus:
                            for ps in poststatus:
                                ps(self, status)
                    else:
                        # in this case, writing changes out breaks
                        # consistency, because .hg/dirstate was
                        # already changed simultaneously after last
                        # caching (see also issue5584 for detail)
                        self._repo.ui.debug(
                            b'skip updating dirstate: identity mismatch\n'
                        )
            except error.LockError:
                # couldn't take the wlock without blocking: skip the
                # optional write-back entirely
                pass
            finally:
                # Even if the wlock couldn't be grabbed, clear out the list.
                self._repo.clearpostdsstatus()
1874
1874
    def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
        '''Gets the status from the dirstate -- internal use only.

        Returns a ``scmutil.status`` object.  Files the dirstate cannot
        classify from stat data alone are content-compared via
        ``_checklookup``, and proven-clean entries are written back
        through ``_poststatusfixup``.  When ``match`` matches everything,
        the result is also cached on ``self._status``.
        '''
        subrepos = []
        if b'.hgsub' in self:
            subrepos = sorted(self.substate)
        # ``cmp`` holds the files needing a full content comparison
        cmp, s = self._repo.dirstate.status(
            match, subrepos, ignored=ignored, clean=clean, unknown=unknown
        )

        # check for any possibly clean files
        fixup = []
        if cmp:
            modified2, deleted2, fixup = self._checklookup(cmp)
            s.modified.extend(modified2)
            s.deleted.extend(deleted2)

            if fixup and clean:
                s.clean.extend(fixup)

        self._poststatusfixup(s, fixup)

        if match.always():
            # cache for performance
            if s.unknown or s.ignored or s.clean:
                # "_status" is cached with list*=False in the normal route
                self._status = scmutil.status(
                    s.modified, s.added, s.removed, s.deleted, [], [], []
                )
            else:
                self._status = s

        return s
1907
1907
1908 @propertycache
1908 @propertycache
1909 def _copies(self):
1909 def _copies(self):
1910 p1copies = {}
1910 p1copies = {}
1911 p2copies = {}
1911 p2copies = {}
1912 parents = self._repo.dirstate.parents()
1912 parents = self._repo.dirstate.parents()
1913 p1manifest = self._repo[parents[0]].manifest()
1913 p1manifest = self._repo[parents[0]].manifest()
1914 p2manifest = self._repo[parents[1]].manifest()
1914 p2manifest = self._repo[parents[1]].manifest()
1915 changedset = set(self.added()) | set(self.modified())
1915 changedset = set(self.added()) | set(self.modified())
1916 narrowmatch = self._repo.narrowmatch()
1916 narrowmatch = self._repo.narrowmatch()
1917 for dst, src in self._repo.dirstate.copies().items():
1917 for dst, src in self._repo.dirstate.copies().items():
1918 if dst not in changedset or not narrowmatch(dst):
1918 if dst not in changedset or not narrowmatch(dst):
1919 continue
1919 continue
1920 if src in p1manifest:
1920 if src in p1manifest:
1921 p1copies[dst] = src
1921 p1copies[dst] = src
1922 elif src in p2manifest:
1922 elif src in p2manifest:
1923 p2copies[dst] = src
1923 p2copies[dst] = src
1924 return p1copies, p2copies
1924 return p1copies, p2copies
1925
1925
1926 @propertycache
1926 @propertycache
1927 def _manifest(self):
1927 def _manifest(self):
1928 """generate a manifest corresponding to the values in self._status
1928 """generate a manifest corresponding to the values in self._status
1929
1929
1930 This reuse the file nodeid from parent, but we use special node
1930 This reuse the file nodeid from parent, but we use special node
1931 identifiers for added and modified files. This is used by manifests
1931 identifiers for added and modified files. This is used by manifests
1932 merge to see that files are different and by update logic to avoid
1932 merge to see that files are different and by update logic to avoid
1933 deleting newly added files.
1933 deleting newly added files.
1934 """
1934 """
1935 return self._buildstatusmanifest(self._status)
1935 return self._buildstatusmanifest(self._status)
1936
1936
1937 def _buildstatusmanifest(self, status):
1937 def _buildstatusmanifest(self, status):
1938 """Builds a manifest that includes the given status results."""
1938 """Builds a manifest that includes the given status results."""
1939 parents = self.parents()
1939 parents = self.parents()
1940
1940
1941 man = parents[0].manifest().copy()
1941 man = parents[0].manifest().copy()
1942
1942
1943 ff = self._flagfunc
1943 ff = self._flagfunc
1944 for i, l in (
1944 for i, l in (
1945 (addednodeid, status.added),
1945 (addednodeid, status.added),
1946 (modifiednodeid, status.modified),
1946 (modifiednodeid, status.modified),
1947 ):
1947 ):
1948 for f in l:
1948 for f in l:
1949 man[f] = i
1949 man[f] = i
1950 try:
1950 try:
1951 man.setflag(f, ff(f))
1951 man.setflag(f, ff(f))
1952 except OSError:
1952 except OSError:
1953 pass
1953 pass
1954
1954
1955 for f in status.deleted + status.removed:
1955 for f in status.deleted + status.removed:
1956 if f in man:
1956 if f in man:
1957 del man[f]
1957 del man[f]
1958
1958
1959 return man
1959 return man
1960
1960
1961 def _buildstatus(
1961 def _buildstatus(
1962 self, other, s, match, listignored, listclean, listunknown
1962 self, other, s, match, listignored, listclean, listunknown
1963 ):
1963 ):
1964 """build a status with respect to another context
1964 """build a status with respect to another context
1965
1965
1966 This includes logic for maintaining the fast path of status when
1966 This includes logic for maintaining the fast path of status when
1967 comparing the working directory against its parent, which is to skip
1967 comparing the working directory against its parent, which is to skip
1968 building a new manifest if self (working directory) is not comparing
1968 building a new manifest if self (working directory) is not comparing
1969 against its parent (repo['.']).
1969 against its parent (repo['.']).
1970 """
1970 """
1971 s = self._dirstatestatus(match, listignored, listclean, listunknown)
1971 s = self._dirstatestatus(match, listignored, listclean, listunknown)
1972 # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
1972 # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
1973 # might have accidentally ended up with the entire contents of the file
1973 # might have accidentally ended up with the entire contents of the file
1974 # they are supposed to be linking to.
1974 # they are supposed to be linking to.
1975 s.modified[:] = self._filtersuspectsymlink(s.modified)
1975 s.modified[:] = self._filtersuspectsymlink(s.modified)
1976 if other != self._repo[b'.']:
1976 if other != self._repo[b'.']:
1977 s = super(workingctx, self)._buildstatus(
1977 s = super(workingctx, self)._buildstatus(
1978 other, s, match, listignored, listclean, listunknown
1978 other, s, match, listignored, listclean, listunknown
1979 )
1979 )
1980 return s
1980 return s
1981
1981
1982 def _matchstatus(self, other, match):
1982 def _matchstatus(self, other, match):
1983 """override the match method with a filter for directory patterns
1983 """override the match method with a filter for directory patterns
1984
1984
1985 We use inheritance to customize the match.bad method only in cases of
1985 We use inheritance to customize the match.bad method only in cases of
1986 workingctx since it belongs only to the working directory when
1986 workingctx since it belongs only to the working directory when
1987 comparing against the parent changeset.
1987 comparing against the parent changeset.
1988
1988
1989 If we aren't comparing against the working directory's parent, then we
1989 If we aren't comparing against the working directory's parent, then we
1990 just use the default match object sent to us.
1990 just use the default match object sent to us.
1991 """
1991 """
1992 if other != self._repo[b'.']:
1992 if other != self._repo[b'.']:
1993
1993
1994 def bad(f, msg):
1994 def bad(f, msg):
1995 # 'f' may be a directory pattern from 'match.files()',
1995 # 'f' may be a directory pattern from 'match.files()',
1996 # so 'f not in ctx1' is not enough
1996 # so 'f not in ctx1' is not enough
1997 if f not in other and not other.hasdir(f):
1997 if f not in other and not other.hasdir(f):
1998 self._repo.ui.warn(
1998 self._repo.ui.warn(
1999 b'%s: %s\n' % (self._repo.dirstate.pathto(f), msg)
1999 b'%s: %s\n' % (self._repo.dirstate.pathto(f), msg)
2000 )
2000 )
2001
2001
2002 match.bad = bad
2002 match.bad = bad
2003 return match
2003 return match
2004
2004
2005 def walk(self, match):
2005 def walk(self, match):
2006 '''Generates matching file names.'''
2006 '''Generates matching file names.'''
2007 return sorted(
2007 return sorted(
2008 self._repo.dirstate.walk(
2008 self._repo.dirstate.walk(
2009 self._repo.narrowmatch(match),
2009 self._repo.narrowmatch(match),
2010 subrepos=sorted(self.substate),
2010 subrepos=sorted(self.substate),
2011 unknown=True,
2011 unknown=True,
2012 ignored=False,
2012 ignored=False,
2013 )
2013 )
2014 )
2014 )
2015
2015
2016 def matches(self, match):
2016 def matches(self, match):
2017 match = self._repo.narrowmatch(match)
2017 match = self._repo.narrowmatch(match)
2018 ds = self._repo.dirstate
2018 ds = self._repo.dirstate
2019 return sorted(f for f in ds.matches(match) if ds[f] != b'r')
2019 return sorted(f for f in ds.matches(match) if ds[f] != b'r')
2020
2020
    def markcommitted(self, node):
        """Update the dirstate after the working copy was committed as
        ``node``: modified/added files become clean, removed files are
        dropped, and the dirstate parent moves to the new node.
        """
        with self._repo.dirstate.parentchange():
            for f in self.modified() + self.added():
                self._repo.dirstate.normal(f)
            for f in self.removed():
                self._repo.dirstate.drop(f)
            self._repo.dirstate.setparents(node)
            # drop cached changeid lookups that may now be stale
            self._repo._quick_access_changeid_invalidate()

        # write changes out explicitly, because nesting wlock at
        # runtime may prevent 'wlock.release()' in 'repo.commit()'
        # from immediately doing so for subsequent changing files
        self._repo.dirstate.write(self._repo.currenttransaction())

        sparse.aftercommit(self._repo, node)
2036
2036
2037 def mergestate(self, clean=False):
2037 def mergestate(self, clean=False):
2038 if clean:
2038 if clean:
2039 return mergestatemod.mergestate.clean(self._repo)
2039 return mergestatemod.mergestate.clean(self._repo)
2040 return mergestatemod.mergestate.read(self._repo)
2040 return mergestatemod.mergestate.read(self._repo)
2041
2041
2042
2042
class committablefilectx(basefilectx):
    """Shared behavior for file contexts that can be committed.

    Concrete subclasses include workingfilectx and memfilectx.
    """

    def __init__(self, repo, path, filelog=None, ctx=None):
        self._repo = repo
        self._path = path
        self._changeid = None
        self._filerev = self._filenode = None

        if filelog is not None:
            self._filelog = filelog
        if ctx:
            self._changectx = ctx

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def linkrev(self):
        # linked to self._changectx no matter if file is modified or not
        return self.rev()

    def renamed(self):
        """Return ``(source path, source filenode)`` if this file is a
        copy, else ``None``."""
        source = self.copysource()
        if not source:
            return None
        p1manifest = self._changectx._parents[0]._manifest
        return source, p1manifest.get(source, nullid)

    def parents(self):
        '''return parent filectxs, following copies if necessary'''
        path = self._path
        filelog = self._filelog
        parentctxs = self._changectx._parents

        def nodefor(ctx, p):
            return ctx._manifest.get(p, nullid)

        copydata = self.renamed()
        if copydata:
            # a copy: the single parent is the copy source
            entries = [copydata + (None,)]
        else:
            entries = [(path, nodefor(parentctxs[0], path), filelog)]
        entries.extend(
            (path, nodefor(pctx, path), filelog) for pctx in parentctxs[1:]
        )

        return [
            self._parentfilectx(p, fileid=n, filelog=l)
            for p, n, l in entries
            if n != nullid
        ]

    def children(self):
        return []
2100
2100
2101
2101
class workingfilectx(committablefilectx):
    """A workingfilectx object makes access to data related to a particular
    file in the working directory convenient."""

    def __init__(self, repo, path, filelog=None, workingctx=None):
        super(workingfilectx, self).__init__(repo, path, filelog, workingctx)

    @propertycache
    def _changectx(self):
        # lazily bind to a fresh workingctx when none was supplied
        return workingctx(self._repo)

    def data(self):
        """Return the file's current contents from the working directory."""
        return self._repo.wread(self._path)

    def copysource(self):
        """Return the copy source recorded in the dirstate, if any."""
        return self._repo.dirstate.copied(self._path)

    def size(self):
        """Return ``st_size`` from an lstat of the working-directory file."""
        return self._repo.wvfs.lstat(self._path).st_size

    def lstat(self):
        return self._repo.wvfs.lstat(self._path)

    def date(self):
        """Return ``(mtime, tzoffset)``; falls back to the changectx's own
        date when the file has been deleted from disk."""
        t, tz = self._changectx.date()
        try:
            return (self._repo.wvfs.lstat(self._path)[stat.ST_MTIME], tz)
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
            return (t, tz)

    def exists(self):
        return self._repo.wvfs.exists(self._path)

    def lexists(self):
        return self._repo.wvfs.lexists(self._path)

    def audit(self):
        # validate the path through the vfs auditor before use
        return self._repo.wvfs.audit(self._path)

    def cmp(self, fctx):
        """compare with other file context

        returns True if different than fctx.
        """
        # fctx should be a filectx (not a workingfilectx)
        # invert comparison to reuse the same code path
        return fctx.cmp(self)

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        rmdir = self._repo.ui.configbool(b'experimental', b'removeemptydirs')
        self._repo.wvfs.unlinkpath(
            self._path, ignoremissing=ignoremissing, rmdir=rmdir
        )

    def write(self, data, flags, backgroundclose=False, **kwargs):
        """wraps repo.wwrite"""
        return self._repo.wwrite(
            self._path, data, flags, backgroundclose=backgroundclose, **kwargs
        )

    def markcopied(self, src):
        """marks this file a copy of `src`"""
        self._repo.dirstate.copy(src, self._path)

    def clearunknown(self):
        """Removes conflicting items in the working directory so that
        ``write()`` can be called successfully.
        """
        wvfs = self._repo.wvfs
        f = self._path
        wvfs.audit(f)
        if self._repo.ui.configbool(
            b'experimental', b'merge.checkpathconflicts'
        ):
            # remove files under the directory as they should already be
            # warned and backed up
            if wvfs.isdir(f) and not wvfs.islink(f):
                wvfs.rmtree(f, forcibly=True)
            # a parent directory entry may actually be a file blocking us
            for p in reversed(list(pathutil.finddirs(f))):
                if wvfs.isfileorlink(p):
                    wvfs.unlink(p)
                    break
        else:
            # don't remove files if path conflicts are not processed
            if wvfs.isdir(f) and not wvfs.islink(f):
                wvfs.removedirs(f)

    def setflags(self, l, x):
        # l: symlink flag, x: executable flag
        self._repo.wvfs.setflags(self._path, l, x)
2194
2194
2195
2195
2196 class overlayworkingctx(committablectx):
2196 class overlayworkingctx(committablectx):
2197 """Wraps another mutable context with a write-back cache that can be
2197 """Wraps another mutable context with a write-back cache that can be
2198 converted into a commit context.
2198 converted into a commit context.
2199
2199
2200 self._cache[path] maps to a dict with keys: {
2200 self._cache[path] maps to a dict with keys: {
2201 'exists': bool?
2201 'exists': bool?
2202 'date': date?
2202 'date': date?
2203 'data': str?
2203 'data': str?
2204 'flags': str?
2204 'flags': str?
2205 'copied': str? (path or None)
2205 'copied': str? (path or None)
2206 }
2206 }
2207 If `exists` is True, `flags` must be non-None and 'date' is non-None. If it
2207 If `exists` is True, `flags` must be non-None and 'date' is non-None. If it
2208 is `False`, the file was deleted.
2208 is `False`, the file was deleted.
2209 """
2209 """
2210
2210
2211 def __init__(self, repo):
2211 def __init__(self, repo):
2212 super(overlayworkingctx, self).__init__(repo)
2212 super(overlayworkingctx, self).__init__(repo)
2213 self.clean()
2213 self.clean()
2214
2214
2215 def setbase(self, wrappedctx):
2215 def setbase(self, wrappedctx):
2216 self._wrappedctx = wrappedctx
2216 self._wrappedctx = wrappedctx
2217 self._parents = [wrappedctx]
2217 self._parents = [wrappedctx]
2218 # Drop old manifest cache as it is now out of date.
2218 # Drop old manifest cache as it is now out of date.
2219 # This is necessary when, e.g., rebasing several nodes with one
2219 # This is necessary when, e.g., rebasing several nodes with one
2220 # ``overlayworkingctx`` (e.g. with --collapse).
2220 # ``overlayworkingctx`` (e.g. with --collapse).
2221 util.clearcachedproperty(self, b'_manifest')
2221 util.clearcachedproperty(self, b'_manifest')
2222
2222
2223 def setparents(self, p1node, p2node=nullid):
2223 def setparents(self, p1node, p2node=nullid):
2224 assert p1node == self._wrappedctx.node()
2224 assert p1node == self._wrappedctx.node()
2225 self._parents = [self._wrappedctx, self._repo.unfiltered()[p2node]]
2225 self._parents = [self._wrappedctx, self._repo.unfiltered()[p2node]]
2226
2226
2227 def data(self, path):
2227 def data(self, path):
2228 if self.isdirty(path):
2228 if self.isdirty(path):
2229 if self._cache[path][b'exists']:
2229 if self._cache[path][b'exists']:
2230 if self._cache[path][b'data'] is not None:
2230 if self._cache[path][b'data'] is not None:
2231 return self._cache[path][b'data']
2231 return self._cache[path][b'data']
2232 else:
2232 else:
2233 # Must fallback here, too, because we only set flags.
2233 # Must fallback here, too, because we only set flags.
2234 return self._wrappedctx[path].data()
2234 return self._wrappedctx[path].data()
2235 else:
2235 else:
2236 raise error.ProgrammingError(
2236 raise error.ProgrammingError(
2237 b"No such file or directory: %s" % path
2237 b"No such file or directory: %s" % path
2238 )
2238 )
2239 else:
2239 else:
2240 return self._wrappedctx[path].data()
2240 return self._wrappedctx[path].data()
2241
2241
2242 @propertycache
2242 @propertycache
2243 def _manifest(self):
2243 def _manifest(self):
2244 parents = self.parents()
2244 parents = self.parents()
2245 man = parents[0].manifest().copy()
2245 man = parents[0].manifest().copy()
2246
2246
2247 flag = self._flagfunc
2247 flag = self._flagfunc
2248 for path in self.added():
2248 for path in self.added():
2249 man[path] = addednodeid
2249 man[path] = addednodeid
2250 man.setflag(path, flag(path))
2250 man.setflag(path, flag(path))
2251 for path in self.modified():
2251 for path in self.modified():
2252 man[path] = modifiednodeid
2252 man[path] = modifiednodeid
2253 man.setflag(path, flag(path))
2253 man.setflag(path, flag(path))
2254 for path in self.removed():
2254 for path in self.removed():
2255 del man[path]
2255 del man[path]
2256 return man
2256 return man
2257
2257
2258 @propertycache
2258 @propertycache
2259 def _flagfunc(self):
2259 def _flagfunc(self):
2260 def f(path):
2260 def f(path):
2261 return self._cache[path][b'flags']
2261 return self._cache[path][b'flags']
2262
2262
2263 return f
2263 return f
2264
2264
2265 def files(self):
2265 def files(self):
2266 return sorted(self.added() + self.modified() + self.removed())
2266 return sorted(self.added() + self.modified() + self.removed())
2267
2267
2268 def modified(self):
2268 def modified(self):
2269 return [
2269 return [
2270 f
2270 f
2271 for f in self._cache.keys()
2271 for f in self._cache.keys()
2272 if self._cache[f][b'exists'] and self._existsinparent(f)
2272 if self._cache[f][b'exists'] and self._existsinparent(f)
2273 ]
2273 ]
2274
2274
2275 def added(self):
2275 def added(self):
2276 return [
2276 return [
2277 f
2277 f
2278 for f in self._cache.keys()
2278 for f in self._cache.keys()
2279 if self._cache[f][b'exists'] and not self._existsinparent(f)
2279 if self._cache[f][b'exists'] and not self._existsinparent(f)
2280 ]
2280 ]
2281
2281
2282 def removed(self):
2282 def removed(self):
2283 return [
2283 return [
2284 f
2284 f
2285 for f in self._cache.keys()
2285 for f in self._cache.keys()
2286 if not self._cache[f][b'exists'] and self._existsinparent(f)
2286 if not self._cache[f][b'exists'] and self._existsinparent(f)
2287 ]
2287 ]
2288
2288
2289 def p1copies(self):
2289 def p1copies(self):
2290 copies = {}
2290 copies = {}
2291 narrowmatch = self._repo.narrowmatch()
2291 narrowmatch = self._repo.narrowmatch()
2292 for f in self._cache.keys():
2292 for f in self._cache.keys():
2293 if not narrowmatch(f):
2293 if not narrowmatch(f):
2294 continue
2294 continue
2295 copies.pop(f, None) # delete if it exists
2295 copies.pop(f, None) # delete if it exists
2296 source = self._cache[f][b'copied']
2296 source = self._cache[f][b'copied']
2297 if source:
2297 if source:
2298 copies[f] = source
2298 copies[f] = source
2299 return copies
2299 return copies
2300
2300
2301 def p2copies(self):
2301 def p2copies(self):
2302 copies = {}
2302 copies = {}
2303 narrowmatch = self._repo.narrowmatch()
2303 narrowmatch = self._repo.narrowmatch()
2304 for f in self._cache.keys():
2304 for f in self._cache.keys():
2305 if not narrowmatch(f):
2305 if not narrowmatch(f):
2306 continue
2306 continue
2307 copies.pop(f, None) # delete if it exists
2307 copies.pop(f, None) # delete if it exists
2308 source = self._cache[f][b'copied']
2308 source = self._cache[f][b'copied']
2309 if source:
2309 if source:
2310 copies[f] = source
2310 copies[f] = source
2311 return copies
2311 return copies
2312
2312
2313 def isinmemory(self):
2313 def isinmemory(self):
2314 return True
2314 return True
2315
2315
2316 def filedate(self, path):
2316 def filedate(self, path):
2317 if self.isdirty(path):
2317 if self.isdirty(path):
2318 return self._cache[path][b'date']
2318 return self._cache[path][b'date']
2319 else:
2319 else:
2320 return self._wrappedctx[path].date()
2320 return self._wrappedctx[path].date()
2321
2321
2322 def markcopied(self, path, origin):
2322 def markcopied(self, path, origin):
2323 self._markdirty(
2323 self._markdirty(
2324 path,
2324 path,
2325 exists=True,
2325 exists=True,
2326 date=self.filedate(path),
2326 date=self.filedate(path),
2327 flags=self.flags(path),
2327 flags=self.flags(path),
2328 copied=origin,
2328 copied=origin,
2329 )
2329 )
2330
2330
2331 def copydata(self, path):
2331 def copydata(self, path):
2332 if self.isdirty(path):
2332 if self.isdirty(path):
2333 return self._cache[path][b'copied']
2333 return self._cache[path][b'copied']
2334 else:
2334 else:
2335 return None
2335 return None
2336
2336
2337 def flags(self, path):
2337 def flags(self, path):
2338 if self.isdirty(path):
2338 if self.isdirty(path):
2339 if self._cache[path][b'exists']:
2339 if self._cache[path][b'exists']:
2340 return self._cache[path][b'flags']
2340 return self._cache[path][b'flags']
2341 else:
2341 else:
2342 raise error.ProgrammingError(
2342 raise error.ProgrammingError(
2343 b"No such file or directory: %s" % path
2343 b"No such file or directory: %s" % path
2344 )
2344 )
2345 else:
2345 else:
2346 return self._wrappedctx[path].flags()
2346 return self._wrappedctx[path].flags()
2347
2347
2348 def __contains__(self, key):
2348 def __contains__(self, key):
2349 if key in self._cache:
2349 if key in self._cache:
2350 return self._cache[key][b'exists']
2350 return self._cache[key][b'exists']
2351 return key in self.p1()
2351 return key in self.p1()
2352
2352
2353 def _existsinparent(self, path):
2353 def _existsinparent(self, path):
2354 try:
2354 try:
2355 # ``commitctx` raises a ``ManifestLookupError`` if a path does not
2355 # ``commitctx` raises a ``ManifestLookupError`` if a path does not
2356 # exist, unlike ``workingctx``, which returns a ``workingfilectx``
2356 # exist, unlike ``workingctx``, which returns a ``workingfilectx``
2357 # with an ``exists()`` function.
2357 # with an ``exists()`` function.
2358 self._wrappedctx[path]
2358 self._wrappedctx[path]
2359 return True
2359 return True
2360 except error.ManifestLookupError:
2360 except error.ManifestLookupError:
2361 return False
2361 return False
2362
2362
2363 def _auditconflicts(self, path):
2363 def _auditconflicts(self, path):
2364 """Replicates conflict checks done by wvfs.write().
2364 """Replicates conflict checks done by wvfs.write().
2365
2365
2366 Since we never write to the filesystem and never call `applyupdates` in
2366 Since we never write to the filesystem and never call `applyupdates` in
2367 IMM, we'll never check that a path is actually writable -- e.g., because
2367 IMM, we'll never check that a path is actually writable -- e.g., because
2368 it adds `a/foo`, but `a` is actually a file in the other commit.
2368 it adds `a/foo`, but `a` is actually a file in the other commit.
2369 """
2369 """
2370
2370
2371 def fail(path, component):
2371 def fail(path, component):
2372 # p1() is the base and we're receiving "writes" for p2()'s
2372 # p1() is the base and we're receiving "writes" for p2()'s
2373 # files.
2373 # files.
2374 if b'l' in self.p1()[component].flags():
2374 if b'l' in self.p1()[component].flags():
2375 raise error.Abort(
2375 raise error.Abort(
2376 b"error: %s conflicts with symlink %s "
2376 b"error: %s conflicts with symlink %s "
2377 b"in %d." % (path, component, self.p1().rev())
2377 b"in %d." % (path, component, self.p1().rev())
2378 )
2378 )
2379 else:
2379 else:
2380 raise error.Abort(
2380 raise error.Abort(
2381 b"error: '%s' conflicts with file '%s' in "
2381 b"error: '%s' conflicts with file '%s' in "
2382 b"%d." % (path, component, self.p1().rev())
2382 b"%d." % (path, component, self.p1().rev())
2383 )
2383 )
2384
2384
2385 # Test that each new directory to be created to write this path from p2
2385 # Test that each new directory to be created to write this path from p2
2386 # is not a file in p1.
2386 # is not a file in p1.
2387 components = path.split(b'/')
2387 components = path.split(b'/')
2388 for i in pycompat.xrange(len(components)):
2388 for i in pycompat.xrange(len(components)):
2389 component = b"/".join(components[0:i])
2389 component = b"/".join(components[0:i])
2390 if component in self:
2390 if component in self:
2391 fail(path, component)
2391 fail(path, component)
2392
2392
2393 # Test the other direction -- that this path from p2 isn't a directory
2393 # Test the other direction -- that this path from p2 isn't a directory
2394 # in p1 (test that p1 doesn't have any paths matching `path/*`).
2394 # in p1 (test that p1 doesn't have any paths matching `path/*`).
2395 match = self.match([path], default=b'path')
2395 match = self.match([path], default=b'path')
2396 mfiles = list(self.p1().manifest().walk(match))
2396 mfiles = list(self.p1().manifest().walk(match))
2397 if len(mfiles) > 0:
2397 if len(mfiles) > 0:
2398 if len(mfiles) == 1 and mfiles[0] == path:
2398 if len(mfiles) == 1 and mfiles[0] == path:
2399 return
2399 return
2400 # omit the files which are deleted in current IMM wctx
2400 # omit the files which are deleted in current IMM wctx
2401 mfiles = [m for m in mfiles if m in self]
2401 mfiles = [m for m in mfiles if m in self]
2402 if not mfiles:
2402 if not mfiles:
2403 return
2403 return
2404 raise error.Abort(
2404 raise error.Abort(
2405 b"error: file '%s' cannot be written because "
2405 b"error: file '%s' cannot be written because "
2406 b" '%s/' is a directory in %s (containing %d "
2406 b" '%s/' is a directory in %s (containing %d "
2407 b"entries: %s)"
2407 b"entries: %s)"
2408 % (path, path, self.p1(), len(mfiles), b', '.join(mfiles))
2408 % (path, path, self.p1(), len(mfiles), b', '.join(mfiles))
2409 )
2409 )
2410
2410
2411 def write(self, path, data, flags=b'', **kwargs):
2411 def write(self, path, data, flags=b'', **kwargs):
2412 if data is None:
2412 if data is None:
2413 raise error.ProgrammingError(b"data must be non-None")
2413 raise error.ProgrammingError(b"data must be non-None")
2414 self._auditconflicts(path)
2414 self._auditconflicts(path)
2415 self._markdirty(
2415 self._markdirty(
2416 path, exists=True, data=data, date=dateutil.makedate(), flags=flags
2416 path, exists=True, data=data, date=dateutil.makedate(), flags=flags
2417 )
2417 )
2418
2418
2419 def setflags(self, path, l, x):
2419 def setflags(self, path, l, x):
2420 flag = b''
2420 flag = b''
2421 if l:
2421 if l:
2422 flag = b'l'
2422 flag = b'l'
2423 elif x:
2423 elif x:
2424 flag = b'x'
2424 flag = b'x'
2425 self._markdirty(path, exists=True, date=dateutil.makedate(), flags=flag)
2425 self._markdirty(path, exists=True, date=dateutil.makedate(), flags=flag)
2426
2426
2427 def remove(self, path):
2427 def remove(self, path):
2428 self._markdirty(path, exists=False)
2428 self._markdirty(path, exists=False)
2429
2429
2430 def exists(self, path):
2430 def exists(self, path):
2431 """exists behaves like `lexists`, but needs to follow symlinks and
2431 """exists behaves like `lexists`, but needs to follow symlinks and
2432 return False if they are broken.
2432 return False if they are broken.
2433 """
2433 """
2434 if self.isdirty(path):
2434 if self.isdirty(path):
2435 # If this path exists and is a symlink, "follow" it by calling
2435 # If this path exists and is a symlink, "follow" it by calling
2436 # exists on the destination path.
2436 # exists on the destination path.
2437 if (
2437 if (
2438 self._cache[path][b'exists']
2438 self._cache[path][b'exists']
2439 and b'l' in self._cache[path][b'flags']
2439 and b'l' in self._cache[path][b'flags']
2440 ):
2440 ):
2441 return self.exists(self._cache[path][b'data'].strip())
2441 return self.exists(self._cache[path][b'data'].strip())
2442 else:
2442 else:
2443 return self._cache[path][b'exists']
2443 return self._cache[path][b'exists']
2444
2444
2445 return self._existsinparent(path)
2445 return self._existsinparent(path)
2446
2446
2447 def lexists(self, path):
2447 def lexists(self, path):
2448 """lexists returns True if the path exists"""
2448 """lexists returns True if the path exists"""
2449 if self.isdirty(path):
2449 if self.isdirty(path):
2450 return self._cache[path][b'exists']
2450 return self._cache[path][b'exists']
2451
2451
2452 return self._existsinparent(path)
2452 return self._existsinparent(path)
2453
2453
2454 def size(self, path):
2454 def size(self, path):
2455 if self.isdirty(path):
2455 if self.isdirty(path):
2456 if self._cache[path][b'exists']:
2456 if self._cache[path][b'exists']:
2457 return len(self._cache[path][b'data'])
2457 return len(self._cache[path][b'data'])
2458 else:
2458 else:
2459 raise error.ProgrammingError(
2459 raise error.ProgrammingError(
2460 b"No such file or directory: %s" % path
2460 b"No such file or directory: %s" % path
2461 )
2461 )
2462 return self._wrappedctx[path].size()
2462 return self._wrappedctx[path].size()
2463
2463
2464 def tomemctx(
2464 def tomemctx(
2465 self,
2465 self,
2466 text,
2466 text,
2467 branch=None,
2467 branch=None,
2468 extra=None,
2468 extra=None,
2469 date=None,
2469 date=None,
2470 parents=None,
2470 parents=None,
2471 user=None,
2471 user=None,
2472 editor=None,
2472 editor=None,
2473 ):
2473 ):
2474 """Converts this ``overlayworkingctx`` into a ``memctx`` ready to be
2474 """Converts this ``overlayworkingctx`` into a ``memctx`` ready to be
2475 committed.
2475 committed.
2476
2476
2477 ``text`` is the commit message.
2477 ``text`` is the commit message.
2478 ``parents`` (optional) are rev numbers.
2478 ``parents`` (optional) are rev numbers.
2479 """
2479 """
2480 # Default parents to the wrapped context if not passed.
2480 # Default parents to the wrapped context if not passed.
2481 if parents is None:
2481 if parents is None:
2482 parents = self.parents()
2482 parents = self.parents()
2483 if len(parents) == 1:
2483 if len(parents) == 1:
2484 parents = (parents[0], None)
2484 parents = (parents[0], None)
2485
2485
2486 # ``parents`` is passed as rev numbers; convert to ``commitctxs``.
2486 # ``parents`` is passed as rev numbers; convert to ``commitctxs``.
2487 if parents[1] is None:
2487 if parents[1] is None:
2488 parents = (self._repo[parents[0]], None)
2488 parents = (self._repo[parents[0]], None)
2489 else:
2489 else:
2490 parents = (self._repo[parents[0]], self._repo[parents[1]])
2490 parents = (self._repo[parents[0]], self._repo[parents[1]])
2491
2491
2492 files = self.files()
2492 files = self.files()
2493
2493
2494 def getfile(repo, memctx, path):
2494 def getfile(repo, memctx, path):
2495 if self._cache[path][b'exists']:
2495 if self._cache[path][b'exists']:
2496 return memfilectx(
2496 return memfilectx(
2497 repo,
2497 repo,
2498 memctx,
2498 memctx,
2499 path,
2499 path,
2500 self._cache[path][b'data'],
2500 self._cache[path][b'data'],
2501 b'l' in self._cache[path][b'flags'],
2501 b'l' in self._cache[path][b'flags'],
2502 b'x' in self._cache[path][b'flags'],
2502 b'x' in self._cache[path][b'flags'],
2503 self._cache[path][b'copied'],
2503 self._cache[path][b'copied'],
2504 )
2504 )
2505 else:
2505 else:
2506 # Returning None, but including the path in `files`, is
2506 # Returning None, but including the path in `files`, is
2507 # necessary for memctx to register a deletion.
2507 # necessary for memctx to register a deletion.
2508 return None
2508 return None
2509
2509
2510 if branch is None:
2510 if branch is None:
2511 branch = self._wrappedctx.branch()
2511 branch = self._wrappedctx.branch()
2512
2512
2513 return memctx(
2513 return memctx(
2514 self._repo,
2514 self._repo,
2515 parents,
2515 parents,
2516 text,
2516 text,
2517 files,
2517 files,
2518 getfile,
2518 getfile,
2519 date=date,
2519 date=date,
2520 extra=extra,
2520 extra=extra,
2521 user=user,
2521 user=user,
2522 branch=branch,
2522 branch=branch,
2523 editor=editor,
2523 editor=editor,
2524 )
2524 )
2525
2525
2526 def tomemctx_for_amend(self, precursor):
2526 def tomemctx_for_amend(self, precursor):
2527 extra = precursor.extra().copy()
2527 extra = precursor.extra().copy()
2528 extra[b'amend_source'] = precursor.hex()
2528 extra[b'amend_source'] = precursor.hex()
2529 return self.tomemctx(
2529 return self.tomemctx(
2530 text=precursor.description(),
2530 text=precursor.description(),
2531 branch=precursor.branch(),
2531 branch=precursor.branch(),
2532 extra=extra,
2532 extra=extra,
2533 date=precursor.date(),
2533 date=precursor.date(),
2534 user=precursor.user(),
2534 user=precursor.user(),
2535 )
2535 )
2536
2536
2537 def isdirty(self, path):
2537 def isdirty(self, path):
2538 return path in self._cache
2538 return path in self._cache
2539
2539
2540 def clean(self):
2540 def clean(self):
2541 self._mergestate = None
2541 self._mergestate = None
2542 self._cache = {}
2542 self._cache = {}
2543
2543
2544 def _compact(self):
2544 def _compact(self):
2545 """Removes keys from the cache that are actually clean, by comparing
2545 """Removes keys from the cache that are actually clean, by comparing
2546 them with the underlying context.
2546 them with the underlying context.
2547
2547
2548 This can occur during the merge process, e.g. by passing --tool :local
2548 This can occur during the merge process, e.g. by passing --tool :local
2549 to resolve a conflict.
2549 to resolve a conflict.
2550 """
2550 """
2551 keys = []
2551 keys = []
2552 # This won't be perfect, but can help performance significantly when
2552 # This won't be perfect, but can help performance significantly when
2553 # using things like remotefilelog.
2553 # using things like remotefilelog.
2554 scmutil.prefetchfiles(
2554 scmutil.prefetchfiles(
2555 self.repo(),
2555 self.repo(),
2556 [
2556 [
2557 (
2557 (
2558 self.p1().rev(),
2558 self.p1().rev(),
2559 scmutil.matchfiles(self.repo(), self._cache.keys()),
2559 scmutil.matchfiles(self.repo(), self._cache.keys()),
2560 )
2560 )
2561 ],
2561 ],
2562 )
2562 )
2563
2563
2564 for path in self._cache.keys():
2564 for path in self._cache.keys():
2565 cache = self._cache[path]
2565 cache = self._cache[path]
2566 try:
2566 try:
2567 underlying = self._wrappedctx[path]
2567 underlying = self._wrappedctx[path]
2568 if (
2568 if (
2569 underlying.data() == cache[b'data']
2569 underlying.data() == cache[b'data']
2570 and underlying.flags() == cache[b'flags']
2570 and underlying.flags() == cache[b'flags']
2571 ):
2571 ):
2572 keys.append(path)
2572 keys.append(path)
2573 except error.ManifestLookupError:
2573 except error.ManifestLookupError:
2574 # Path not in the underlying manifest (created).
2574 # Path not in the underlying manifest (created).
2575 continue
2575 continue
2576
2576
2577 for path in keys:
2577 for path in keys:
2578 del self._cache[path]
2578 del self._cache[path]
2579 return keys
2579 return keys
2580
2580
2581 def _markdirty(
2581 def _markdirty(
2582 self, path, exists, data=None, date=None, flags=b'', copied=None
2582 self, path, exists, data=None, date=None, flags=b'', copied=None
2583 ):
2583 ):
2584 # data not provided, let's see if we already have some; if not, let's
2584 # data not provided, let's see if we already have some; if not, let's
2585 # grab it from our underlying context, so that we always have data if
2585 # grab it from our underlying context, so that we always have data if
2586 # the file is marked as existing.
2586 # the file is marked as existing.
2587 if exists and data is None:
2587 if exists and data is None:
2588 oldentry = self._cache.get(path) or {}
2588 oldentry = self._cache.get(path) or {}
2589 data = oldentry.get(b'data')
2589 data = oldentry.get(b'data')
2590 if data is None:
2590 if data is None:
2591 data = self._wrappedctx[path].data()
2591 data = self._wrappedctx[path].data()
2592
2592
2593 self._cache[path] = {
2593 self._cache[path] = {
2594 b'exists': exists,
2594 b'exists': exists,
2595 b'data': data,
2595 b'data': data,
2596 b'date': date,
2596 b'date': date,
2597 b'flags': flags,
2597 b'flags': flags,
2598 b'copied': copied,
2598 b'copied': copied,
2599 }
2599 }
2600 util.clearcachedproperty(self, b'_manifest')
2600
2601
2601 def filectx(self, path, filelog=None):
2602 def filectx(self, path, filelog=None):
2602 return overlayworkingfilectx(
2603 return overlayworkingfilectx(
2603 self._repo, path, parent=self, filelog=filelog
2604 self._repo, path, parent=self, filelog=filelog
2604 )
2605 )
2605
2606
2606 def mergestate(self, clean=False):
2607 def mergestate(self, clean=False):
2607 if clean or self._mergestate is None:
2608 if clean or self._mergestate is None:
2608 self._mergestate = mergestatemod.memmergestate(self._repo)
2609 self._mergestate = mergestatemod.memmergestate(self._repo)
2609 return self._mergestate
2610 return self._mergestate
2610
2611
2611
2612
2612 class overlayworkingfilectx(committablefilectx):
2613 class overlayworkingfilectx(committablefilectx):
2613 """Wrap a ``workingfilectx`` but intercepts all writes into an in-memory
2614 """Wrap a ``workingfilectx`` but intercepts all writes into an in-memory
2614 cache, which can be flushed through later by calling ``flush()``."""
2615 cache, which can be flushed through later by calling ``flush()``."""
2615
2616
2616 def __init__(self, repo, path, filelog=None, parent=None):
2617 def __init__(self, repo, path, filelog=None, parent=None):
2617 super(overlayworkingfilectx, self).__init__(repo, path, filelog, parent)
2618 super(overlayworkingfilectx, self).__init__(repo, path, filelog, parent)
2618 self._repo = repo
2619 self._repo = repo
2619 self._parent = parent
2620 self._parent = parent
2620 self._path = path
2621 self._path = path
2621
2622
2622 def cmp(self, fctx):
2623 def cmp(self, fctx):
2623 return self.data() != fctx.data()
2624 return self.data() != fctx.data()
2624
2625
2625 def changectx(self):
2626 def changectx(self):
2626 return self._parent
2627 return self._parent
2627
2628
2628 def data(self):
2629 def data(self):
2629 return self._parent.data(self._path)
2630 return self._parent.data(self._path)
2630
2631
2631 def date(self):
2632 def date(self):
2632 return self._parent.filedate(self._path)
2633 return self._parent.filedate(self._path)
2633
2634
2634 def exists(self):
2635 def exists(self):
2635 return self.lexists()
2636 return self.lexists()
2636
2637
2637 def lexists(self):
2638 def lexists(self):
2638 return self._parent.exists(self._path)
2639 return self._parent.exists(self._path)
2639
2640
2640 def copysource(self):
2641 def copysource(self):
2641 return self._parent.copydata(self._path)
2642 return self._parent.copydata(self._path)
2642
2643
2643 def size(self):
2644 def size(self):
2644 return self._parent.size(self._path)
2645 return self._parent.size(self._path)
2645
2646
2646 def markcopied(self, origin):
2647 def markcopied(self, origin):
2647 self._parent.markcopied(self._path, origin)
2648 self._parent.markcopied(self._path, origin)
2648
2649
2649 def audit(self):
2650 def audit(self):
2650 pass
2651 pass
2651
2652
2652 def flags(self):
2653 def flags(self):
2653 return self._parent.flags(self._path)
2654 return self._parent.flags(self._path)
2654
2655
2655 def setflags(self, islink, isexec):
2656 def setflags(self, islink, isexec):
2656 return self._parent.setflags(self._path, islink, isexec)
2657 return self._parent.setflags(self._path, islink, isexec)
2657
2658
2658 def write(self, data, flags, backgroundclose=False, **kwargs):
2659 def write(self, data, flags, backgroundclose=False, **kwargs):
2659 return self._parent.write(self._path, data, flags, **kwargs)
2660 return self._parent.write(self._path, data, flags, **kwargs)
2660
2661
2661 def remove(self, ignoremissing=False):
2662 def remove(self, ignoremissing=False):
2662 return self._parent.remove(self._path)
2663 return self._parent.remove(self._path)
2663
2664
2664 def clearunknown(self):
2665 def clearunknown(self):
2665 pass
2666 pass
2666
2667
2667
2668
2668 class workingcommitctx(workingctx):
2669 class workingcommitctx(workingctx):
2669 """A workingcommitctx object makes access to data related to
2670 """A workingcommitctx object makes access to data related to
2670 the revision being committed convenient.
2671 the revision being committed convenient.
2671
2672
2672 This hides changes in the working directory, if they aren't
2673 This hides changes in the working directory, if they aren't
2673 committed in this context.
2674 committed in this context.
2674 """
2675 """
2675
2676
2676 def __init__(
2677 def __init__(
2677 self, repo, changes, text=b"", user=None, date=None, extra=None
2678 self, repo, changes, text=b"", user=None, date=None, extra=None
2678 ):
2679 ):
2679 super(workingcommitctx, self).__init__(
2680 super(workingcommitctx, self).__init__(
2680 repo, text, user, date, extra, changes
2681 repo, text, user, date, extra, changes
2681 )
2682 )
2682
2683
2683 def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
2684 def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
2684 """Return matched files only in ``self._status``
2685 """Return matched files only in ``self._status``
2685
2686
2686 Uncommitted files appear "clean" via this context, even if
2687 Uncommitted files appear "clean" via this context, even if
2687 they aren't actually so in the working directory.
2688 they aren't actually so in the working directory.
2688 """
2689 """
2689 if clean:
2690 if clean:
2690 clean = [f for f in self._manifest if f not in self._changedset]
2691 clean = [f for f in self._manifest if f not in self._changedset]
2691 else:
2692 else:
2692 clean = []
2693 clean = []
2693 return scmutil.status(
2694 return scmutil.status(
2694 [f for f in self._status.modified if match(f)],
2695 [f for f in self._status.modified if match(f)],
2695 [f for f in self._status.added if match(f)],
2696 [f for f in self._status.added if match(f)],
2696 [f for f in self._status.removed if match(f)],
2697 [f for f in self._status.removed if match(f)],
2697 [],
2698 [],
2698 [],
2699 [],
2699 [],
2700 [],
2700 clean,
2701 clean,
2701 )
2702 )
2702
2703
2703 @propertycache
2704 @propertycache
2704 def _changedset(self):
2705 def _changedset(self):
2705 """Return the set of files changed in this context"""
2706 """Return the set of files changed in this context"""
2706 changed = set(self._status.modified)
2707 changed = set(self._status.modified)
2707 changed.update(self._status.added)
2708 changed.update(self._status.added)
2708 changed.update(self._status.removed)
2709 changed.update(self._status.removed)
2709 return changed
2710 return changed
2710
2711
2711
2712
2712 def makecachingfilectxfn(func):
2713 def makecachingfilectxfn(func):
2713 """Create a filectxfn that caches based on the path.
2714 """Create a filectxfn that caches based on the path.
2714
2715
2715 We can't use util.cachefunc because it uses all arguments as the cache
2716 We can't use util.cachefunc because it uses all arguments as the cache
2716 key and this creates a cycle since the arguments include the repo and
2717 key and this creates a cycle since the arguments include the repo and
2717 memctx.
2718 memctx.
2718 """
2719 """
2719 cache = {}
2720 cache = {}
2720
2721
2721 def getfilectx(repo, memctx, path):
2722 def getfilectx(repo, memctx, path):
2722 if path not in cache:
2723 if path not in cache:
2723 cache[path] = func(repo, memctx, path)
2724 cache[path] = func(repo, memctx, path)
2724 return cache[path]
2725 return cache[path]
2725
2726
2726 return getfilectx
2727 return getfilectx
2727
2728
2728
2729
2729 def memfilefromctx(ctx):
2730 def memfilefromctx(ctx):
2730 """Given a context return a memfilectx for ctx[path]
2731 """Given a context return a memfilectx for ctx[path]
2731
2732
2732 This is a convenience method for building a memctx based on another
2733 This is a convenience method for building a memctx based on another
2733 context.
2734 context.
2734 """
2735 """
2735
2736
2736 def getfilectx(repo, memctx, path):
2737 def getfilectx(repo, memctx, path):
2737 fctx = ctx[path]
2738 fctx = ctx[path]
2738 copysource = fctx.copysource()
2739 copysource = fctx.copysource()
2739 return memfilectx(
2740 return memfilectx(
2740 repo,
2741 repo,
2741 memctx,
2742 memctx,
2742 path,
2743 path,
2743 fctx.data(),
2744 fctx.data(),
2744 islink=fctx.islink(),
2745 islink=fctx.islink(),
2745 isexec=fctx.isexec(),
2746 isexec=fctx.isexec(),
2746 copysource=copysource,
2747 copysource=copysource,
2747 )
2748 )
2748
2749
2749 return getfilectx
2750 return getfilectx
2750
2751
2751
2752
2752 def memfilefrompatch(patchstore):
2753 def memfilefrompatch(patchstore):
2753 """Given a patch (e.g. patchstore object) return a memfilectx
2754 """Given a patch (e.g. patchstore object) return a memfilectx
2754
2755
2755 This is a convenience method for building a memctx based on a patchstore.
2756 This is a convenience method for building a memctx based on a patchstore.
2756 """
2757 """
2757
2758
2758 def getfilectx(repo, memctx, path):
2759 def getfilectx(repo, memctx, path):
2759 data, mode, copysource = patchstore.getfile(path)
2760 data, mode, copysource = patchstore.getfile(path)
2760 if data is None:
2761 if data is None:
2761 return None
2762 return None
2762 islink, isexec = mode
2763 islink, isexec = mode
2763 return memfilectx(
2764 return memfilectx(
2764 repo,
2765 repo,
2765 memctx,
2766 memctx,
2766 path,
2767 path,
2767 data,
2768 data,
2768 islink=islink,
2769 islink=islink,
2769 isexec=isexec,
2770 isexec=isexec,
2770 copysource=copysource,
2771 copysource=copysource,
2771 )
2772 )
2772
2773
2773 return getfilectx
2774 return getfilectx
2774
2775
2775
2776
class memctx(committablectx):
    """Use memctx to perform in-memory commits via localrepo.commitctx().

    Revision information is supplied at initialization time while
    related files data and is made available through a callback
    mechanism. 'repo' is the current localrepo, 'parents' is a
    sequence of two parent revisions identifiers (pass None for every
    missing parent), 'text' is the commit message and 'files' lists
    names of files touched by the revision (normalized and relative to
    repository root).

    filectxfn(repo, memctx, path) is a callable receiving the
    repository, the current memctx object and the normalized path of
    requested file, relative to repository root. It is fired by the
    commit function for every file in 'files', but calls order is
    undefined. If the file is available in the revision being
    committed (updated or added), filectxfn returns a memfilectx
    object. If the file was removed, filectxfn return None for recent
    Mercurial. Moved files are represented by marking the source file
    removed and the new file added with copy information (see
    memfilectx).

    user receives the committer name and defaults to current
    repository username, date is the commit date in any format
    supported by dateutil.parsedate() and defaults to current date, extra
    is a dictionary of metadata or is left empty.
    """

    # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
    # Extensions that need to retain compatibility across Mercurial 3.1 can use
    # this field to determine what to do in filectxfn.
    _returnnoneformissingfiles = True

    def __init__(
        self,
        repo,
        parents,
        text,
        files,
        filectxfn,
        user=None,
        date=None,
        extra=None,
        branch=None,
        editor=None,
    ):
        super(memctx, self).__init__(
            repo, text, user, date, extra, branch=branch
        )
        # Not yet committed, so there is no revision number or node id.
        self._rev = None
        self._node = None
        # A missing (None) parent is normalized to the null revision.
        parents = [(p or nullid) for p in parents]
        p1, p2 = parents
        self._parents = [self._repo[p] for p in (p1, p2)]
        files = sorted(set(files))
        self._files = files
        self.substate = {}

        # Accept a patch.filestore or a plain ctx-like object in place of a
        # callable; wrap either into the filectxfn(repo, memctx, path) shape.
        if isinstance(filectxfn, patch.filestore):
            filectxfn = memfilefrompatch(filectxfn)
        elif not callable(filectxfn):
            # if store is not callable, wrap it in a function
            filectxfn = memfilefromctx(filectxfn)

        # memoizing increases performance for e.g. vcs convert scenarios.
        self._filectxfn = makecachingfilectxfn(filectxfn)

        # Run the editor last so it sees the fully-initialized context, and
        # persist the resulting message so it can be recovered on failure.
        if editor:
            self._text = editor(self._repo, self, [])
            self._repo.savecommitmessage(self._text)

    def filectx(self, path, filelog=None):
        """get a file context from the working directory

        Returns None if file doesn't exist and should be removed."""
        return self._filectxfn(self._repo, self, path)

    def commit(self):
        """commit context to the repo"""
        return self._repo.commitctx(self)

    @propertycache
    def _manifest(self):
        """generate a manifest based on the return values of filectxfn"""

        # keep this simple for now; just worry about p1
        pctx = self._parents[0]
        man = pctx.manifest().copy()

        # Entries use sentinel node ids (modifiednodeid/addednodeid) since
        # real file nodes do not exist until the commit is written.
        for f in self._status.modified:
            man[f] = modifiednodeid

        for f in self._status.added:
            man[f] = addednodeid

        for f in self._status.removed:
            if f in man:
                del man[f]

        return man

    @propertycache
    def _status(self):
        """Calculate exact status from ``files`` specified at construction"""
        man1 = self.p1().manifest()
        p2 = self._parents[1]
        # "1 < len(self._parents)" can't be used for checking
        # existence of the 2nd parent, because "memctx._parents" is
        # explicitly initialized by the list, of which length is 2.
        if p2.node() != nullid:
            man2 = p2.manifest()
            managing = lambda f: f in man1 or f in man2
        else:
            managing = lambda f: f in man1

        modified, added, removed = [], [], []
        for f in self._files:
            if not managing(f):
                # Unknown to both parents: a newly added file.
                added.append(f)
            elif self[f]:
                # filectxfn returned a filectx: file content exists here.
                modified.append(f)
            else:
                # filectxfn returned None: file was removed.
                removed.append(f)

        return scmutil.status(modified, added, removed, [], [], [], [])

    def parents(self):
        # Hide the null second parent so single-parent commits report a
        # one-element parent list, matching other changectx classes.
        if self._parents[1].node() == nullid:
            return [self._parents[0]]
        return self._parents
2906
2907
2907
2908
class memfilectx(committablefilectx):
    """An in-memory file scheduled to be committed.

    See memctx and committablefilectx for more details.
    """

    def __init__(
        self,
        repo,
        changectx,
        path,
        data,
        islink=False,
        isexec=False,
        copysource=None,
    ):
        """
        path is the normalized file path relative to repository root.
        data is the file content as a string.
        islink is True if the file is a symbolic link.
        isexec is True if the file is executable.
        copied is the source file path if current file was copied in the
        revision being committed, or None."""
        super(memfilectx, self).__init__(repo, path, None, changectx)
        self._data = data
        # Symlink takes precedence over the executable bit.
        self._flags = b'l' if islink else (b'x' if isexec else b'')
        self._copysource = copysource

    def copysource(self):
        return self._copysource

    def cmp(self, fctx):
        """Return True when this file's content differs from ``fctx``'s."""
        return fctx.data() != self.data()

    def data(self):
        return self._data

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        # need to figure out what to do here
        del self._changectx[self._path]

    def write(self, data, flags, **kwargs):
        """wraps repo.wwrite"""
        self._data = data
2958
2959
2959
2960
class metadataonlyctx(committablectx):
    """Like memctx but it's reusing the manifest of different commit.
    Intended to be used by lightweight operations that are creating
    metadata-only changes.

    Revision information is supplied at initialization time. 'repo' is the
    current localrepo, 'ctx' is original revision which manifest we're reusing
    'parents' is a sequence of two parent revisions identifiers (pass None for
    every missing parent), 'text' is the commit.

    user receives the committer name and defaults to current repository
    username, date is the commit date in any format supported by
    dateutil.parsedate() and defaults to current date, extra is a dictionary of
    metadata or is left empty.
    """

    def __init__(
        self,
        repo,
        originalctx,
        parents=None,
        text=None,
        user=None,
        date=None,
        extra=None,
        editor=None,
    ):
        # Default to reusing the original commit message as well.
        if text is None:
            text = originalctx.description()
        super(metadataonlyctx, self).__init__(repo, text, user, date, extra)
        # Not yet committed, so there is no revision number or node id.
        self._rev = None
        self._node = None
        self._originalctx = originalctx
        # The whole point of this class: reuse the original manifest node.
        self._manifestnode = originalctx.manifestnode()
        if parents is None:
            parents = originalctx.parents()
        else:
            parents = [repo[p] for p in parents if p is not None]
        # Copy before padding so a caller-supplied list is not mutated;
        # pad with the null revision to always have exactly two parents.
        parents = parents[:]
        while len(parents) < 2:
            parents.append(repo[nullid])
        p1, p2 = self._parents = parents

        # sanity check to ensure that the reused manifest parents are
        # manifests of our commit parents
        mp1, mp2 = self.manifestctx().parents
        if p1 != nullid and p1.manifestnode() != mp1:
            raise RuntimeError(
                r"can't reuse the manifest: its p1 "
                r"doesn't match the new ctx p1"
            )
        if p2 != nullid and p2.manifestnode() != mp2:
            raise RuntimeError(
                r"can't reuse the manifest: "
                r"its p2 doesn't match the new ctx p2"
            )

        self._files = originalctx.files()
        self.substate = {}

        # Run the editor last so it sees the fully-initialized context, and
        # persist the resulting message so it can be recovered on failure.
        if editor:
            self._text = editor(self._repo, self, [])
            self._repo.savecommitmessage(self._text)

    def manifestnode(self):
        return self._manifestnode

    @property
    def _manifestctx(self):
        return self._repo.manifestlog[self._manifestnode]

    def filectx(self, path, filelog=None):
        # File data is delegated entirely to the original revision.
        return self._originalctx.filectx(path, filelog=filelog)

    def commit(self):
        """commit context to the repo"""
        return self._repo.commitctx(self)

    @property
    def _manifest(self):
        return self._originalctx.manifest()

    @propertycache
    def _status(self):
        """Calculate exact status from ``files`` specified in the ``origctx``
        and parents manifests.
        """
        man1 = self.p1().manifest()
        p2 = self._parents[1]
        # "1 < len(self._parents)" can't be used for checking
        # existence of the 2nd parent, because "metadataonlyctx._parents" is
        # explicitly initialized by the list, of which length is 2.
        if p2.node() != nullid:
            man2 = p2.manifest()
            managing = lambda f: f in man1 or f in man2
        else:
            managing = lambda f: f in man1

        modified, added, removed = [], [], []
        for f in self._files:
            if not managing(f):
                # Unknown to both parents: a newly added file.
                added.append(f)
            elif f in self:
                modified.append(f)
            else:
                removed.append(f)

        return scmutil.status(modified, added, removed, [], [], [], [])
3068
3069
3069
3070
3070 class arbitraryfilectx(object):
3071 class arbitraryfilectx(object):
3071 """Allows you to use filectx-like functions on a file in an arbitrary
3072 """Allows you to use filectx-like functions on a file in an arbitrary
3072 location on disk, possibly not in the working directory.
3073 location on disk, possibly not in the working directory.
3073 """
3074 """
3074
3075
3075 def __init__(self, path, repo=None):
3076 def __init__(self, path, repo=None):
3076 # Repo is optional because contrib/simplemerge uses this class.
3077 # Repo is optional because contrib/simplemerge uses this class.
3077 self._repo = repo
3078 self._repo = repo
3078 self._path = path
3079 self._path = path
3079
3080
3080 def cmp(self, fctx):
3081 def cmp(self, fctx):
3081 # filecmp follows symlinks whereas `cmp` should not, so skip the fast
3082 # filecmp follows symlinks whereas `cmp` should not, so skip the fast
3082 # path if either side is a symlink.
3083 # path if either side is a symlink.
3083 symlinks = b'l' in self.flags() or b'l' in fctx.flags()
3084 symlinks = b'l' in self.flags() or b'l' in fctx.flags()
3084 if not symlinks and isinstance(fctx, workingfilectx) and self._repo:
3085 if not symlinks and isinstance(fctx, workingfilectx) and self._repo:
3085 # Add a fast-path for merge if both sides are disk-backed.
3086 # Add a fast-path for merge if both sides are disk-backed.
3086 # Note that filecmp uses the opposite return values (True if same)
3087 # Note that filecmp uses the opposite return values (True if same)
3087 # from our cmp functions (True if different).
3088 # from our cmp functions (True if different).
3088 return not filecmp.cmp(self.path(), self._repo.wjoin(fctx.path()))
3089 return not filecmp.cmp(self.path(), self._repo.wjoin(fctx.path()))
3089 return self.data() != fctx.data()
3090 return self.data() != fctx.data()
3090
3091
3091 def path(self):
3092 def path(self):
3092 return self._path
3093 return self._path
3093
3094
3094 def flags(self):
3095 def flags(self):
3095 return b''
3096 return b''
3096
3097
3097 def data(self):
3098 def data(self):
3098 return util.readfile(self._path)
3099 return util.readfile(self._path)
3099
3100
3100 def decodeddata(self):
3101 def decodeddata(self):
3101 with open(self._path, b"rb") as f:
3102 with open(self._path, b"rb") as f:
3102 return f.read()
3103 return f.read()
3103
3104
3104 def remove(self):
3105 def remove(self):
3105 util.unlink(self._path)
3106 util.unlink(self._path)
3106
3107
3107 def write(self, data, flags, **kwargs):
3108 def write(self, data, flags, **kwargs):
3108 assert not flags
3109 assert not flags
3109 with open(self._path, b"wb") as f:
3110 with open(self._path, b"wb") as f:
3110 f.write(data)
3111 f.write(data)
General Comments 0
You need to be logged in to leave comments. Login now