##// END OF EJS Templates
context: use manifest.walk() instead of manifest.match() to get file list...
Augie Fackler -
r44735:beea86e4 default
parent child Browse files
Show More
@@ -1,3057 +1,3056 b''
1 # context.py - changeset and file context objects for mercurial
1 # context.py - changeset and file context objects for mercurial
2 #
2 #
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import errno
10 import errno
11 import filecmp
11 import filecmp
12 import os
12 import os
13 import stat
13 import stat
14
14
15 from .i18n import _
15 from .i18n import _
16 from .node import (
16 from .node import (
17 addednodeid,
17 addednodeid,
18 hex,
18 hex,
19 modifiednodeid,
19 modifiednodeid,
20 nullid,
20 nullid,
21 nullrev,
21 nullrev,
22 short,
22 short,
23 wdirfilenodeids,
23 wdirfilenodeids,
24 wdirhex,
24 wdirhex,
25 )
25 )
26 from .pycompat import (
26 from .pycompat import (
27 getattr,
27 getattr,
28 open,
28 open,
29 )
29 )
30 from . import (
30 from . import (
31 copies,
31 copies,
32 dagop,
32 dagop,
33 encoding,
33 encoding,
34 error,
34 error,
35 fileset,
35 fileset,
36 match as matchmod,
36 match as matchmod,
37 obsolete as obsmod,
37 obsolete as obsmod,
38 patch,
38 patch,
39 pathutil,
39 pathutil,
40 phases,
40 phases,
41 pycompat,
41 pycompat,
42 repoview,
42 repoview,
43 scmutil,
43 scmutil,
44 sparse,
44 sparse,
45 subrepo,
45 subrepo,
46 subrepoutil,
46 subrepoutil,
47 util,
47 util,
48 )
48 )
49 from .utils import (
49 from .utils import (
50 dateutil,
50 dateutil,
51 stringutil,
51 stringutil,
52 )
52 )
53
53
54 propertycache = util.propertycache
54 propertycache = util.propertycache
55
55
56
56
57 class basectx(object):
57 class basectx(object):
58 """A basectx object represents the common logic for its children:
58 """A basectx object represents the common logic for its children:
59 changectx: read-only context that is already present in the repo,
59 changectx: read-only context that is already present in the repo,
60 workingctx: a context that represents the working directory and can
60 workingctx: a context that represents the working directory and can
61 be committed,
61 be committed,
62 memctx: a context that represents changes in-memory and can also
62 memctx: a context that represents changes in-memory and can also
63 be committed."""
63 be committed."""
64
64
65 def __init__(self, repo):
65 def __init__(self, repo):
66 self._repo = repo
66 self._repo = repo
67
67
68 def __bytes__(self):
68 def __bytes__(self):
69 return short(self.node())
69 return short(self.node())
70
70
71 __str__ = encoding.strmethod(__bytes__)
71 __str__ = encoding.strmethod(__bytes__)
72
72
73 def __repr__(self):
73 def __repr__(self):
74 return "<%s %s>" % (type(self).__name__, str(self))
74 return "<%s %s>" % (type(self).__name__, str(self))
75
75
76 def __eq__(self, other):
76 def __eq__(self, other):
77 try:
77 try:
78 return type(self) == type(other) and self._rev == other._rev
78 return type(self) == type(other) and self._rev == other._rev
79 except AttributeError:
79 except AttributeError:
80 return False
80 return False
81
81
82 def __ne__(self, other):
82 def __ne__(self, other):
83 return not (self == other)
83 return not (self == other)
84
84
85 def __contains__(self, key):
85 def __contains__(self, key):
86 return key in self._manifest
86 return key in self._manifest
87
87
88 def __getitem__(self, key):
88 def __getitem__(self, key):
89 return self.filectx(key)
89 return self.filectx(key)
90
90
91 def __iter__(self):
91 def __iter__(self):
92 return iter(self._manifest)
92 return iter(self._manifest)
93
93
94 def _buildstatusmanifest(self, status):
94 def _buildstatusmanifest(self, status):
95 """Builds a manifest that includes the given status results, if this is
95 """Builds a manifest that includes the given status results, if this is
96 a working copy context. For non-working copy contexts, it just returns
96 a working copy context. For non-working copy contexts, it just returns
97 the normal manifest."""
97 the normal manifest."""
98 return self.manifest()
98 return self.manifest()
99
99
100 def _matchstatus(self, other, match):
100 def _matchstatus(self, other, match):
101 """This internal method provides a way for child objects to override the
101 """This internal method provides a way for child objects to override the
102 match operator.
102 match operator.
103 """
103 """
104 return match
104 return match
105
105
106 def _buildstatus(
106 def _buildstatus(
107 self, other, s, match, listignored, listclean, listunknown
107 self, other, s, match, listignored, listclean, listunknown
108 ):
108 ):
109 """build a status with respect to another context"""
109 """build a status with respect to another context"""
110 # Load earliest manifest first for caching reasons. More specifically,
110 # Load earliest manifest first for caching reasons. More specifically,
111 # if you have revisions 1000 and 1001, 1001 is probably stored as a
111 # if you have revisions 1000 and 1001, 1001 is probably stored as a
112 # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
112 # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
113 # 1000 and cache it so that when you read 1001, we just need to apply a
113 # 1000 and cache it so that when you read 1001, we just need to apply a
114 # delta to what's in the cache. So that's one full reconstruction + one
114 # delta to what's in the cache. So that's one full reconstruction + one
115 # delta application.
115 # delta application.
116 mf2 = None
116 mf2 = None
117 if self.rev() is not None and self.rev() < other.rev():
117 if self.rev() is not None and self.rev() < other.rev():
118 mf2 = self._buildstatusmanifest(s)
118 mf2 = self._buildstatusmanifest(s)
119 mf1 = other._buildstatusmanifest(s)
119 mf1 = other._buildstatusmanifest(s)
120 if mf2 is None:
120 if mf2 is None:
121 mf2 = self._buildstatusmanifest(s)
121 mf2 = self._buildstatusmanifest(s)
122
122
123 modified, added = [], []
123 modified, added = [], []
124 removed = []
124 removed = []
125 clean = []
125 clean = []
126 deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
126 deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
127 deletedset = set(deleted)
127 deletedset = set(deleted)
128 d = mf1.diff(mf2, match=match, clean=listclean)
128 d = mf1.diff(mf2, match=match, clean=listclean)
129 for fn, value in pycompat.iteritems(d):
129 for fn, value in pycompat.iteritems(d):
130 if fn in deletedset:
130 if fn in deletedset:
131 continue
131 continue
132 if value is None:
132 if value is None:
133 clean.append(fn)
133 clean.append(fn)
134 continue
134 continue
135 (node1, flag1), (node2, flag2) = value
135 (node1, flag1), (node2, flag2) = value
136 if node1 is None:
136 if node1 is None:
137 added.append(fn)
137 added.append(fn)
138 elif node2 is None:
138 elif node2 is None:
139 removed.append(fn)
139 removed.append(fn)
140 elif flag1 != flag2:
140 elif flag1 != flag2:
141 modified.append(fn)
141 modified.append(fn)
142 elif node2 not in wdirfilenodeids:
142 elif node2 not in wdirfilenodeids:
143 # When comparing files between two commits, we save time by
143 # When comparing files between two commits, we save time by
144 # not comparing the file contents when the nodeids differ.
144 # not comparing the file contents when the nodeids differ.
145 # Note that this means we incorrectly report a reverted change
145 # Note that this means we incorrectly report a reverted change
146 # to a file as a modification.
146 # to a file as a modification.
147 modified.append(fn)
147 modified.append(fn)
148 elif self[fn].cmp(other[fn]):
148 elif self[fn].cmp(other[fn]):
149 modified.append(fn)
149 modified.append(fn)
150 else:
150 else:
151 clean.append(fn)
151 clean.append(fn)
152
152
153 if removed:
153 if removed:
154 # need to filter files if they are already reported as removed
154 # need to filter files if they are already reported as removed
155 unknown = [
155 unknown = [
156 fn
156 fn
157 for fn in unknown
157 for fn in unknown
158 if fn not in mf1 and (not match or match(fn))
158 if fn not in mf1 and (not match or match(fn))
159 ]
159 ]
160 ignored = [
160 ignored = [
161 fn
161 fn
162 for fn in ignored
162 for fn in ignored
163 if fn not in mf1 and (not match or match(fn))
163 if fn not in mf1 and (not match or match(fn))
164 ]
164 ]
165 # if they're deleted, don't report them as removed
165 # if they're deleted, don't report them as removed
166 removed = [fn for fn in removed if fn not in deletedset]
166 removed = [fn for fn in removed if fn not in deletedset]
167
167
168 return scmutil.status(
168 return scmutil.status(
169 modified, added, removed, deleted, unknown, ignored, clean
169 modified, added, removed, deleted, unknown, ignored, clean
170 )
170 )
171
171
172 @propertycache
172 @propertycache
173 def substate(self):
173 def substate(self):
174 return subrepoutil.state(self, self._repo.ui)
174 return subrepoutil.state(self, self._repo.ui)
175
175
176 def subrev(self, subpath):
176 def subrev(self, subpath):
177 return self.substate[subpath][1]
177 return self.substate[subpath][1]
178
178
179 def rev(self):
179 def rev(self):
180 return self._rev
180 return self._rev
181
181
182 def node(self):
182 def node(self):
183 return self._node
183 return self._node
184
184
185 def hex(self):
185 def hex(self):
186 return hex(self.node())
186 return hex(self.node())
187
187
188 def manifest(self):
188 def manifest(self):
189 return self._manifest
189 return self._manifest
190
190
191 def manifestctx(self):
191 def manifestctx(self):
192 return self._manifestctx
192 return self._manifestctx
193
193
194 def repo(self):
194 def repo(self):
195 return self._repo
195 return self._repo
196
196
197 def phasestr(self):
197 def phasestr(self):
198 return phases.phasenames[self.phase()]
198 return phases.phasenames[self.phase()]
199
199
200 def mutable(self):
200 def mutable(self):
201 return self.phase() > phases.public
201 return self.phase() > phases.public
202
202
203 def matchfileset(self, cwd, expr, badfn=None):
203 def matchfileset(self, cwd, expr, badfn=None):
204 return fileset.match(self, cwd, expr, badfn=badfn)
204 return fileset.match(self, cwd, expr, badfn=badfn)
205
205
206 def obsolete(self):
206 def obsolete(self):
207 """True if the changeset is obsolete"""
207 """True if the changeset is obsolete"""
208 return self.rev() in obsmod.getrevs(self._repo, b'obsolete')
208 return self.rev() in obsmod.getrevs(self._repo, b'obsolete')
209
209
210 def extinct(self):
210 def extinct(self):
211 """True if the changeset is extinct"""
211 """True if the changeset is extinct"""
212 return self.rev() in obsmod.getrevs(self._repo, b'extinct')
212 return self.rev() in obsmod.getrevs(self._repo, b'extinct')
213
213
214 def orphan(self):
214 def orphan(self):
215 """True if the changeset is not obsolete, but its ancestor is"""
215 """True if the changeset is not obsolete, but its ancestor is"""
216 return self.rev() in obsmod.getrevs(self._repo, b'orphan')
216 return self.rev() in obsmod.getrevs(self._repo, b'orphan')
217
217
218 def phasedivergent(self):
218 def phasedivergent(self):
219 """True if the changeset tries to be a successor of a public changeset
219 """True if the changeset tries to be a successor of a public changeset
220
220
221 Only non-public and non-obsolete changesets may be phase-divergent.
221 Only non-public and non-obsolete changesets may be phase-divergent.
222 """
222 """
223 return self.rev() in obsmod.getrevs(self._repo, b'phasedivergent')
223 return self.rev() in obsmod.getrevs(self._repo, b'phasedivergent')
224
224
225 def contentdivergent(self):
225 def contentdivergent(self):
226 """Is a successor of a changeset with multiple possible successor sets
226 """Is a successor of a changeset with multiple possible successor sets
227
227
228 Only non-public and non-obsolete changesets may be content-divergent.
228 Only non-public and non-obsolete changesets may be content-divergent.
229 """
229 """
230 return self.rev() in obsmod.getrevs(self._repo, b'contentdivergent')
230 return self.rev() in obsmod.getrevs(self._repo, b'contentdivergent')
231
231
232 def isunstable(self):
232 def isunstable(self):
233 """True if the changeset is either orphan, phase-divergent or
233 """True if the changeset is either orphan, phase-divergent or
234 content-divergent"""
234 content-divergent"""
235 return self.orphan() or self.phasedivergent() or self.contentdivergent()
235 return self.orphan() or self.phasedivergent() or self.contentdivergent()
236
236
237 def instabilities(self):
237 def instabilities(self):
238 """return the list of instabilities affecting this changeset.
238 """return the list of instabilities affecting this changeset.
239
239
240 Instabilities are returned as strings. possible values are:
240 Instabilities are returned as strings. possible values are:
241 - orphan,
241 - orphan,
242 - phase-divergent,
242 - phase-divergent,
243 - content-divergent.
243 - content-divergent.
244 """
244 """
245 instabilities = []
245 instabilities = []
246 if self.orphan():
246 if self.orphan():
247 instabilities.append(b'orphan')
247 instabilities.append(b'orphan')
248 if self.phasedivergent():
248 if self.phasedivergent():
249 instabilities.append(b'phase-divergent')
249 instabilities.append(b'phase-divergent')
250 if self.contentdivergent():
250 if self.contentdivergent():
251 instabilities.append(b'content-divergent')
251 instabilities.append(b'content-divergent')
252 return instabilities
252 return instabilities
253
253
254 def parents(self):
254 def parents(self):
255 """return contexts for each parent changeset"""
255 """return contexts for each parent changeset"""
256 return self._parents
256 return self._parents
257
257
258 def p1(self):
258 def p1(self):
259 return self._parents[0]
259 return self._parents[0]
260
260
261 def p2(self):
261 def p2(self):
262 parents = self._parents
262 parents = self._parents
263 if len(parents) == 2:
263 if len(parents) == 2:
264 return parents[1]
264 return parents[1]
265 return self._repo[nullrev]
265 return self._repo[nullrev]
266
266
267 def _fileinfo(self, path):
267 def _fileinfo(self, path):
268 if '_manifest' in self.__dict__:
268 if '_manifest' in self.__dict__:
269 try:
269 try:
270 return self._manifest[path], self._manifest.flags(path)
270 return self._manifest[path], self._manifest.flags(path)
271 except KeyError:
271 except KeyError:
272 raise error.ManifestLookupError(
272 raise error.ManifestLookupError(
273 self._node, path, _(b'not found in manifest')
273 self._node, path, _(b'not found in manifest')
274 )
274 )
275 if '_manifestdelta' in self.__dict__ or path in self.files():
275 if '_manifestdelta' in self.__dict__ or path in self.files():
276 if path in self._manifestdelta:
276 if path in self._manifestdelta:
277 return (
277 return (
278 self._manifestdelta[path],
278 self._manifestdelta[path],
279 self._manifestdelta.flags(path),
279 self._manifestdelta.flags(path),
280 )
280 )
281 mfl = self._repo.manifestlog
281 mfl = self._repo.manifestlog
282 try:
282 try:
283 node, flag = mfl[self._changeset.manifest].find(path)
283 node, flag = mfl[self._changeset.manifest].find(path)
284 except KeyError:
284 except KeyError:
285 raise error.ManifestLookupError(
285 raise error.ManifestLookupError(
286 self._node, path, _(b'not found in manifest')
286 self._node, path, _(b'not found in manifest')
287 )
287 )
288
288
289 return node, flag
289 return node, flag
290
290
291 def filenode(self, path):
291 def filenode(self, path):
292 return self._fileinfo(path)[0]
292 return self._fileinfo(path)[0]
293
293
294 def flags(self, path):
294 def flags(self, path):
295 try:
295 try:
296 return self._fileinfo(path)[1]
296 return self._fileinfo(path)[1]
297 except error.LookupError:
297 except error.LookupError:
298 return b''
298 return b''
299
299
300 @propertycache
300 @propertycache
301 def _copies(self):
301 def _copies(self):
302 return copies.computechangesetcopies(self)
302 return copies.computechangesetcopies(self)
303
303
304 def p1copies(self):
304 def p1copies(self):
305 return self._copies[0]
305 return self._copies[0]
306
306
307 def p2copies(self):
307 def p2copies(self):
308 return self._copies[1]
308 return self._copies[1]
309
309
310 def sub(self, path, allowcreate=True):
310 def sub(self, path, allowcreate=True):
311 '''return a subrepo for the stored revision of path, never wdir()'''
311 '''return a subrepo for the stored revision of path, never wdir()'''
312 return subrepo.subrepo(self, path, allowcreate=allowcreate)
312 return subrepo.subrepo(self, path, allowcreate=allowcreate)
313
313
314 def nullsub(self, path, pctx):
314 def nullsub(self, path, pctx):
315 return subrepo.nullsubrepo(self, path, pctx)
315 return subrepo.nullsubrepo(self, path, pctx)
316
316
317 def workingsub(self, path):
317 def workingsub(self, path):
318 '''return a subrepo for the stored revision, or wdir if this is a wdir
318 '''return a subrepo for the stored revision, or wdir if this is a wdir
319 context.
319 context.
320 '''
320 '''
321 return subrepo.subrepo(self, path, allowwdir=True)
321 return subrepo.subrepo(self, path, allowwdir=True)
322
322
323 def match(
323 def match(
324 self,
324 self,
325 pats=None,
325 pats=None,
326 include=None,
326 include=None,
327 exclude=None,
327 exclude=None,
328 default=b'glob',
328 default=b'glob',
329 listsubrepos=False,
329 listsubrepos=False,
330 badfn=None,
330 badfn=None,
331 cwd=None,
331 cwd=None,
332 ):
332 ):
333 r = self._repo
333 r = self._repo
334 if not cwd:
334 if not cwd:
335 cwd = r.getcwd()
335 cwd = r.getcwd()
336 return matchmod.match(
336 return matchmod.match(
337 r.root,
337 r.root,
338 cwd,
338 cwd,
339 pats,
339 pats,
340 include,
340 include,
341 exclude,
341 exclude,
342 default,
342 default,
343 auditor=r.nofsauditor,
343 auditor=r.nofsauditor,
344 ctx=self,
344 ctx=self,
345 listsubrepos=listsubrepos,
345 listsubrepos=listsubrepos,
346 badfn=badfn,
346 badfn=badfn,
347 )
347 )
348
348
349 def diff(
349 def diff(
350 self,
350 self,
351 ctx2=None,
351 ctx2=None,
352 match=None,
352 match=None,
353 changes=None,
353 changes=None,
354 opts=None,
354 opts=None,
355 losedatafn=None,
355 losedatafn=None,
356 pathfn=None,
356 pathfn=None,
357 copy=None,
357 copy=None,
358 copysourcematch=None,
358 copysourcematch=None,
359 hunksfilterfn=None,
359 hunksfilterfn=None,
360 ):
360 ):
361 """Returns a diff generator for the given contexts and matcher"""
361 """Returns a diff generator for the given contexts and matcher"""
362 if ctx2 is None:
362 if ctx2 is None:
363 ctx2 = self.p1()
363 ctx2 = self.p1()
364 if ctx2 is not None:
364 if ctx2 is not None:
365 ctx2 = self._repo[ctx2]
365 ctx2 = self._repo[ctx2]
366 return patch.diff(
366 return patch.diff(
367 self._repo,
367 self._repo,
368 ctx2,
368 ctx2,
369 self,
369 self,
370 match=match,
370 match=match,
371 changes=changes,
371 changes=changes,
372 opts=opts,
372 opts=opts,
373 losedatafn=losedatafn,
373 losedatafn=losedatafn,
374 pathfn=pathfn,
374 pathfn=pathfn,
375 copy=copy,
375 copy=copy,
376 copysourcematch=copysourcematch,
376 copysourcematch=copysourcematch,
377 hunksfilterfn=hunksfilterfn,
377 hunksfilterfn=hunksfilterfn,
378 )
378 )
379
379
380 def dirs(self):
380 def dirs(self):
381 return self._manifest.dirs()
381 return self._manifest.dirs()
382
382
383 def hasdir(self, dir):
383 def hasdir(self, dir):
384 return self._manifest.hasdir(dir)
384 return self._manifest.hasdir(dir)
385
385
386 def status(
386 def status(
387 self,
387 self,
388 other=None,
388 other=None,
389 match=None,
389 match=None,
390 listignored=False,
390 listignored=False,
391 listclean=False,
391 listclean=False,
392 listunknown=False,
392 listunknown=False,
393 listsubrepos=False,
393 listsubrepos=False,
394 ):
394 ):
395 """return status of files between two nodes or node and working
395 """return status of files between two nodes or node and working
396 directory.
396 directory.
397
397
398 If other is None, compare this node with working directory.
398 If other is None, compare this node with working directory.
399
399
400 returns (modified, added, removed, deleted, unknown, ignored, clean)
400 returns (modified, added, removed, deleted, unknown, ignored, clean)
401 """
401 """
402
402
403 ctx1 = self
403 ctx1 = self
404 ctx2 = self._repo[other]
404 ctx2 = self._repo[other]
405
405
406 # This next code block is, admittedly, fragile logic that tests for
406 # This next code block is, admittedly, fragile logic that tests for
407 # reversing the contexts and wouldn't need to exist if it weren't for
407 # reversing the contexts and wouldn't need to exist if it weren't for
408 # the fast (and common) code path of comparing the working directory
408 # the fast (and common) code path of comparing the working directory
409 # with its first parent.
409 # with its first parent.
410 #
410 #
411 # What we're aiming for here is the ability to call:
411 # What we're aiming for here is the ability to call:
412 #
412 #
413 # workingctx.status(parentctx)
413 # workingctx.status(parentctx)
414 #
414 #
415 # If we always built the manifest for each context and compared those,
415 # If we always built the manifest for each context and compared those,
416 # then we'd be done. But the special case of the above call means we
416 # then we'd be done. But the special case of the above call means we
417 # just copy the manifest of the parent.
417 # just copy the manifest of the parent.
418 reversed = False
418 reversed = False
419 if not isinstance(ctx1, changectx) and isinstance(ctx2, changectx):
419 if not isinstance(ctx1, changectx) and isinstance(ctx2, changectx):
420 reversed = True
420 reversed = True
421 ctx1, ctx2 = ctx2, ctx1
421 ctx1, ctx2 = ctx2, ctx1
422
422
423 match = self._repo.narrowmatch(match)
423 match = self._repo.narrowmatch(match)
424 match = ctx2._matchstatus(ctx1, match)
424 match = ctx2._matchstatus(ctx1, match)
425 r = scmutil.status([], [], [], [], [], [], [])
425 r = scmutil.status([], [], [], [], [], [], [])
426 r = ctx2._buildstatus(
426 r = ctx2._buildstatus(
427 ctx1, r, match, listignored, listclean, listunknown
427 ctx1, r, match, listignored, listclean, listunknown
428 )
428 )
429
429
430 if reversed:
430 if reversed:
431 # Reverse added and removed. Clear deleted, unknown and ignored as
431 # Reverse added and removed. Clear deleted, unknown and ignored as
432 # these make no sense to reverse.
432 # these make no sense to reverse.
433 r = scmutil.status(
433 r = scmutil.status(
434 r.modified, r.removed, r.added, [], [], [], r.clean
434 r.modified, r.removed, r.added, [], [], [], r.clean
435 )
435 )
436
436
437 if listsubrepos:
437 if listsubrepos:
438 for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
438 for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
439 try:
439 try:
440 rev2 = ctx2.subrev(subpath)
440 rev2 = ctx2.subrev(subpath)
441 except KeyError:
441 except KeyError:
442 # A subrepo that existed in node1 was deleted between
442 # A subrepo that existed in node1 was deleted between
443 # node1 and node2 (inclusive). Thus, ctx2's substate
443 # node1 and node2 (inclusive). Thus, ctx2's substate
444 # won't contain that subpath. The best we can do ignore it.
444 # won't contain that subpath. The best we can do ignore it.
445 rev2 = None
445 rev2 = None
446 submatch = matchmod.subdirmatcher(subpath, match)
446 submatch = matchmod.subdirmatcher(subpath, match)
447 s = sub.status(
447 s = sub.status(
448 rev2,
448 rev2,
449 match=submatch,
449 match=submatch,
450 ignored=listignored,
450 ignored=listignored,
451 clean=listclean,
451 clean=listclean,
452 unknown=listunknown,
452 unknown=listunknown,
453 listsubrepos=True,
453 listsubrepos=True,
454 )
454 )
455 for k in (
455 for k in (
456 'modified',
456 'modified',
457 'added',
457 'added',
458 'removed',
458 'removed',
459 'deleted',
459 'deleted',
460 'unknown',
460 'unknown',
461 'ignored',
461 'ignored',
462 'clean',
462 'clean',
463 ):
463 ):
464 rfiles, sfiles = getattr(r, k), getattr(s, k)
464 rfiles, sfiles = getattr(r, k), getattr(s, k)
465 rfiles.extend(b"%s/%s" % (subpath, f) for f in sfiles)
465 rfiles.extend(b"%s/%s" % (subpath, f) for f in sfiles)
466
466
467 r.modified.sort()
467 r.modified.sort()
468 r.added.sort()
468 r.added.sort()
469 r.removed.sort()
469 r.removed.sort()
470 r.deleted.sort()
470 r.deleted.sort()
471 r.unknown.sort()
471 r.unknown.sort()
472 r.ignored.sort()
472 r.ignored.sort()
473 r.clean.sort()
473 r.clean.sort()
474
474
475 return r
475 return r
476
476
477
477
478 class changectx(basectx):
478 class changectx(basectx):
479 """A changecontext object makes access to data related to a particular
479 """A changecontext object makes access to data related to a particular
480 changeset convenient. It represents a read-only context already present in
480 changeset convenient. It represents a read-only context already present in
481 the repo."""
481 the repo."""
482
482
483 def __init__(self, repo, rev, node, maybe_filtered=True):
483 def __init__(self, repo, rev, node, maybe_filtered=True):
484 super(changectx, self).__init__(repo)
484 super(changectx, self).__init__(repo)
485 self._rev = rev
485 self._rev = rev
486 self._node = node
486 self._node = node
487 # When maybe_filtered is True, the revision might be affected by
487 # When maybe_filtered is True, the revision might be affected by
488 # changelog filtering and operation through the filtered changelog must be used.
488 # changelog filtering and operation through the filtered changelog must be used.
489 #
489 #
490 # When maybe_filtered is False, the revision has already been checked
490 # When maybe_filtered is False, the revision has already been checked
491 # against filtering and is not filtered. Operation through the
491 # against filtering and is not filtered. Operation through the
492 # unfiltered changelog might be used in some case.
492 # unfiltered changelog might be used in some case.
493 self._maybe_filtered = maybe_filtered
493 self._maybe_filtered = maybe_filtered
494
494
495 def __hash__(self):
495 def __hash__(self):
496 try:
496 try:
497 return hash(self._rev)
497 return hash(self._rev)
498 except AttributeError:
498 except AttributeError:
499 return id(self)
499 return id(self)
500
500
501 def __nonzero__(self):
501 def __nonzero__(self):
502 return self._rev != nullrev
502 return self._rev != nullrev
503
503
504 __bool__ = __nonzero__
504 __bool__ = __nonzero__
505
505
506 @propertycache
506 @propertycache
507 def _changeset(self):
507 def _changeset(self):
508 if self._maybe_filtered:
508 if self._maybe_filtered:
509 repo = self._repo
509 repo = self._repo
510 else:
510 else:
511 repo = self._repo.unfiltered()
511 repo = self._repo.unfiltered()
512 return repo.changelog.changelogrevision(self.rev())
512 return repo.changelog.changelogrevision(self.rev())
513
513
514 @propertycache
514 @propertycache
515 def _manifest(self):
515 def _manifest(self):
516 return self._manifestctx.read()
516 return self._manifestctx.read()
517
517
518 @property
518 @property
519 def _manifestctx(self):
519 def _manifestctx(self):
520 return self._repo.manifestlog[self._changeset.manifest]
520 return self._repo.manifestlog[self._changeset.manifest]
521
521
522 @propertycache
522 @propertycache
523 def _manifestdelta(self):
523 def _manifestdelta(self):
524 return self._manifestctx.readdelta()
524 return self._manifestctx.readdelta()
525
525
526 @propertycache
526 @propertycache
527 def _parents(self):
527 def _parents(self):
528 repo = self._repo
528 repo = self._repo
529 if self._maybe_filtered:
529 if self._maybe_filtered:
530 cl = repo.changelog
530 cl = repo.changelog
531 else:
531 else:
532 cl = repo.unfiltered().changelog
532 cl = repo.unfiltered().changelog
533
533
534 p1, p2 = cl.parentrevs(self._rev)
534 p1, p2 = cl.parentrevs(self._rev)
535 if p2 == nullrev:
535 if p2 == nullrev:
536 return [changectx(repo, p1, cl.node(p1), maybe_filtered=False)]
536 return [changectx(repo, p1, cl.node(p1), maybe_filtered=False)]
537 return [
537 return [
538 changectx(repo, p1, cl.node(p1), maybe_filtered=False),
538 changectx(repo, p1, cl.node(p1), maybe_filtered=False),
539 changectx(repo, p2, cl.node(p2), maybe_filtered=False),
539 changectx(repo, p2, cl.node(p2), maybe_filtered=False),
540 ]
540 ]
541
541
542 def changeset(self):
542 def changeset(self):
543 c = self._changeset
543 c = self._changeset
544 return (
544 return (
545 c.manifest,
545 c.manifest,
546 c.user,
546 c.user,
547 c.date,
547 c.date,
548 c.files,
548 c.files,
549 c.description,
549 c.description,
550 c.extra,
550 c.extra,
551 )
551 )
552
552
553 def manifestnode(self):
553 def manifestnode(self):
554 return self._changeset.manifest
554 return self._changeset.manifest
555
555
556 def user(self):
556 def user(self):
557 return self._changeset.user
557 return self._changeset.user
558
558
559 def date(self):
559 def date(self):
560 return self._changeset.date
560 return self._changeset.date
561
561
562 def files(self):
562 def files(self):
563 return self._changeset.files
563 return self._changeset.files
564
564
565 def filesmodified(self):
565 def filesmodified(self):
566 modified = set(self.files())
566 modified = set(self.files())
567 modified.difference_update(self.filesadded())
567 modified.difference_update(self.filesadded())
568 modified.difference_update(self.filesremoved())
568 modified.difference_update(self.filesremoved())
569 return sorted(modified)
569 return sorted(modified)
570
570
571 def filesadded(self):
571 def filesadded(self):
572 filesadded = self._changeset.filesadded
572 filesadded = self._changeset.filesadded
573 compute_on_none = True
573 compute_on_none = True
574 if self._repo.filecopiesmode == b'changeset-sidedata':
574 if self._repo.filecopiesmode == b'changeset-sidedata':
575 compute_on_none = False
575 compute_on_none = False
576 else:
576 else:
577 source = self._repo.ui.config(b'experimental', b'copies.read-from')
577 source = self._repo.ui.config(b'experimental', b'copies.read-from')
578 if source == b'changeset-only':
578 if source == b'changeset-only':
579 compute_on_none = False
579 compute_on_none = False
580 elif source != b'compatibility':
580 elif source != b'compatibility':
581 # filelog mode, ignore any changelog content
581 # filelog mode, ignore any changelog content
582 filesadded = None
582 filesadded = None
583 if filesadded is None:
583 if filesadded is None:
584 if compute_on_none:
584 if compute_on_none:
585 filesadded = copies.computechangesetfilesadded(self)
585 filesadded = copies.computechangesetfilesadded(self)
586 else:
586 else:
587 filesadded = []
587 filesadded = []
588 return filesadded
588 return filesadded
589
589
590 def filesremoved(self):
590 def filesremoved(self):
591 filesremoved = self._changeset.filesremoved
591 filesremoved = self._changeset.filesremoved
592 compute_on_none = True
592 compute_on_none = True
593 if self._repo.filecopiesmode == b'changeset-sidedata':
593 if self._repo.filecopiesmode == b'changeset-sidedata':
594 compute_on_none = False
594 compute_on_none = False
595 else:
595 else:
596 source = self._repo.ui.config(b'experimental', b'copies.read-from')
596 source = self._repo.ui.config(b'experimental', b'copies.read-from')
597 if source == b'changeset-only':
597 if source == b'changeset-only':
598 compute_on_none = False
598 compute_on_none = False
599 elif source != b'compatibility':
599 elif source != b'compatibility':
600 # filelog mode, ignore any changelog content
600 # filelog mode, ignore any changelog content
601 filesremoved = None
601 filesremoved = None
602 if filesremoved is None:
602 if filesremoved is None:
603 if compute_on_none:
603 if compute_on_none:
604 filesremoved = copies.computechangesetfilesremoved(self)
604 filesremoved = copies.computechangesetfilesremoved(self)
605 else:
605 else:
606 filesremoved = []
606 filesremoved = []
607 return filesremoved
607 return filesremoved
608
608
609 @propertycache
609 @propertycache
610 def _copies(self):
610 def _copies(self):
611 p1copies = self._changeset.p1copies
611 p1copies = self._changeset.p1copies
612 p2copies = self._changeset.p2copies
612 p2copies = self._changeset.p2copies
613 compute_on_none = True
613 compute_on_none = True
614 if self._repo.filecopiesmode == b'changeset-sidedata':
614 if self._repo.filecopiesmode == b'changeset-sidedata':
615 compute_on_none = False
615 compute_on_none = False
616 else:
616 else:
617 source = self._repo.ui.config(b'experimental', b'copies.read-from')
617 source = self._repo.ui.config(b'experimental', b'copies.read-from')
618 # If config says to get copy metadata only from changeset, then
618 # If config says to get copy metadata only from changeset, then
619 # return that, defaulting to {} if there was no copy metadata. In
619 # return that, defaulting to {} if there was no copy metadata. In
620 # compatibility mode, we return copy data from the changeset if it
620 # compatibility mode, we return copy data from the changeset if it
621 # was recorded there, and otherwise we fall back to getting it from
621 # was recorded there, and otherwise we fall back to getting it from
622 # the filelogs (below).
622 # the filelogs (below).
623 #
623 #
624 # If we are in compatiblity mode and there is not data in the
624 # If we are in compatiblity mode and there is not data in the
625 # changeset), we get the copy metadata from the filelogs.
625 # changeset), we get the copy metadata from the filelogs.
626 #
626 #
627 # otherwise, when config said to read only from filelog, we get the
627 # otherwise, when config said to read only from filelog, we get the
628 # copy metadata from the filelogs.
628 # copy metadata from the filelogs.
629 if source == b'changeset-only':
629 if source == b'changeset-only':
630 compute_on_none = False
630 compute_on_none = False
631 elif source != b'compatibility':
631 elif source != b'compatibility':
632 # filelog mode, ignore any changelog content
632 # filelog mode, ignore any changelog content
633 p1copies = p2copies = None
633 p1copies = p2copies = None
634 if p1copies is None:
634 if p1copies is None:
635 if compute_on_none:
635 if compute_on_none:
636 p1copies, p2copies = super(changectx, self)._copies
636 p1copies, p2copies = super(changectx, self)._copies
637 else:
637 else:
638 if p1copies is None:
638 if p1copies is None:
639 p1copies = {}
639 p1copies = {}
640 if p2copies is None:
640 if p2copies is None:
641 p2copies = {}
641 p2copies = {}
642 return p1copies, p2copies
642 return p1copies, p2copies
643
643
644 def description(self):
644 def description(self):
645 return self._changeset.description
645 return self._changeset.description
646
646
647 def branch(self):
647 def branch(self):
648 return encoding.tolocal(self._changeset.extra.get(b"branch"))
648 return encoding.tolocal(self._changeset.extra.get(b"branch"))
649
649
650 def closesbranch(self):
650 def closesbranch(self):
651 return b'close' in self._changeset.extra
651 return b'close' in self._changeset.extra
652
652
653 def extra(self):
653 def extra(self):
654 """Return a dict of extra information."""
654 """Return a dict of extra information."""
655 return self._changeset.extra
655 return self._changeset.extra
656
656
657 def tags(self):
657 def tags(self):
658 """Return a list of byte tag names"""
658 """Return a list of byte tag names"""
659 return self._repo.nodetags(self._node)
659 return self._repo.nodetags(self._node)
660
660
661 def bookmarks(self):
661 def bookmarks(self):
662 """Return a list of byte bookmark names."""
662 """Return a list of byte bookmark names."""
663 return self._repo.nodebookmarks(self._node)
663 return self._repo.nodebookmarks(self._node)
664
664
665 def phase(self):
665 def phase(self):
666 return self._repo._phasecache.phase(self._repo, self._rev)
666 return self._repo._phasecache.phase(self._repo, self._rev)
667
667
668 def hidden(self):
668 def hidden(self):
669 return self._rev in repoview.filterrevs(self._repo, b'visible')
669 return self._rev in repoview.filterrevs(self._repo, b'visible')
670
670
671 def isinmemory(self):
671 def isinmemory(self):
672 return False
672 return False
673
673
674 def children(self):
674 def children(self):
675 """return list of changectx contexts for each child changeset.
675 """return list of changectx contexts for each child changeset.
676
676
677 This returns only the immediate child changesets. Use descendants() to
677 This returns only the immediate child changesets. Use descendants() to
678 recursively walk children.
678 recursively walk children.
679 """
679 """
680 c = self._repo.changelog.children(self._node)
680 c = self._repo.changelog.children(self._node)
681 return [self._repo[x] for x in c]
681 return [self._repo[x] for x in c]
682
682
683 def ancestors(self):
683 def ancestors(self):
684 for a in self._repo.changelog.ancestors([self._rev]):
684 for a in self._repo.changelog.ancestors([self._rev]):
685 yield self._repo[a]
685 yield self._repo[a]
686
686
687 def descendants(self):
687 def descendants(self):
688 """Recursively yield all children of the changeset.
688 """Recursively yield all children of the changeset.
689
689
690 For just the immediate children, use children()
690 For just the immediate children, use children()
691 """
691 """
692 for d in self._repo.changelog.descendants([self._rev]):
692 for d in self._repo.changelog.descendants([self._rev]):
693 yield self._repo[d]
693 yield self._repo[d]
694
694
695 def filectx(self, path, fileid=None, filelog=None):
695 def filectx(self, path, fileid=None, filelog=None):
696 """get a file context from this changeset"""
696 """get a file context from this changeset"""
697 if fileid is None:
697 if fileid is None:
698 fileid = self.filenode(path)
698 fileid = self.filenode(path)
699 return filectx(
699 return filectx(
700 self._repo, path, fileid=fileid, changectx=self, filelog=filelog
700 self._repo, path, fileid=fileid, changectx=self, filelog=filelog
701 )
701 )
702
702
703 def ancestor(self, c2, warn=False):
703 def ancestor(self, c2, warn=False):
704 """return the "best" ancestor context of self and c2
704 """return the "best" ancestor context of self and c2
705
705
706 If there are multiple candidates, it will show a message and check
706 If there are multiple candidates, it will show a message and check
707 merge.preferancestor configuration before falling back to the
707 merge.preferancestor configuration before falling back to the
708 revlog ancestor."""
708 revlog ancestor."""
709 # deal with workingctxs
709 # deal with workingctxs
710 n2 = c2._node
710 n2 = c2._node
711 if n2 is None:
711 if n2 is None:
712 n2 = c2._parents[0]._node
712 n2 = c2._parents[0]._node
713 cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
713 cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
714 if not cahs:
714 if not cahs:
715 anc = nullid
715 anc = nullid
716 elif len(cahs) == 1:
716 elif len(cahs) == 1:
717 anc = cahs[0]
717 anc = cahs[0]
718 else:
718 else:
719 # experimental config: merge.preferancestor
719 # experimental config: merge.preferancestor
720 for r in self._repo.ui.configlist(b'merge', b'preferancestor'):
720 for r in self._repo.ui.configlist(b'merge', b'preferancestor'):
721 try:
721 try:
722 ctx = scmutil.revsymbol(self._repo, r)
722 ctx = scmutil.revsymbol(self._repo, r)
723 except error.RepoLookupError:
723 except error.RepoLookupError:
724 continue
724 continue
725 anc = ctx.node()
725 anc = ctx.node()
726 if anc in cahs:
726 if anc in cahs:
727 break
727 break
728 else:
728 else:
729 anc = self._repo.changelog.ancestor(self._node, n2)
729 anc = self._repo.changelog.ancestor(self._node, n2)
730 if warn:
730 if warn:
731 self._repo.ui.status(
731 self._repo.ui.status(
732 (
732 (
733 _(b"note: using %s as ancestor of %s and %s\n")
733 _(b"note: using %s as ancestor of %s and %s\n")
734 % (short(anc), short(self._node), short(n2))
734 % (short(anc), short(self._node), short(n2))
735 )
735 )
736 + b''.join(
736 + b''.join(
737 _(
737 _(
738 b" alternatively, use --config "
738 b" alternatively, use --config "
739 b"merge.preferancestor=%s\n"
739 b"merge.preferancestor=%s\n"
740 )
740 )
741 % short(n)
741 % short(n)
742 for n in sorted(cahs)
742 for n in sorted(cahs)
743 if n != anc
743 if n != anc
744 )
744 )
745 )
745 )
746 return self._repo[anc]
746 return self._repo[anc]
747
747
748 def isancestorof(self, other):
748 def isancestorof(self, other):
749 """True if this changeset is an ancestor of other"""
749 """True if this changeset is an ancestor of other"""
750 return self._repo.changelog.isancestorrev(self._rev, other._rev)
750 return self._repo.changelog.isancestorrev(self._rev, other._rev)
751
751
752 def walk(self, match):
752 def walk(self, match):
753 '''Generates matching file names.'''
753 '''Generates matching file names.'''
754
754
755 # Wrap match.bad method to have message with nodeid
755 # Wrap match.bad method to have message with nodeid
756 def bad(fn, msg):
756 def bad(fn, msg):
757 # The manifest doesn't know about subrepos, so don't complain about
757 # The manifest doesn't know about subrepos, so don't complain about
758 # paths into valid subrepos.
758 # paths into valid subrepos.
759 if any(fn == s or fn.startswith(s + b'/') for s in self.substate):
759 if any(fn == s or fn.startswith(s + b'/') for s in self.substate):
760 return
760 return
761 match.bad(fn, _(b'no such file in rev %s') % self)
761 match.bad(fn, _(b'no such file in rev %s') % self)
762
762
763 m = matchmod.badmatch(self._repo.narrowmatch(match), bad)
763 m = matchmod.badmatch(self._repo.narrowmatch(match), bad)
764 return self._manifest.walk(m)
764 return self._manifest.walk(m)
765
765
766 def matches(self, match):
766 def matches(self, match):
767 return self.walk(match)
767 return self.walk(match)
768
768
769
769
770 class basefilectx(object):
770 class basefilectx(object):
771 """A filecontext object represents the common logic for its children:
771 """A filecontext object represents the common logic for its children:
772 filectx: read-only access to a filerevision that is already present
772 filectx: read-only access to a filerevision that is already present
773 in the repo,
773 in the repo,
774 workingfilectx: a filecontext that represents files from the working
774 workingfilectx: a filecontext that represents files from the working
775 directory,
775 directory,
776 memfilectx: a filecontext that represents files in-memory,
776 memfilectx: a filecontext that represents files in-memory,
777 """
777 """
778
778
779 @propertycache
779 @propertycache
780 def _filelog(self):
780 def _filelog(self):
781 return self._repo.file(self._path)
781 return self._repo.file(self._path)
782
782
783 @propertycache
783 @propertycache
784 def _changeid(self):
784 def _changeid(self):
785 if '_changectx' in self.__dict__:
785 if '_changectx' in self.__dict__:
786 return self._changectx.rev()
786 return self._changectx.rev()
787 elif '_descendantrev' in self.__dict__:
787 elif '_descendantrev' in self.__dict__:
788 # this file context was created from a revision with a known
788 # this file context was created from a revision with a known
789 # descendant, we can (lazily) correct for linkrev aliases
789 # descendant, we can (lazily) correct for linkrev aliases
790 return self._adjustlinkrev(self._descendantrev)
790 return self._adjustlinkrev(self._descendantrev)
791 else:
791 else:
792 return self._filelog.linkrev(self._filerev)
792 return self._filelog.linkrev(self._filerev)
793
793
794 @propertycache
794 @propertycache
795 def _filenode(self):
795 def _filenode(self):
796 if '_fileid' in self.__dict__:
796 if '_fileid' in self.__dict__:
797 return self._filelog.lookup(self._fileid)
797 return self._filelog.lookup(self._fileid)
798 else:
798 else:
799 return self._changectx.filenode(self._path)
799 return self._changectx.filenode(self._path)
800
800
801 @propertycache
801 @propertycache
802 def _filerev(self):
802 def _filerev(self):
803 return self._filelog.rev(self._filenode)
803 return self._filelog.rev(self._filenode)
804
804
805 @propertycache
805 @propertycache
806 def _repopath(self):
806 def _repopath(self):
807 return self._path
807 return self._path
808
808
809 def __nonzero__(self):
809 def __nonzero__(self):
810 try:
810 try:
811 self._filenode
811 self._filenode
812 return True
812 return True
813 except error.LookupError:
813 except error.LookupError:
814 # file is missing
814 # file is missing
815 return False
815 return False
816
816
817 __bool__ = __nonzero__
817 __bool__ = __nonzero__
818
818
819 def __bytes__(self):
819 def __bytes__(self):
820 try:
820 try:
821 return b"%s@%s" % (self.path(), self._changectx)
821 return b"%s@%s" % (self.path(), self._changectx)
822 except error.LookupError:
822 except error.LookupError:
823 return b"%s@???" % self.path()
823 return b"%s@???" % self.path()
824
824
825 __str__ = encoding.strmethod(__bytes__)
825 __str__ = encoding.strmethod(__bytes__)
826
826
827 def __repr__(self):
827 def __repr__(self):
828 return "<%s %s>" % (type(self).__name__, str(self))
828 return "<%s %s>" % (type(self).__name__, str(self))
829
829
830 def __hash__(self):
830 def __hash__(self):
831 try:
831 try:
832 return hash((self._path, self._filenode))
832 return hash((self._path, self._filenode))
833 except AttributeError:
833 except AttributeError:
834 return id(self)
834 return id(self)
835
835
836 def __eq__(self, other):
836 def __eq__(self, other):
837 try:
837 try:
838 return (
838 return (
839 type(self) == type(other)
839 type(self) == type(other)
840 and self._path == other._path
840 and self._path == other._path
841 and self._filenode == other._filenode
841 and self._filenode == other._filenode
842 )
842 )
843 except AttributeError:
843 except AttributeError:
844 return False
844 return False
845
845
846 def __ne__(self, other):
846 def __ne__(self, other):
847 return not (self == other)
847 return not (self == other)
848
848
849 def filerev(self):
849 def filerev(self):
850 return self._filerev
850 return self._filerev
851
851
852 def filenode(self):
852 def filenode(self):
853 return self._filenode
853 return self._filenode
854
854
855 @propertycache
855 @propertycache
856 def _flags(self):
856 def _flags(self):
857 return self._changectx.flags(self._path)
857 return self._changectx.flags(self._path)
858
858
859 def flags(self):
859 def flags(self):
860 return self._flags
860 return self._flags
861
861
862 def filelog(self):
862 def filelog(self):
863 return self._filelog
863 return self._filelog
864
864
865 def rev(self):
865 def rev(self):
866 return self._changeid
866 return self._changeid
867
867
868 def linkrev(self):
868 def linkrev(self):
869 return self._filelog.linkrev(self._filerev)
869 return self._filelog.linkrev(self._filerev)
870
870
871 def node(self):
871 def node(self):
872 return self._changectx.node()
872 return self._changectx.node()
873
873
874 def hex(self):
874 def hex(self):
875 return self._changectx.hex()
875 return self._changectx.hex()
876
876
877 def user(self):
877 def user(self):
878 return self._changectx.user()
878 return self._changectx.user()
879
879
880 def date(self):
880 def date(self):
881 return self._changectx.date()
881 return self._changectx.date()
882
882
883 def files(self):
883 def files(self):
884 return self._changectx.files()
884 return self._changectx.files()
885
885
886 def description(self):
886 def description(self):
887 return self._changectx.description()
887 return self._changectx.description()
888
888
889 def branch(self):
889 def branch(self):
890 return self._changectx.branch()
890 return self._changectx.branch()
891
891
892 def extra(self):
892 def extra(self):
893 return self._changectx.extra()
893 return self._changectx.extra()
894
894
895 def phase(self):
895 def phase(self):
896 return self._changectx.phase()
896 return self._changectx.phase()
897
897
898 def phasestr(self):
898 def phasestr(self):
899 return self._changectx.phasestr()
899 return self._changectx.phasestr()
900
900
901 def obsolete(self):
901 def obsolete(self):
902 return self._changectx.obsolete()
902 return self._changectx.obsolete()
903
903
904 def instabilities(self):
904 def instabilities(self):
905 return self._changectx.instabilities()
905 return self._changectx.instabilities()
906
906
907 def manifest(self):
907 def manifest(self):
908 return self._changectx.manifest()
908 return self._changectx.manifest()
909
909
910 def changectx(self):
910 def changectx(self):
911 return self._changectx
911 return self._changectx
912
912
913 def renamed(self):
913 def renamed(self):
914 return self._copied
914 return self._copied
915
915
916 def copysource(self):
916 def copysource(self):
917 return self._copied and self._copied[0]
917 return self._copied and self._copied[0]
918
918
919 def repo(self):
919 def repo(self):
920 return self._repo
920 return self._repo
921
921
922 def size(self):
922 def size(self):
923 return len(self.data())
923 return len(self.data())
924
924
925 def path(self):
925 def path(self):
926 return self._path
926 return self._path
927
927
928 def isbinary(self):
928 def isbinary(self):
929 try:
929 try:
930 return stringutil.binary(self.data())
930 return stringutil.binary(self.data())
931 except IOError:
931 except IOError:
932 return False
932 return False
933
933
934 def isexec(self):
934 def isexec(self):
935 return b'x' in self.flags()
935 return b'x' in self.flags()
936
936
937 def islink(self):
937 def islink(self):
938 return b'l' in self.flags()
938 return b'l' in self.flags()
939
939
940 def isabsent(self):
940 def isabsent(self):
941 """whether this filectx represents a file not in self._changectx
941 """whether this filectx represents a file not in self._changectx
942
942
943 This is mainly for merge code to detect change/delete conflicts. This is
943 This is mainly for merge code to detect change/delete conflicts. This is
944 expected to be True for all subclasses of basectx."""
944 expected to be True for all subclasses of basectx."""
945 return False
945 return False
946
946
947 _customcmp = False
947 _customcmp = False
948
948
949 def cmp(self, fctx):
949 def cmp(self, fctx):
950 """compare with other file context
950 """compare with other file context
951
951
952 returns True if different than fctx.
952 returns True if different than fctx.
953 """
953 """
954 if fctx._customcmp:
954 if fctx._customcmp:
955 return fctx.cmp(self)
955 return fctx.cmp(self)
956
956
957 if self._filenode is None:
957 if self._filenode is None:
958 raise error.ProgrammingError(
958 raise error.ProgrammingError(
959 b'filectx.cmp() must be reimplemented if not backed by revlog'
959 b'filectx.cmp() must be reimplemented if not backed by revlog'
960 )
960 )
961
961
962 if fctx._filenode is None:
962 if fctx._filenode is None:
963 if self._repo._encodefilterpats:
963 if self._repo._encodefilterpats:
964 # can't rely on size() because wdir content may be decoded
964 # can't rely on size() because wdir content may be decoded
965 return self._filelog.cmp(self._filenode, fctx.data())
965 return self._filelog.cmp(self._filenode, fctx.data())
966 if self.size() - 4 == fctx.size():
966 if self.size() - 4 == fctx.size():
967 # size() can match:
967 # size() can match:
968 # if file data starts with '\1\n', empty metadata block is
968 # if file data starts with '\1\n', empty metadata block is
969 # prepended, which adds 4 bytes to filelog.size().
969 # prepended, which adds 4 bytes to filelog.size().
970 return self._filelog.cmp(self._filenode, fctx.data())
970 return self._filelog.cmp(self._filenode, fctx.data())
971 if self.size() == fctx.size():
971 if self.size() == fctx.size():
972 # size() matches: need to compare content
972 # size() matches: need to compare content
973 return self._filelog.cmp(self._filenode, fctx.data())
973 return self._filelog.cmp(self._filenode, fctx.data())
974
974
975 # size() differs
975 # size() differs
976 return True
976 return True
977
977
978 def _adjustlinkrev(self, srcrev, inclusive=False, stoprev=None):
978 def _adjustlinkrev(self, srcrev, inclusive=False, stoprev=None):
979 """return the first ancestor of <srcrev> introducing <fnode>
979 """return the first ancestor of <srcrev> introducing <fnode>
980
980
981 If the linkrev of the file revision does not point to an ancestor of
981 If the linkrev of the file revision does not point to an ancestor of
982 srcrev, we'll walk down the ancestors until we find one introducing
982 srcrev, we'll walk down the ancestors until we find one introducing
983 this file revision.
983 this file revision.
984
984
985 :srcrev: the changeset revision we search ancestors from
985 :srcrev: the changeset revision we search ancestors from
986 :inclusive: if true, the src revision will also be checked
986 :inclusive: if true, the src revision will also be checked
987 :stoprev: an optional revision to stop the walk at. If no introduction
987 :stoprev: an optional revision to stop the walk at. If no introduction
988 of this file content could be found before this floor
988 of this file content could be found before this floor
989 revision, the function will returns "None" and stops its
989 revision, the function will returns "None" and stops its
990 iteration.
990 iteration.
991 """
991 """
992 repo = self._repo
992 repo = self._repo
993 cl = repo.unfiltered().changelog
993 cl = repo.unfiltered().changelog
994 mfl = repo.manifestlog
994 mfl = repo.manifestlog
995 # fetch the linkrev
995 # fetch the linkrev
996 lkr = self.linkrev()
996 lkr = self.linkrev()
997 if srcrev == lkr:
997 if srcrev == lkr:
998 return lkr
998 return lkr
999 # hack to reuse ancestor computation when searching for renames
999 # hack to reuse ancestor computation when searching for renames
1000 memberanc = getattr(self, '_ancestrycontext', None)
1000 memberanc = getattr(self, '_ancestrycontext', None)
1001 iteranc = None
1001 iteranc = None
1002 if srcrev is None:
1002 if srcrev is None:
1003 # wctx case, used by workingfilectx during mergecopy
1003 # wctx case, used by workingfilectx during mergecopy
1004 revs = [p.rev() for p in self._repo[None].parents()]
1004 revs = [p.rev() for p in self._repo[None].parents()]
1005 inclusive = True # we skipped the real (revless) source
1005 inclusive = True # we skipped the real (revless) source
1006 else:
1006 else:
1007 revs = [srcrev]
1007 revs = [srcrev]
1008 if memberanc is None:
1008 if memberanc is None:
1009 memberanc = iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
1009 memberanc = iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
1010 # check if this linkrev is an ancestor of srcrev
1010 # check if this linkrev is an ancestor of srcrev
1011 if lkr not in memberanc:
1011 if lkr not in memberanc:
1012 if iteranc is None:
1012 if iteranc is None:
1013 iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
1013 iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
1014 fnode = self._filenode
1014 fnode = self._filenode
1015 path = self._path
1015 path = self._path
1016 for a in iteranc:
1016 for a in iteranc:
1017 if stoprev is not None and a < stoprev:
1017 if stoprev is not None and a < stoprev:
1018 return None
1018 return None
1019 ac = cl.read(a) # get changeset data (we avoid object creation)
1019 ac = cl.read(a) # get changeset data (we avoid object creation)
1020 if path in ac[3]: # checking the 'files' field.
1020 if path in ac[3]: # checking the 'files' field.
1021 # The file has been touched, check if the content is
1021 # The file has been touched, check if the content is
1022 # similar to the one we search for.
1022 # similar to the one we search for.
1023 if fnode == mfl[ac[0]].readfast().get(path):
1023 if fnode == mfl[ac[0]].readfast().get(path):
1024 return a
1024 return a
1025 # In theory, we should never get out of that loop without a result.
1025 # In theory, we should never get out of that loop without a result.
1026 # But if manifest uses a buggy file revision (not children of the
1026 # But if manifest uses a buggy file revision (not children of the
1027 # one it replaces) we could. Such a buggy situation will likely
1027 # one it replaces) we could. Such a buggy situation will likely
1028 # result is crash somewhere else at to some point.
1028 # result is crash somewhere else at to some point.
1029 return lkr
1029 return lkr
1030
1030
1031 def isintroducedafter(self, changelogrev):
1031 def isintroducedafter(self, changelogrev):
1032 """True if a filectx has been introduced after a given floor revision
1032 """True if a filectx has been introduced after a given floor revision
1033 """
1033 """
1034 if self.linkrev() >= changelogrev:
1034 if self.linkrev() >= changelogrev:
1035 return True
1035 return True
1036 introrev = self._introrev(stoprev=changelogrev)
1036 introrev = self._introrev(stoprev=changelogrev)
1037 if introrev is None:
1037 if introrev is None:
1038 return False
1038 return False
1039 return introrev >= changelogrev
1039 return introrev >= changelogrev
1040
1040
1041 def introrev(self):
1041 def introrev(self):
1042 """return the rev of the changeset which introduced this file revision
1042 """return the rev of the changeset which introduced this file revision
1043
1043
1044 This method is different from linkrev because it take into account the
1044 This method is different from linkrev because it take into account the
1045 changeset the filectx was created from. It ensures the returned
1045 changeset the filectx was created from. It ensures the returned
1046 revision is one of its ancestors. This prevents bugs from
1046 revision is one of its ancestors. This prevents bugs from
1047 'linkrev-shadowing' when a file revision is used by multiple
1047 'linkrev-shadowing' when a file revision is used by multiple
1048 changesets.
1048 changesets.
1049 """
1049 """
1050 return self._introrev()
1050 return self._introrev()
1051
1051
1052 def _introrev(self, stoprev=None):
1052 def _introrev(self, stoprev=None):
1053 """
1053 """
1054 Same as `introrev` but, with an extra argument to limit changelog
1054 Same as `introrev` but, with an extra argument to limit changelog
1055 iteration range in some internal usecase.
1055 iteration range in some internal usecase.
1056
1056
1057 If `stoprev` is set, the `introrev` will not be searched past that
1057 If `stoprev` is set, the `introrev` will not be searched past that
1058 `stoprev` revision and "None" might be returned. This is useful to
1058 `stoprev` revision and "None" might be returned. This is useful to
1059 limit the iteration range.
1059 limit the iteration range.
1060 """
1060 """
1061 toprev = None
1061 toprev = None
1062 attrs = vars(self)
1062 attrs = vars(self)
1063 if '_changeid' in attrs:
1063 if '_changeid' in attrs:
1064 # We have a cached value already
1064 # We have a cached value already
1065 toprev = self._changeid
1065 toprev = self._changeid
1066 elif '_changectx' in attrs:
1066 elif '_changectx' in attrs:
1067 # We know which changelog entry we are coming from
1067 # We know which changelog entry we are coming from
1068 toprev = self._changectx.rev()
1068 toprev = self._changectx.rev()
1069
1069
1070 if toprev is not None:
1070 if toprev is not None:
1071 return self._adjustlinkrev(toprev, inclusive=True, stoprev=stoprev)
1071 return self._adjustlinkrev(toprev, inclusive=True, stoprev=stoprev)
1072 elif '_descendantrev' in attrs:
1072 elif '_descendantrev' in attrs:
1073 introrev = self._adjustlinkrev(self._descendantrev, stoprev=stoprev)
1073 introrev = self._adjustlinkrev(self._descendantrev, stoprev=stoprev)
1074 # be nice and cache the result of the computation
1074 # be nice and cache the result of the computation
1075 if introrev is not None:
1075 if introrev is not None:
1076 self._changeid = introrev
1076 self._changeid = introrev
1077 return introrev
1077 return introrev
1078 else:
1078 else:
1079 return self.linkrev()
1079 return self.linkrev()
1080
1080
1081 def introfilectx(self):
1081 def introfilectx(self):
1082 """Return filectx having identical contents, but pointing to the
1082 """Return filectx having identical contents, but pointing to the
1083 changeset revision where this filectx was introduced"""
1083 changeset revision where this filectx was introduced"""
1084 introrev = self.introrev()
1084 introrev = self.introrev()
1085 if self.rev() == introrev:
1085 if self.rev() == introrev:
1086 return self
1086 return self
1087 return self.filectx(self.filenode(), changeid=introrev)
1087 return self.filectx(self.filenode(), changeid=introrev)
1088
1088
1089 def _parentfilectx(self, path, fileid, filelog):
1089 def _parentfilectx(self, path, fileid, filelog):
1090 """create parent filectx keeping ancestry info for _adjustlinkrev()"""
1090 """create parent filectx keeping ancestry info for _adjustlinkrev()"""
1091 fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
1091 fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
1092 if '_changeid' in vars(self) or '_changectx' in vars(self):
1092 if '_changeid' in vars(self) or '_changectx' in vars(self):
1093 # If self is associated with a changeset (probably explicitly
1093 # If self is associated with a changeset (probably explicitly
1094 # fed), ensure the created filectx is associated with a
1094 # fed), ensure the created filectx is associated with a
1095 # changeset that is an ancestor of self.changectx.
1095 # changeset that is an ancestor of self.changectx.
1096 # This lets us later use _adjustlinkrev to get a correct link.
1096 # This lets us later use _adjustlinkrev to get a correct link.
1097 fctx._descendantrev = self.rev()
1097 fctx._descendantrev = self.rev()
1098 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
1098 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
1099 elif '_descendantrev' in vars(self):
1099 elif '_descendantrev' in vars(self):
1100 # Otherwise propagate _descendantrev if we have one associated.
1100 # Otherwise propagate _descendantrev if we have one associated.
1101 fctx._descendantrev = self._descendantrev
1101 fctx._descendantrev = self._descendantrev
1102 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
1102 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
1103 return fctx
1103 return fctx
1104
1104
    def parents(self):
        """Return the parent filectxs of this file revision.

        Null parents are dropped; rename information recorded in the
        filelog, when present, takes the place of the (always first)
        null parent.
        """
        _path = self._path
        fl = self._filelog
        parents = self._filelog.parents(self._filenode)
        # null parents carry no information; filter them out up front
        pl = [(_path, node, fl) for node in parents if node != nullid]

        r = fl.renamed(self._filenode)
        if r:
            # - In the simple rename case, both parent are nullid, pl is empty.
            # - In case of merge, only one of the parent is null id and should
            #   be replaced with the rename information. This parent is
            #   -always- the first one.
            #
            # As null id have always been filtered out in the previous list
            # comprehension, inserting to 0 will always result in "replacing
            # first nullid parent with rename information".
            pl.insert(0, (r[0], r[1], self._repo.file(r[0])))

        return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]
1124
1124
1125 def p1(self):
1125 def p1(self):
1126 return self.parents()[0]
1126 return self.parents()[0]
1127
1127
1128 def p2(self):
1128 def p2(self):
1129 p = self.parents()
1129 p = self.parents()
1130 if len(p) == 2:
1130 if len(p) == 2:
1131 return p[1]
1131 return p[1]
1132 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
1132 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
1133
1133
1134 def annotate(self, follow=False, skiprevs=None, diffopts=None):
1134 def annotate(self, follow=False, skiprevs=None, diffopts=None):
1135 """Returns a list of annotateline objects for each line in the file
1135 """Returns a list of annotateline objects for each line in the file
1136
1136
1137 - line.fctx is the filectx of the node where that line was last changed
1137 - line.fctx is the filectx of the node where that line was last changed
1138 - line.lineno is the line number at the first appearance in the managed
1138 - line.lineno is the line number at the first appearance in the managed
1139 file
1139 file
1140 - line.text is the data on that line (including newline character)
1140 - line.text is the data on that line (including newline character)
1141 """
1141 """
1142 getlog = util.lrucachefunc(lambda x: self._repo.file(x))
1142 getlog = util.lrucachefunc(lambda x: self._repo.file(x))
1143
1143
1144 def parents(f):
1144 def parents(f):
1145 # Cut _descendantrev here to mitigate the penalty of lazy linkrev
1145 # Cut _descendantrev here to mitigate the penalty of lazy linkrev
1146 # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
1146 # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
1147 # from the topmost introrev (= srcrev) down to p.linkrev() if it
1147 # from the topmost introrev (= srcrev) down to p.linkrev() if it
1148 # isn't an ancestor of the srcrev.
1148 # isn't an ancestor of the srcrev.
1149 f._changeid
1149 f._changeid
1150 pl = f.parents()
1150 pl = f.parents()
1151
1151
1152 # Don't return renamed parents if we aren't following.
1152 # Don't return renamed parents if we aren't following.
1153 if not follow:
1153 if not follow:
1154 pl = [p for p in pl if p.path() == f.path()]
1154 pl = [p for p in pl if p.path() == f.path()]
1155
1155
1156 # renamed filectx won't have a filelog yet, so set it
1156 # renamed filectx won't have a filelog yet, so set it
1157 # from the cache to save time
1157 # from the cache to save time
1158 for p in pl:
1158 for p in pl:
1159 if not '_filelog' in p.__dict__:
1159 if not '_filelog' in p.__dict__:
1160 p._filelog = getlog(p.path())
1160 p._filelog = getlog(p.path())
1161
1161
1162 return pl
1162 return pl
1163
1163
1164 # use linkrev to find the first changeset where self appeared
1164 # use linkrev to find the first changeset where self appeared
1165 base = self.introfilectx()
1165 base = self.introfilectx()
1166 if getattr(base, '_ancestrycontext', None) is None:
1166 if getattr(base, '_ancestrycontext', None) is None:
1167 # it is safe to use an unfiltered repository here because we are
1167 # it is safe to use an unfiltered repository here because we are
1168 # walking ancestors only.
1168 # walking ancestors only.
1169 cl = self._repo.unfiltered().changelog
1169 cl = self._repo.unfiltered().changelog
1170 if base.rev() is None:
1170 if base.rev() is None:
1171 # wctx is not inclusive, but works because _ancestrycontext
1171 # wctx is not inclusive, but works because _ancestrycontext
1172 # is used to test filelog revisions
1172 # is used to test filelog revisions
1173 ac = cl.ancestors(
1173 ac = cl.ancestors(
1174 [p.rev() for p in base.parents()], inclusive=True
1174 [p.rev() for p in base.parents()], inclusive=True
1175 )
1175 )
1176 else:
1176 else:
1177 ac = cl.ancestors([base.rev()], inclusive=True)
1177 ac = cl.ancestors([base.rev()], inclusive=True)
1178 base._ancestrycontext = ac
1178 base._ancestrycontext = ac
1179
1179
1180 return dagop.annotate(
1180 return dagop.annotate(
1181 base, parents, skiprevs=skiprevs, diffopts=diffopts
1181 base, parents, skiprevs=skiprevs, diffopts=diffopts
1182 )
1182 )
1183
1183
    def ancestors(self, followfirst=False):
        """Yield the ancestor filectxs of this file revision.

        Candidates are visited in descending (linkrev, filenode) order.
        If ``followfirst`` is true, only the first parent of each revision
        is followed.
        """
        visit = {}
        c = self
        if followfirst:
            # slice off everything but the first parent below
            cut = 1
        else:
            cut = None

        while True:
            for parent in c.parents()[:cut]:
                visit[(parent.linkrev(), parent.filenode())] = parent
            if not visit:
                break
            # pop the pending candidate with the highest (linkrev, filenode)
            # key so the walk proceeds from newest to oldest
            c = visit.pop(max(visit))
            yield c
1199
1199
1200 def decodeddata(self):
1200 def decodeddata(self):
1201 """Returns `data()` after running repository decoding filters.
1201 """Returns `data()` after running repository decoding filters.
1202
1202
1203 This is often equivalent to how the data would be expressed on disk.
1203 This is often equivalent to how the data would be expressed on disk.
1204 """
1204 """
1205 return self._repo.wwritedata(self.path(), self.data())
1205 return self._repo.wwritedata(self.path(), self.data())
1206
1206
1207
1207
class filectx(basefilectx):
    """A filecontext object makes access to data related to a particular
    filerevision convenient."""

    def __init__(
        self,
        repo,
        path,
        changeid=None,
        fileid=None,
        filelog=None,
        changectx=None,
    ):
        """changeid must be a revision number, if specified.
        fileid can be a file revision or node."""
        self._repo = repo
        self._path = path

        # at least one way of locating the file revision must be supplied
        assert (
            changeid is not None or fileid is not None or changectx is not None
        ), (
            b"bad args: changeid=%r, fileid=%r, changectx=%r"
            % (changeid, fileid, changectx,)
        )

        if filelog is not None:
            self._filelog = filelog

        # pre-seed the property caches with whatever the caller provided
        if changeid is not None:
            self._changeid = changeid
        if changectx is not None:
            self._changectx = changectx
        if fileid is not None:
            self._fileid = fileid

    @propertycache
    def _changectx(self):
        # changectx the linkrev of this file revision points at; falls back
        # to the unfiltered repo when that revision is filtered out
        try:
            return self._repo[self._changeid]
        except error.FilteredRepoLookupError:
            # Linkrev may point to any revision in the repository. When the
            # repository is filtered this may lead to `filectx` trying to build
            # `changectx` for filtered revision. In such case we fallback to
            # creating `changectx` on the unfiltered version of the reposition.
            # This fallback should not be an issue because `changectx` from
            # `filectx` are not used in complex operations that care about
            # filtering.
            #
            # This fallback is a cheap and dirty fix that prevent several
            # crashes. It does not ensure the behavior is correct. However the
            # behavior was not correct before filtering either and "incorrect
            # behavior" is seen as better as "crash"
            #
            # Linkrevs have several serious troubles with filtering that are
            # complicated to solve. Proper handling of the issue here should be
            # considered when solving linkrev issue are on the table.
            return self._repo.unfiltered()[self._changeid]

    def filectx(self, fileid, changeid=None):
        '''opens an arbitrary revision of the file without
        opening a new filelog'''
        return filectx(
            self._repo,
            self._path,
            fileid=fileid,
            filelog=self._filelog,
            changeid=changeid,
        )

    def rawdata(self):
        """Return the revision data as stored in the filelog."""
        return self._filelog.rawdata(self._filenode)

    def rawflags(self):
        """low-level revlog flags"""
        return self._filelog.flags(self._filerev)

    def data(self):
        """Return the file content, honouring the censor.policy setting
        for censored nodes."""
        try:
            return self._filelog.read(self._filenode)
        except error.CensoredNodeError:
            if self._repo.ui.config(b"censor", b"policy") == b"ignore":
                return b""
            raise error.Abort(
                _(b"censored node: %s") % short(self._filenode),
                hint=_(b"set censor.policy to ignore errors"),
            )

    def size(self):
        """Return the size of this file revision as reported by the
        filelog."""
        return self._filelog.size(self._filerev)

    @propertycache
    def _copied(self):
        """check if file was actually renamed in this changeset revision

        If rename logged in file revision, we report copy for changeset only
        if file revisions linkrev points back to the changeset in question
        or both changeset parents contain different file revisions.
        """

        renamed = self._filelog.renamed(self._filenode)
        if not renamed:
            return None

        if self.rev() == self.linkrev():
            return renamed

        name = self.path()
        fnode = self._filenode
        for p in self._changectx.parents():
            try:
                # a parent already holds this exact file revision, so the
                # rename was introduced earlier: report no copy here
                if fnode == p.filenode(name):
                    return None
            except error.LookupError:
                pass
        return renamed

    def children(self):
        # hard for renames
        c = self._filelog.children(self._filenode)
        return [
            filectx(self._repo, self._path, fileid=x, filelog=self._filelog)
            for x in c
        ]
1331
1331
1332
1332
class committablectx(basectx):
    """A committablectx object provides common functionality for a context that
    wants the ability to commit, e.g. workingctx or memctx."""

    def __init__(
        self,
        repo,
        text=b"",
        user=None,
        date=None,
        extra=None,
        changes=None,
        branch=None,
    ):
        super(committablectx, self).__init__(repo)
        # not committed yet, so no revision number or node
        self._rev = None
        self._node = None
        self._text = text
        if date:
            self._date = dateutil.parsedate(date)
        if user:
            self._user = user
        if changes:
            self._status = changes

        self._extra = {}
        if extra:
            self._extra = extra.copy()
        if branch is not None:
            self._extra[b'branch'] = encoding.fromlocal(branch)
        if not self._extra.get(b'branch'):
            self._extra[b'branch'] = b'default'

    def __bytes__(self):
        # first parent's name plus "+" marks an uncommitted context
        return bytes(self._parents[0]) + b"+"

    __str__ = encoding.strmethod(__bytes__)

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    @propertycache
    def _status(self):
        # fall back to the repository status when no explicit changes were
        # passed to the constructor
        return self._repo.status()

    @propertycache
    def _user(self):
        return self._repo.ui.username()

    @propertycache
    def _date(self):
        ui = self._repo.ui
        # devel.default-date lets tests pin a deterministic date
        date = ui.configdate(b'devel', b'default-date')
        if date is None:
            date = dateutil.makedate()
        return date

    def subrev(self, subpath):
        return None

    def manifestnode(self):
        return None

    def user(self):
        return self._user or self._repo.ui.username()

    def date(self):
        return self._date

    def description(self):
        return self._text

    def files(self):
        """Return the sorted list of files touched by this context."""
        return sorted(
            self._status.modified + self._status.added + self._status.removed
        )

    def modified(self):
        return self._status.modified

    def added(self):
        return self._status.added

    def removed(self):
        return self._status.removed

    def deleted(self):
        return self._status.deleted

    # aliases kept for interface parity with committed contexts
    filesmodified = modified
    filesadded = added
    filesremoved = removed

    def branch(self):
        return encoding.tolocal(self._extra[b'branch'])

    def closesbranch(self):
        return b'close' in self._extra

    def extra(self):
        return self._extra

    def isinmemory(self):
        return False

    def tags(self):
        return []

    def bookmarks(self):
        b = []
        for p in self.parents():
            b.extend(p.bookmarks())
        return b

    def phase(self):
        # the new commit cannot have a lower phase than any of its parents
        phase = phases.newcommitphase(self._repo.ui)
        for p in self.parents():
            phase = max(phase, p.phase())
        return phase

    def hidden(self):
        return False

    def children(self):
        return []

    def ancestor(self, c2):
        """return the "best" ancestor context of self and c2"""
        return self._parents[0].ancestor(c2)  # punt on two parents for now

    def ancestors(self):
        # yield the parents themselves first, then their ancestors
        for p in self._parents:
            yield p
        for a in self._repo.changelog.ancestors(
            [p.rev() for p in self._parents]
        ):
            yield self._repo[a]

    def markcommitted(self, node):
        """Perform post-commit cleanup necessary after committing this ctx

        Specifically, this updates backing stores this working context
        wraps to reflect the fact that the changes reflected by this
        workingctx have been committed. For example, it marks
        modified and added files as normal in the dirstate.

        """

    def dirty(self, missing=False, merge=True, branch=True):
        return False
1485
1485
1486
1486
1487 class workingctx(committablectx):
1487 class workingctx(committablectx):
1488 """A workingctx object makes access to data related to
1488 """A workingctx object makes access to data related to
1489 the current working directory convenient.
1489 the current working directory convenient.
1490 date - any valid date string or (unixtime, offset), or None.
1490 date - any valid date string or (unixtime, offset), or None.
1491 user - username string, or None.
1491 user - username string, or None.
1492 extra - a dictionary of extra values, or None.
1492 extra - a dictionary of extra values, or None.
1493 changes - a list of file lists as returned by localrepo.status()
1493 changes - a list of file lists as returned by localrepo.status()
1494 or None to use the repository status.
1494 or None to use the repository status.
1495 """
1495 """
1496
1496
    def __init__(
        self, repo, text=b"", user=None, date=None, extra=None, changes=None
    ):
        # Resolve the branch from the dirstate unless the caller already
        # supplied one through ``extra``; stored branch names must decode
        # as UTF-8.
        branch = None
        if not extra or b'branch' not in extra:
            try:
                branch = repo.dirstate.branch()
            except UnicodeDecodeError:
                raise error.Abort(_(b'branch name not in UTF-8!'))
        super(workingctx, self).__init__(
            repo, text, user, date, extra, changes, branch=branch
        )
1509
1509
    def __iter__(self):
        # iterate over tracked files, skipping dirstate entries in state
        # b'r' (marked for removal)
        d = self._repo.dirstate
        for f in d:
            if d[f] != b'r':
                yield f
1515
1515
    def __contains__(self, key):
        # a file belongs to the working context unless its dirstate state
        # is b'?' (not tracked) or b'r' (marked for removal)
        return self._repo.dirstate[key] not in b"?r"
1518
1518
    def hex(self):
        # the working directory has no committed node; report the special
        # wdir hex value
        return wdirhex
1521
1521
    @propertycache
    def _parents(self):
        p = self._repo.dirstate.parents()
        if p[1] == nullid:
            # single-parent working directory: drop the null second parent
            p = p[:-1]
        # use unfiltered repo to delay/avoid loading obsmarkers
        unfi = self._repo.unfiltered()
        return [
            changectx(
                self._repo, unfi.changelog.rev(n), n, maybe_filtered=False
            )
            for n in p
        ]
1535
1535
    def setparents(self, p1node, p2node=nullid):
        """Set the working directory parents and fix up copy records."""
        dirstate = self._repo.dirstate
        with dirstate.parentchange():
            copies = dirstate.setparents(p1node, p2node)
            pctx = self._repo[p1node]
            if copies:
                # Adjust copy records, the dirstate cannot do it, it
                # requires access to parents manifests. Preserve them
                # only for entries added to first parent.
                for f in copies:
                    if f not in pctx and copies[f] in pctx:
                        dirstate.copy(copies[f], f)
            if p2node == nullid:
                # collapsing to a single parent: drop copy records whose
                # source and destination are both absent from it
                for f, s in sorted(dirstate.copies().items()):
                    if f not in pctx and s not in pctx:
                        dirstate.copy(None, f)
1552
1552
    def _fileinfo(self, path):
        # populate __dict__['_manifest'] as workingctx has no _manifestdelta
        self._manifest
        return super(workingctx, self)._fileinfo(path)
1557
1557
    def _buildflagfunc(self):
        # Create a fallback function for getting file flags when the
        # filesystem doesn't support them

        copiesget = self._repo.dirstate.copies().get
        parents = self.parents()
        if len(parents) < 2:
            # when we have one parent, it's easy: copy from parent
            man = parents[0].manifest()

            def func(f):
                # resolve copies so renamed files inherit their source flags
                f = copiesget(f, f)
                return man.flags(f)

        else:
            # merges are tricky: we try to reconstruct the unstored
            # result from the merge (issue1802)
            p1, p2 = parents
            pa = p1.ancestor(p2)
            m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()

            def func(f):
                f = copiesget(f, f)  # may be wrong for merges with copies
                fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
                if fl1 == fl2:
                    # parents agree; use their common flags
                    return fl1
                if fl1 == fla:
                    # only p2 changed the flags; take its side
                    return fl2
                if fl2 == fla:
                    # only p1 changed the flags; take its side
                    return fl1
                return b''  # punt for conflicts

        return func
1591
1591
    @propertycache
    def _flagfunc(self):
        # flag lookup backed by the dirstate, with _buildflagfunc as the
        # fallback when the filesystem cannot report flags
        return self._repo.dirstate.flagfunc(self._buildflagfunc)
1595
1595
1596 def flags(self, path):
1596 def flags(self, path):
1597 if '_manifest' in self.__dict__:
1597 if '_manifest' in self.__dict__:
1598 try:
1598 try:
1599 return self._manifest.flags(path)
1599 return self._manifest.flags(path)
1600 except KeyError:
1600 except KeyError:
1601 return b''
1601 return b''
1602
1602
1603 try:
1603 try:
1604 return self._flagfunc(path)
1604 return self._flagfunc(path)
1605 except OSError:
1605 except OSError:
1606 return b''
1606 return b''
1607
1607
    def filectx(self, path, filelog=None):
        """get a file context from the working directory"""
        return workingfilectx(
            self._repo, path, workingctx=self, filelog=filelog
        )
1613
1613
    def dirty(self, missing=False, merge=True, branch=True):
        """check whether a working directory is modified"""
        # check subrepos first
        for s in sorted(self.substate):
            if self.sub(s).dirty(missing=missing):
                return True
        # check current working dir.  Note that the chained ``or`` returns
        # the first truthy value, not a strict boolean.
        return (
            (merge and self.p2())
            or (branch and self.branch() != self.p1().branch())
            or self.modified()
            or self.added()
            or self.removed()
            or (missing and self.deleted())
        )
1629
1629
    def add(self, list, prefix=b""):
        """Schedule the given files for addition.

        Returns the list of files that were rejected (nonexistent or of an
        unsupported type).
        """
        with self._repo.wlock():
            ui, ds = self._repo.ui, self._repo.dirstate
            uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
            rejected = []
            lstat = self._repo.wvfs.lstat
            for f in list:
                # ds.pathto() returns an absolute file when this is invoked from
                # the keyword extension. That gets flagged as non-portable on
                # Windows, since it contains the drive letter and colon.
                scmutil.checkportable(ui, os.path.join(prefix, f))
                try:
                    st = lstat(f)
                except OSError:
                    ui.warn(_(b"%s does not exist!\n") % uipath(f))
                    rejected.append(f)
                    continue
                limit = ui.configbytes(b'ui', b'large-file-limit')
                if limit != 0 and st.st_size > limit:
                    # warn but keep going: large files are merely expensive,
                    # not invalid
                    ui.warn(
                        _(
                            b"%s: up to %d MB of RAM may be required "
                            b"to manage this file\n"
                            b"(use 'hg revert %s' to cancel the "
                            b"pending addition)\n"
                        )
                        % (f, 3 * st.st_size // 1000000, uipath(f))
                    )
                if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
                    ui.warn(
                        _(
                            b"%s not added: only files and symlinks "
                            b"supported currently\n"
                        )
                        % uipath(f)
                    )
                    rejected.append(f)
                elif ds[f] in b'amn':
                    # already added, merged or normal: nothing to do
                    ui.warn(_(b"%s already tracked!\n") % uipath(f))
                elif ds[f] == b'r':
                    # previously marked removed: resurrect instead of re-add
                    ds.normallookup(f)
                else:
                    ds.add(f)
            return rejected
1674
1674
    def forget(self, files, prefix=b""):
        """Stop tracking the given files without removing them from disk.

        Returns the list of files that were not tracked to begin with.
        """
        with self._repo.wlock():
            ds = self._repo.dirstate
            uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
            rejected = []
            for f in files:
                if f not in ds:
                    self._repo.ui.warn(_(b"%s not tracked!\n") % uipath(f))
                    rejected.append(f)
                elif ds[f] != b'a':
                    # tracked in a parent: schedule the removal
                    ds.remove(f)
                else:
                    # only scheduled for addition: drop the pending record
                    ds.drop(f)
            return rejected
1689
1689
1690 def copy(self, source, dest):
1690 def copy(self, source, dest):
1691 try:
1691 try:
1692 st = self._repo.wvfs.lstat(dest)
1692 st = self._repo.wvfs.lstat(dest)
1693 except OSError as err:
1693 except OSError as err:
1694 if err.errno != errno.ENOENT:
1694 if err.errno != errno.ENOENT:
1695 raise
1695 raise
1696 self._repo.ui.warn(
1696 self._repo.ui.warn(
1697 _(b"%s does not exist!\n") % self._repo.dirstate.pathto(dest)
1697 _(b"%s does not exist!\n") % self._repo.dirstate.pathto(dest)
1698 )
1698 )
1699 return
1699 return
1700 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1700 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1701 self._repo.ui.warn(
1701 self._repo.ui.warn(
1702 _(b"copy failed: %s is not a file or a symbolic link\n")
1702 _(b"copy failed: %s is not a file or a symbolic link\n")
1703 % self._repo.dirstate.pathto(dest)
1703 % self._repo.dirstate.pathto(dest)
1704 )
1704 )
1705 else:
1705 else:
1706 with self._repo.wlock():
1706 with self._repo.wlock():
1707 ds = self._repo.dirstate
1707 ds = self._repo.dirstate
1708 if ds[dest] in b'?':
1708 if ds[dest] in b'?':
1709 ds.add(dest)
1709 ds.add(dest)
1710 elif ds[dest] in b'r':
1710 elif ds[dest] in b'r':
1711 ds.normallookup(dest)
1711 ds.normallookup(dest)
1712 ds.copy(source, dest)
1712 ds.copy(source, dest)
1713
1713
1714 def match(
1714 def match(
1715 self,
1715 self,
1716 pats=None,
1716 pats=None,
1717 include=None,
1717 include=None,
1718 exclude=None,
1718 exclude=None,
1719 default=b'glob',
1719 default=b'glob',
1720 listsubrepos=False,
1720 listsubrepos=False,
1721 badfn=None,
1721 badfn=None,
1722 cwd=None,
1722 cwd=None,
1723 ):
1723 ):
1724 r = self._repo
1724 r = self._repo
1725 if not cwd:
1725 if not cwd:
1726 cwd = r.getcwd()
1726 cwd = r.getcwd()
1727
1727
1728 # Only a case insensitive filesystem needs magic to translate user input
1728 # Only a case insensitive filesystem needs magic to translate user input
1729 # to actual case in the filesystem.
1729 # to actual case in the filesystem.
1730 icasefs = not util.fscasesensitive(r.root)
1730 icasefs = not util.fscasesensitive(r.root)
1731 return matchmod.match(
1731 return matchmod.match(
1732 r.root,
1732 r.root,
1733 cwd,
1733 cwd,
1734 pats,
1734 pats,
1735 include,
1735 include,
1736 exclude,
1736 exclude,
1737 default,
1737 default,
1738 auditor=r.auditor,
1738 auditor=r.auditor,
1739 ctx=self,
1739 ctx=self,
1740 listsubrepos=listsubrepos,
1740 listsubrepos=listsubrepos,
1741 badfn=badfn,
1741 badfn=badfn,
1742 icasefs=icasefs,
1742 icasefs=icasefs,
1743 )
1743 )
1744
1744
1745 def _filtersuspectsymlink(self, files):
1745 def _filtersuspectsymlink(self, files):
1746 if not files or self._repo.dirstate._checklink:
1746 if not files or self._repo.dirstate._checklink:
1747 return files
1747 return files
1748
1748
1749 # Symlink placeholders may get non-symlink-like contents
1749 # Symlink placeholders may get non-symlink-like contents
1750 # via user error or dereferencing by NFS or Samba servers,
1750 # via user error or dereferencing by NFS or Samba servers,
1751 # so we filter out any placeholders that don't look like a
1751 # so we filter out any placeholders that don't look like a
1752 # symlink
1752 # symlink
1753 sane = []
1753 sane = []
1754 for f in files:
1754 for f in files:
1755 if self.flags(f) == b'l':
1755 if self.flags(f) == b'l':
1756 d = self[f].data()
1756 d = self[f].data()
1757 if (
1757 if (
1758 d == b''
1758 d == b''
1759 or len(d) >= 1024
1759 or len(d) >= 1024
1760 or b'\n' in d
1760 or b'\n' in d
1761 or stringutil.binary(d)
1761 or stringutil.binary(d)
1762 ):
1762 ):
1763 self._repo.ui.debug(
1763 self._repo.ui.debug(
1764 b'ignoring suspect symlink placeholder "%s"\n' % f
1764 b'ignoring suspect symlink placeholder "%s"\n' % f
1765 )
1765 )
1766 continue
1766 continue
1767 sane.append(f)
1767 sane.append(f)
1768 return sane
1768 return sane
1769
1769
1770 def _checklookup(self, files):
1770 def _checklookup(self, files):
1771 # check for any possibly clean files
1771 # check for any possibly clean files
1772 if not files:
1772 if not files:
1773 return [], [], []
1773 return [], [], []
1774
1774
1775 modified = []
1775 modified = []
1776 deleted = []
1776 deleted = []
1777 fixup = []
1777 fixup = []
1778 pctx = self._parents[0]
1778 pctx = self._parents[0]
1779 # do a full compare of any files that might have changed
1779 # do a full compare of any files that might have changed
1780 for f in sorted(files):
1780 for f in sorted(files):
1781 try:
1781 try:
1782 # This will return True for a file that got replaced by a
1782 # This will return True for a file that got replaced by a
1783 # directory in the interim, but fixing that is pretty hard.
1783 # directory in the interim, but fixing that is pretty hard.
1784 if (
1784 if (
1785 f not in pctx
1785 f not in pctx
1786 or self.flags(f) != pctx.flags(f)
1786 or self.flags(f) != pctx.flags(f)
1787 or pctx[f].cmp(self[f])
1787 or pctx[f].cmp(self[f])
1788 ):
1788 ):
1789 modified.append(f)
1789 modified.append(f)
1790 else:
1790 else:
1791 fixup.append(f)
1791 fixup.append(f)
1792 except (IOError, OSError):
1792 except (IOError, OSError):
1793 # A file become inaccessible in between? Mark it as deleted,
1793 # A file become inaccessible in between? Mark it as deleted,
1794 # matching dirstate behavior (issue5584).
1794 # matching dirstate behavior (issue5584).
1795 # The dirstate has more complex behavior around whether a
1795 # The dirstate has more complex behavior around whether a
1796 # missing file matches a directory, etc, but we don't need to
1796 # missing file matches a directory, etc, but we don't need to
1797 # bother with that: if f has made it to this point, we're sure
1797 # bother with that: if f has made it to this point, we're sure
1798 # it's in the dirstate.
1798 # it's in the dirstate.
1799 deleted.append(f)
1799 deleted.append(f)
1800
1800
1801 return modified, deleted, fixup
1801 return modified, deleted, fixup
1802
1802
1803 def _poststatusfixup(self, status, fixup):
1803 def _poststatusfixup(self, status, fixup):
1804 """update dirstate for files that are actually clean"""
1804 """update dirstate for files that are actually clean"""
1805 poststatus = self._repo.postdsstatus()
1805 poststatus = self._repo.postdsstatus()
1806 if fixup or poststatus:
1806 if fixup or poststatus:
1807 try:
1807 try:
1808 oldid = self._repo.dirstate.identity()
1808 oldid = self._repo.dirstate.identity()
1809
1809
1810 # updating the dirstate is optional
1810 # updating the dirstate is optional
1811 # so we don't wait on the lock
1811 # so we don't wait on the lock
1812 # wlock can invalidate the dirstate, so cache normal _after_
1812 # wlock can invalidate the dirstate, so cache normal _after_
1813 # taking the lock
1813 # taking the lock
1814 with self._repo.wlock(False):
1814 with self._repo.wlock(False):
1815 if self._repo.dirstate.identity() == oldid:
1815 if self._repo.dirstate.identity() == oldid:
1816 if fixup:
1816 if fixup:
1817 normal = self._repo.dirstate.normal
1817 normal = self._repo.dirstate.normal
1818 for f in fixup:
1818 for f in fixup:
1819 normal(f)
1819 normal(f)
1820 # write changes out explicitly, because nesting
1820 # write changes out explicitly, because nesting
1821 # wlock at runtime may prevent 'wlock.release()'
1821 # wlock at runtime may prevent 'wlock.release()'
1822 # after this block from doing so for subsequent
1822 # after this block from doing so for subsequent
1823 # changing files
1823 # changing files
1824 tr = self._repo.currenttransaction()
1824 tr = self._repo.currenttransaction()
1825 self._repo.dirstate.write(tr)
1825 self._repo.dirstate.write(tr)
1826
1826
1827 if poststatus:
1827 if poststatus:
1828 for ps in poststatus:
1828 for ps in poststatus:
1829 ps(self, status)
1829 ps(self, status)
1830 else:
1830 else:
1831 # in this case, writing changes out breaks
1831 # in this case, writing changes out breaks
1832 # consistency, because .hg/dirstate was
1832 # consistency, because .hg/dirstate was
1833 # already changed simultaneously after last
1833 # already changed simultaneously after last
1834 # caching (see also issue5584 for detail)
1834 # caching (see also issue5584 for detail)
1835 self._repo.ui.debug(
1835 self._repo.ui.debug(
1836 b'skip updating dirstate: identity mismatch\n'
1836 b'skip updating dirstate: identity mismatch\n'
1837 )
1837 )
1838 except error.LockError:
1838 except error.LockError:
1839 pass
1839 pass
1840 finally:
1840 finally:
1841 # Even if the wlock couldn't be grabbed, clear out the list.
1841 # Even if the wlock couldn't be grabbed, clear out the list.
1842 self._repo.clearpostdsstatus()
1842 self._repo.clearpostdsstatus()
1843
1843
1844 def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
1844 def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
1845 '''Gets the status from the dirstate -- internal use only.'''
1845 '''Gets the status from the dirstate -- internal use only.'''
1846 subrepos = []
1846 subrepos = []
1847 if b'.hgsub' in self:
1847 if b'.hgsub' in self:
1848 subrepos = sorted(self.substate)
1848 subrepos = sorted(self.substate)
1849 cmp, s = self._repo.dirstate.status(
1849 cmp, s = self._repo.dirstate.status(
1850 match, subrepos, ignored=ignored, clean=clean, unknown=unknown
1850 match, subrepos, ignored=ignored, clean=clean, unknown=unknown
1851 )
1851 )
1852
1852
1853 # check for any possibly clean files
1853 # check for any possibly clean files
1854 fixup = []
1854 fixup = []
1855 if cmp:
1855 if cmp:
1856 modified2, deleted2, fixup = self._checklookup(cmp)
1856 modified2, deleted2, fixup = self._checklookup(cmp)
1857 s.modified.extend(modified2)
1857 s.modified.extend(modified2)
1858 s.deleted.extend(deleted2)
1858 s.deleted.extend(deleted2)
1859
1859
1860 if fixup and clean:
1860 if fixup and clean:
1861 s.clean.extend(fixup)
1861 s.clean.extend(fixup)
1862
1862
1863 self._poststatusfixup(s, fixup)
1863 self._poststatusfixup(s, fixup)
1864
1864
1865 if match.always():
1865 if match.always():
1866 # cache for performance
1866 # cache for performance
1867 if s.unknown or s.ignored or s.clean:
1867 if s.unknown or s.ignored or s.clean:
1868 # "_status" is cached with list*=False in the normal route
1868 # "_status" is cached with list*=False in the normal route
1869 self._status = scmutil.status(
1869 self._status = scmutil.status(
1870 s.modified, s.added, s.removed, s.deleted, [], [], []
1870 s.modified, s.added, s.removed, s.deleted, [], [], []
1871 )
1871 )
1872 else:
1872 else:
1873 self._status = s
1873 self._status = s
1874
1874
1875 return s
1875 return s
1876
1876
1877 @propertycache
1877 @propertycache
1878 def _copies(self):
1878 def _copies(self):
1879 p1copies = {}
1879 p1copies = {}
1880 p2copies = {}
1880 p2copies = {}
1881 parents = self._repo.dirstate.parents()
1881 parents = self._repo.dirstate.parents()
1882 p1manifest = self._repo[parents[0]].manifest()
1882 p1manifest = self._repo[parents[0]].manifest()
1883 p2manifest = self._repo[parents[1]].manifest()
1883 p2manifest = self._repo[parents[1]].manifest()
1884 changedset = set(self.added()) | set(self.modified())
1884 changedset = set(self.added()) | set(self.modified())
1885 narrowmatch = self._repo.narrowmatch()
1885 narrowmatch = self._repo.narrowmatch()
1886 for dst, src in self._repo.dirstate.copies().items():
1886 for dst, src in self._repo.dirstate.copies().items():
1887 if dst not in changedset or not narrowmatch(dst):
1887 if dst not in changedset or not narrowmatch(dst):
1888 continue
1888 continue
1889 if src in p1manifest:
1889 if src in p1manifest:
1890 p1copies[dst] = src
1890 p1copies[dst] = src
1891 elif src in p2manifest:
1891 elif src in p2manifest:
1892 p2copies[dst] = src
1892 p2copies[dst] = src
1893 return p1copies, p2copies
1893 return p1copies, p2copies
1894
1894
1895 @propertycache
1895 @propertycache
1896 def _manifest(self):
1896 def _manifest(self):
1897 """generate a manifest corresponding to the values in self._status
1897 """generate a manifest corresponding to the values in self._status
1898
1898
1899 This reuse the file nodeid from parent, but we use special node
1899 This reuse the file nodeid from parent, but we use special node
1900 identifiers for added and modified files. This is used by manifests
1900 identifiers for added and modified files. This is used by manifests
1901 merge to see that files are different and by update logic to avoid
1901 merge to see that files are different and by update logic to avoid
1902 deleting newly added files.
1902 deleting newly added files.
1903 """
1903 """
1904 return self._buildstatusmanifest(self._status)
1904 return self._buildstatusmanifest(self._status)
1905
1905
1906 def _buildstatusmanifest(self, status):
1906 def _buildstatusmanifest(self, status):
1907 """Builds a manifest that includes the given status results."""
1907 """Builds a manifest that includes the given status results."""
1908 parents = self.parents()
1908 parents = self.parents()
1909
1909
1910 man = parents[0].manifest().copy()
1910 man = parents[0].manifest().copy()
1911
1911
1912 ff = self._flagfunc
1912 ff = self._flagfunc
1913 for i, l in (
1913 for i, l in (
1914 (addednodeid, status.added),
1914 (addednodeid, status.added),
1915 (modifiednodeid, status.modified),
1915 (modifiednodeid, status.modified),
1916 ):
1916 ):
1917 for f in l:
1917 for f in l:
1918 man[f] = i
1918 man[f] = i
1919 try:
1919 try:
1920 man.setflag(f, ff(f))
1920 man.setflag(f, ff(f))
1921 except OSError:
1921 except OSError:
1922 pass
1922 pass
1923
1923
1924 for f in status.deleted + status.removed:
1924 for f in status.deleted + status.removed:
1925 if f in man:
1925 if f in man:
1926 del man[f]
1926 del man[f]
1927
1927
1928 return man
1928 return man
1929
1929
1930 def _buildstatus(
1930 def _buildstatus(
1931 self, other, s, match, listignored, listclean, listunknown
1931 self, other, s, match, listignored, listclean, listunknown
1932 ):
1932 ):
1933 """build a status with respect to another context
1933 """build a status with respect to another context
1934
1934
1935 This includes logic for maintaining the fast path of status when
1935 This includes logic for maintaining the fast path of status when
1936 comparing the working directory against its parent, which is to skip
1936 comparing the working directory against its parent, which is to skip
1937 building a new manifest if self (working directory) is not comparing
1937 building a new manifest if self (working directory) is not comparing
1938 against its parent (repo['.']).
1938 against its parent (repo['.']).
1939 """
1939 """
1940 s = self._dirstatestatus(match, listignored, listclean, listunknown)
1940 s = self._dirstatestatus(match, listignored, listclean, listunknown)
1941 # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
1941 # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
1942 # might have accidentally ended up with the entire contents of the file
1942 # might have accidentally ended up with the entire contents of the file
1943 # they are supposed to be linking to.
1943 # they are supposed to be linking to.
1944 s.modified[:] = self._filtersuspectsymlink(s.modified)
1944 s.modified[:] = self._filtersuspectsymlink(s.modified)
1945 if other != self._repo[b'.']:
1945 if other != self._repo[b'.']:
1946 s = super(workingctx, self)._buildstatus(
1946 s = super(workingctx, self)._buildstatus(
1947 other, s, match, listignored, listclean, listunknown
1947 other, s, match, listignored, listclean, listunknown
1948 )
1948 )
1949 return s
1949 return s
1950
1950
1951 def _matchstatus(self, other, match):
1951 def _matchstatus(self, other, match):
1952 """override the match method with a filter for directory patterns
1952 """override the match method with a filter for directory patterns
1953
1953
1954 We use inheritance to customize the match.bad method only in cases of
1954 We use inheritance to customize the match.bad method only in cases of
1955 workingctx since it belongs only to the working directory when
1955 workingctx since it belongs only to the working directory when
1956 comparing against the parent changeset.
1956 comparing against the parent changeset.
1957
1957
1958 If we aren't comparing against the working directory's parent, then we
1958 If we aren't comparing against the working directory's parent, then we
1959 just use the default match object sent to us.
1959 just use the default match object sent to us.
1960 """
1960 """
1961 if other != self._repo[b'.']:
1961 if other != self._repo[b'.']:
1962
1962
1963 def bad(f, msg):
1963 def bad(f, msg):
1964 # 'f' may be a directory pattern from 'match.files()',
1964 # 'f' may be a directory pattern from 'match.files()',
1965 # so 'f not in ctx1' is not enough
1965 # so 'f not in ctx1' is not enough
1966 if f not in other and not other.hasdir(f):
1966 if f not in other and not other.hasdir(f):
1967 self._repo.ui.warn(
1967 self._repo.ui.warn(
1968 b'%s: %s\n' % (self._repo.dirstate.pathto(f), msg)
1968 b'%s: %s\n' % (self._repo.dirstate.pathto(f), msg)
1969 )
1969 )
1970
1970
1971 match.bad = bad
1971 match.bad = bad
1972 return match
1972 return match
1973
1973
1974 def walk(self, match):
1974 def walk(self, match):
1975 '''Generates matching file names.'''
1975 '''Generates matching file names.'''
1976 return sorted(
1976 return sorted(
1977 self._repo.dirstate.walk(
1977 self._repo.dirstate.walk(
1978 self._repo.narrowmatch(match),
1978 self._repo.narrowmatch(match),
1979 subrepos=sorted(self.substate),
1979 subrepos=sorted(self.substate),
1980 unknown=True,
1980 unknown=True,
1981 ignored=False,
1981 ignored=False,
1982 )
1982 )
1983 )
1983 )
1984
1984
1985 def matches(self, match):
1985 def matches(self, match):
1986 match = self._repo.narrowmatch(match)
1986 match = self._repo.narrowmatch(match)
1987 ds = self._repo.dirstate
1987 ds = self._repo.dirstate
1988 return sorted(f for f in ds.matches(match) if ds[f] != b'r')
1988 return sorted(f for f in ds.matches(match) if ds[f] != b'r')
1989
1989
1990 def markcommitted(self, node):
1990 def markcommitted(self, node):
1991 with self._repo.dirstate.parentchange():
1991 with self._repo.dirstate.parentchange():
1992 for f in self.modified() + self.added():
1992 for f in self.modified() + self.added():
1993 self._repo.dirstate.normal(f)
1993 self._repo.dirstate.normal(f)
1994 for f in self.removed():
1994 for f in self.removed():
1995 self._repo.dirstate.drop(f)
1995 self._repo.dirstate.drop(f)
1996 self._repo.dirstate.setparents(node)
1996 self._repo.dirstate.setparents(node)
1997 self._repo._quick_access_changeid_invalidate()
1997 self._repo._quick_access_changeid_invalidate()
1998
1998
1999 # write changes out explicitly, because nesting wlock at
1999 # write changes out explicitly, because nesting wlock at
2000 # runtime may prevent 'wlock.release()' in 'repo.commit()'
2000 # runtime may prevent 'wlock.release()' in 'repo.commit()'
2001 # from immediately doing so for subsequent changing files
2001 # from immediately doing so for subsequent changing files
2002 self._repo.dirstate.write(self._repo.currenttransaction())
2002 self._repo.dirstate.write(self._repo.currenttransaction())
2003
2003
2004 sparse.aftercommit(self._repo, node)
2004 sparse.aftercommit(self._repo, node)
2005
2005
2006
2006
class committablefilectx(basefilectx):
    """Shared behavior for file contexts that can be committed.

    Concrete subclasses include workingfilectx and memfilectx.
    """

    def __init__(self, repo, path, filelog=None, ctx=None):
        self._repo = repo
        self._path = path
        # not yet bound to any changelog revision
        self._changeid = None
        self._filerev = self._filenode = None

        if filelog is not None:
            self._filelog = filelog
        if ctx:
            self._changectx = ctx

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def linkrev(self):
        # always linked to self._changectx, whether the file is modified
        # or not
        return self.rev()

    def renamed(self):
        """Return (source path, source filenode) if this file was copied."""
        source = self.copysource()
        if not source:
            return None
        p1manifest = self._changectx._parents[0]._manifest
        return source, p1manifest.get(source, nullid)

    def parents(self):
        '''return parent filectxs, following copies if necessary'''
        path = self._path
        filelog = self._filelog
        parentctxs = self._changectx._parents
        renamed = self.renamed()

        def nodefor(ctx):
            # filenode of *path* in ctx, nullid if absent
            return ctx._manifest.get(path, nullid)

        if renamed:
            entries = [renamed + (None,)]
        else:
            entries = [(path, nodefor(parentctxs[0]), filelog)]
        entries.extend(
            (path, nodefor(pctx), filelog) for pctx in parentctxs[1:]
        )

        return [
            self._parentfilectx(p, fileid=n, filelog=l)
            for p, n, l in entries
            if n != nullid
        ]

    def children(self):
        # committable (uncommitted) file contexts never have children
        return []
2064
2064
2065
2065
class workingfilectx(committablefilectx):
    """Convenient access to data about one file in the working directory."""

    def __init__(self, repo, path, filelog=None, workingctx=None):
        super(workingfilectx, self).__init__(repo, path, filelog, workingctx)

    @propertycache
    def _changectx(self):
        return workingctx(self._repo)

    def data(self):
        # read through the working-directory read filters
        return self._repo.wread(self._path)

    def copysource(self):
        return self._repo.dirstate.copied(self._path)

    def size(self):
        return self._repo.wvfs.lstat(self._path).st_size

    def lstat(self):
        return self._repo.wvfs.lstat(self._path)

    def date(self):
        """(mtime, tz) of the on-disk file; changectx date if it is gone."""
        t, tz = self._changectx.date()
        try:
            return (self._repo.wvfs.lstat(self._path)[stat.ST_MTIME], tz)
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
        return (t, tz)

    def exists(self):
        return self._repo.wvfs.exists(self._path)

    def lexists(self):
        return self._repo.wvfs.lexists(self._path)

    def audit(self):
        return self._repo.wvfs.audit(self._path)

    def cmp(self, fctx):
        """compare with other file context

        returns True if different than fctx.
        """
        # fctx should be a filectx (not a workingfilectx); invert the
        # comparison to reuse the same code path
        return fctx.cmp(self)

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        rmdir = self._repo.ui.configbool(b'experimental', b'removeemptydirs')
        self._repo.wvfs.unlinkpath(
            self._path, ignoremissing=ignoremissing, rmdir=rmdir
        )

    def write(self, data, flags, backgroundclose=False, **kwargs):
        """wraps repo.wwrite"""
        return self._repo.wwrite(
            self._path, data, flags, backgroundclose=backgroundclose, **kwargs
        )

    def markcopied(self, src):
        """marks this file a copy of `src`"""
        self._repo.dirstate.copy(src, self._path)

    def clearunknown(self):
        """Removes conflicting items in the working directory so that
        ``write()`` can be called successfully.
        """
        wvfs = self._repo.wvfs
        fname = self._path
        wvfs.audit(fname)
        checkconflicts = self._repo.ui.configbool(
            b'experimental', b'merge.checkpathconflicts'
        )
        if checkconflicts:
            # remove files under the directory as they should already be
            # warned and backed up
            if wvfs.isdir(fname) and not wvfs.islink(fname):
                wvfs.rmtree(fname, forcibly=True)
            for ancestor in reversed(list(pathutil.finddirs(fname))):
                if wvfs.isfileorlink(ancestor):
                    wvfs.unlink(ancestor)
                    break
        else:
            # don't remove files if path conflicts are not processed
            if wvfs.isdir(fname) and not wvfs.islink(fname):
                wvfs.removedirs(fname)

    def setflags(self, l, x):
        self._repo.wvfs.setflags(self._path, l, x)
2158
2158
2159
2159
2160 class overlayworkingctx(committablectx):
2160 class overlayworkingctx(committablectx):
2161 """Wraps another mutable context with a write-back cache that can be
2161 """Wraps another mutable context with a write-back cache that can be
2162 converted into a commit context.
2162 converted into a commit context.
2163
2163
2164 self._cache[path] maps to a dict with keys: {
2164 self._cache[path] maps to a dict with keys: {
2165 'exists': bool?
2165 'exists': bool?
2166 'date': date?
2166 'date': date?
2167 'data': str?
2167 'data': str?
2168 'flags': str?
2168 'flags': str?
2169 'copied': str? (path or None)
2169 'copied': str? (path or None)
2170 }
2170 }
2171 If `exists` is True, `flags` must be non-None and 'date' is non-None. If it
2171 If `exists` is True, `flags` must be non-None and 'date' is non-None. If it
2172 is `False`, the file was deleted.
2172 is `False`, the file was deleted.
2173 """
2173 """
2174
2174
2175 def __init__(self, repo):
2175 def __init__(self, repo):
2176 super(overlayworkingctx, self).__init__(repo)
2176 super(overlayworkingctx, self).__init__(repo)
2177 self.clean()
2177 self.clean()
2178
2178
2179 def setbase(self, wrappedctx):
2179 def setbase(self, wrappedctx):
2180 self._wrappedctx = wrappedctx
2180 self._wrappedctx = wrappedctx
2181 self._parents = [wrappedctx]
2181 self._parents = [wrappedctx]
2182 # Drop old manifest cache as it is now out of date.
2182 # Drop old manifest cache as it is now out of date.
2183 # This is necessary when, e.g., rebasing several nodes with one
2183 # This is necessary when, e.g., rebasing several nodes with one
2184 # ``overlayworkingctx`` (e.g. with --collapse).
2184 # ``overlayworkingctx`` (e.g. with --collapse).
2185 util.clearcachedproperty(self, b'_manifest')
2185 util.clearcachedproperty(self, b'_manifest')
2186
2186
2187 def setparents(self, p1node, p2node=nullid):
2187 def setparents(self, p1node, p2node=nullid):
2188 assert p1node == self._wrappedctx.node()
2188 assert p1node == self._wrappedctx.node()
2189 self._parents = [self._wrappedctx, self._repo.unfiltered()[p2node]]
2189 self._parents = [self._wrappedctx, self._repo.unfiltered()[p2node]]
2190
2190
2191 def data(self, path):
2191 def data(self, path):
2192 if self.isdirty(path):
2192 if self.isdirty(path):
2193 if self._cache[path][b'exists']:
2193 if self._cache[path][b'exists']:
2194 if self._cache[path][b'data'] is not None:
2194 if self._cache[path][b'data'] is not None:
2195 return self._cache[path][b'data']
2195 return self._cache[path][b'data']
2196 else:
2196 else:
2197 # Must fallback here, too, because we only set flags.
2197 # Must fallback here, too, because we only set flags.
2198 return self._wrappedctx[path].data()
2198 return self._wrappedctx[path].data()
2199 else:
2199 else:
2200 raise error.ProgrammingError(
2200 raise error.ProgrammingError(
2201 b"No such file or directory: %s" % path
2201 b"No such file or directory: %s" % path
2202 )
2202 )
2203 else:
2203 else:
2204 return self._wrappedctx[path].data()
2204 return self._wrappedctx[path].data()
2205
2205
2206 @propertycache
2206 @propertycache
2207 def _manifest(self):
2207 def _manifest(self):
2208 parents = self.parents()
2208 parents = self.parents()
2209 man = parents[0].manifest().copy()
2209 man = parents[0].manifest().copy()
2210
2210
2211 flag = self._flagfunc
2211 flag = self._flagfunc
2212 for path in self.added():
2212 for path in self.added():
2213 man[path] = addednodeid
2213 man[path] = addednodeid
2214 man.setflag(path, flag(path))
2214 man.setflag(path, flag(path))
2215 for path in self.modified():
2215 for path in self.modified():
2216 man[path] = modifiednodeid
2216 man[path] = modifiednodeid
2217 man.setflag(path, flag(path))
2217 man.setflag(path, flag(path))
2218 for path in self.removed():
2218 for path in self.removed():
2219 del man[path]
2219 del man[path]
2220 return man
2220 return man
2221
2221
2222 @propertycache
2222 @propertycache
2223 def _flagfunc(self):
2223 def _flagfunc(self):
2224 def f(path):
2224 def f(path):
2225 return self._cache[path][b'flags']
2225 return self._cache[path][b'flags']
2226
2226
2227 return f
2227 return f
2228
2228
2229 def files(self):
2229 def files(self):
2230 return sorted(self.added() + self.modified() + self.removed())
2230 return sorted(self.added() + self.modified() + self.removed())
2231
2231
2232 def modified(self):
2232 def modified(self):
2233 return [
2233 return [
2234 f
2234 f
2235 for f in self._cache.keys()
2235 for f in self._cache.keys()
2236 if self._cache[f][b'exists'] and self._existsinparent(f)
2236 if self._cache[f][b'exists'] and self._existsinparent(f)
2237 ]
2237 ]
2238
2238
2239 def added(self):
2239 def added(self):
2240 return [
2240 return [
2241 f
2241 f
2242 for f in self._cache.keys()
2242 for f in self._cache.keys()
2243 if self._cache[f][b'exists'] and not self._existsinparent(f)
2243 if self._cache[f][b'exists'] and not self._existsinparent(f)
2244 ]
2244 ]
2245
2245
2246 def removed(self):
2246 def removed(self):
2247 return [
2247 return [
2248 f
2248 f
2249 for f in self._cache.keys()
2249 for f in self._cache.keys()
2250 if not self._cache[f][b'exists'] and self._existsinparent(f)
2250 if not self._cache[f][b'exists'] and self._existsinparent(f)
2251 ]
2251 ]
2252
2252
2253 def p1copies(self):
2253 def p1copies(self):
2254 copies = {}
2254 copies = {}
2255 narrowmatch = self._repo.narrowmatch()
2255 narrowmatch = self._repo.narrowmatch()
2256 for f in self._cache.keys():
2256 for f in self._cache.keys():
2257 if not narrowmatch(f):
2257 if not narrowmatch(f):
2258 continue
2258 continue
2259 copies.pop(f, None) # delete if it exists
2259 copies.pop(f, None) # delete if it exists
2260 source = self._cache[f][b'copied']
2260 source = self._cache[f][b'copied']
2261 if source:
2261 if source:
2262 copies[f] = source
2262 copies[f] = source
2263 return copies
2263 return copies
2264
2264
2265 def p2copies(self):
2265 def p2copies(self):
2266 copies = {}
2266 copies = {}
2267 narrowmatch = self._repo.narrowmatch()
2267 narrowmatch = self._repo.narrowmatch()
2268 for f in self._cache.keys():
2268 for f in self._cache.keys():
2269 if not narrowmatch(f):
2269 if not narrowmatch(f):
2270 continue
2270 continue
2271 copies.pop(f, None) # delete if it exists
2271 copies.pop(f, None) # delete if it exists
2272 source = self._cache[f][b'copied']
2272 source = self._cache[f][b'copied']
2273 if source:
2273 if source:
2274 copies[f] = source
2274 copies[f] = source
2275 return copies
2275 return copies
2276
2276
2277 def isinmemory(self):
2277 def isinmemory(self):
2278 return True
2278 return True
2279
2279
2280 def filedate(self, path):
2280 def filedate(self, path):
2281 if self.isdirty(path):
2281 if self.isdirty(path):
2282 return self._cache[path][b'date']
2282 return self._cache[path][b'date']
2283 else:
2283 else:
2284 return self._wrappedctx[path].date()
2284 return self._wrappedctx[path].date()
2285
2285
2286 def markcopied(self, path, origin):
2286 def markcopied(self, path, origin):
2287 self._markdirty(
2287 self._markdirty(
2288 path,
2288 path,
2289 exists=True,
2289 exists=True,
2290 date=self.filedate(path),
2290 date=self.filedate(path),
2291 flags=self.flags(path),
2291 flags=self.flags(path),
2292 copied=origin,
2292 copied=origin,
2293 )
2293 )
2294
2294
2295 def copydata(self, path):
2295 def copydata(self, path):
2296 if self.isdirty(path):
2296 if self.isdirty(path):
2297 return self._cache[path][b'copied']
2297 return self._cache[path][b'copied']
2298 else:
2298 else:
2299 return None
2299 return None
2300
2300
2301 def flags(self, path):
2301 def flags(self, path):
2302 if self.isdirty(path):
2302 if self.isdirty(path):
2303 if self._cache[path][b'exists']:
2303 if self._cache[path][b'exists']:
2304 return self._cache[path][b'flags']
2304 return self._cache[path][b'flags']
2305 else:
2305 else:
2306 raise error.ProgrammingError(
2306 raise error.ProgrammingError(
2307 b"No such file or directory: %s" % self._path
2307 b"No such file or directory: %s" % self._path
2308 )
2308 )
2309 else:
2309 else:
2310 return self._wrappedctx[path].flags()
2310 return self._wrappedctx[path].flags()
2311
2311
2312 def __contains__(self, key):
2312 def __contains__(self, key):
2313 if key in self._cache:
2313 if key in self._cache:
2314 return self._cache[key][b'exists']
2314 return self._cache[key][b'exists']
2315 return key in self.p1()
2315 return key in self.p1()
2316
2316
2317 def _existsinparent(self, path):
2317 def _existsinparent(self, path):
2318 try:
2318 try:
2319 # ``commitctx` raises a ``ManifestLookupError`` if a path does not
2319 # ``commitctx` raises a ``ManifestLookupError`` if a path does not
2320 # exist, unlike ``workingctx``, which returns a ``workingfilectx``
2320 # exist, unlike ``workingctx``, which returns a ``workingfilectx``
2321 # with an ``exists()`` function.
2321 # with an ``exists()`` function.
2322 self._wrappedctx[path]
2322 self._wrappedctx[path]
2323 return True
2323 return True
2324 except error.ManifestLookupError:
2324 except error.ManifestLookupError:
2325 return False
2325 return False
2326
2326
2327 def _auditconflicts(self, path):
2327 def _auditconflicts(self, path):
2328 """Replicates conflict checks done by wvfs.write().
2328 """Replicates conflict checks done by wvfs.write().
2329
2329
2330 Since we never write to the filesystem and never call `applyupdates` in
2330 Since we never write to the filesystem and never call `applyupdates` in
2331 IMM, we'll never check that a path is actually writable -- e.g., because
2331 IMM, we'll never check that a path is actually writable -- e.g., because
2332 it adds `a/foo`, but `a` is actually a file in the other commit.
2332 it adds `a/foo`, but `a` is actually a file in the other commit.
2333 """
2333 """
2334
2334
2335 def fail(path, component):
2335 def fail(path, component):
2336 # p1() is the base and we're receiving "writes" for p2()'s
2336 # p1() is the base and we're receiving "writes" for p2()'s
2337 # files.
2337 # files.
2338 if b'l' in self.p1()[component].flags():
2338 if b'l' in self.p1()[component].flags():
2339 raise error.Abort(
2339 raise error.Abort(
2340 b"error: %s conflicts with symlink %s "
2340 b"error: %s conflicts with symlink %s "
2341 b"in %d." % (path, component, self.p1().rev())
2341 b"in %d." % (path, component, self.p1().rev())
2342 )
2342 )
2343 else:
2343 else:
2344 raise error.Abort(
2344 raise error.Abort(
2345 b"error: '%s' conflicts with file '%s' in "
2345 b"error: '%s' conflicts with file '%s' in "
2346 b"%d." % (path, component, self.p1().rev())
2346 b"%d." % (path, component, self.p1().rev())
2347 )
2347 )
2348
2348
2349 # Test that each new directory to be created to write this path from p2
2349 # Test that each new directory to be created to write this path from p2
2350 # is not a file in p1.
2350 # is not a file in p1.
2351 components = path.split(b'/')
2351 components = path.split(b'/')
2352 for i in pycompat.xrange(len(components)):
2352 for i in pycompat.xrange(len(components)):
2353 component = b"/".join(components[0:i])
2353 component = b"/".join(components[0:i])
2354 if component in self:
2354 if component in self:
2355 fail(path, component)
2355 fail(path, component)
2356
2356
2357 # Test the other direction -- that this path from p2 isn't a directory
2357 # Test the other direction -- that this path from p2 isn't a directory
2358 # in p1 (test that p1 doesn't have any paths matching `path/*`).
2358 # in p1 (test that p1 doesn't have any paths matching `path/*`).
2359 match = self.match([path], default=b'path')
2359 match = self.match([path], default=b'path')
2360 matches = self.p1().manifest().matches(match)
2360 mfiles = list(self.p1().manifest().walk(match))
2361 mfiles = matches.keys()
2362 if len(mfiles) > 0:
2361 if len(mfiles) > 0:
2363 if len(mfiles) == 1 and mfiles[0] == path:
2362 if len(mfiles) == 1 and mfiles[0] == path:
2364 return
2363 return
2365 # omit the files which are deleted in current IMM wctx
2364 # omit the files which are deleted in current IMM wctx
2366 mfiles = [m for m in mfiles if m in self]
2365 mfiles = [m for m in mfiles if m in self]
2367 if not mfiles:
2366 if not mfiles:
2368 return
2367 return
2369 raise error.Abort(
2368 raise error.Abort(
2370 b"error: file '%s' cannot be written because "
2369 b"error: file '%s' cannot be written because "
2371 b" '%s/' is a directory in %s (containing %d "
2370 b" '%s/' is a directory in %s (containing %d "
2372 b"entries: %s)"
2371 b"entries: %s)"
2373 % (path, path, self.p1(), len(mfiles), b', '.join(mfiles))
2372 % (path, path, self.p1(), len(mfiles), b', '.join(mfiles))
2374 )
2373 )
2375
2374
2376 def write(self, path, data, flags=b'', **kwargs):
2375 def write(self, path, data, flags=b'', **kwargs):
2377 if data is None:
2376 if data is None:
2378 raise error.ProgrammingError(b"data must be non-None")
2377 raise error.ProgrammingError(b"data must be non-None")
2379 self._auditconflicts(path)
2378 self._auditconflicts(path)
2380 self._markdirty(
2379 self._markdirty(
2381 path, exists=True, data=data, date=dateutil.makedate(), flags=flags
2380 path, exists=True, data=data, date=dateutil.makedate(), flags=flags
2382 )
2381 )
2383
2382
2384 def setflags(self, path, l, x):
2383 def setflags(self, path, l, x):
2385 flag = b''
2384 flag = b''
2386 if l:
2385 if l:
2387 flag = b'l'
2386 flag = b'l'
2388 elif x:
2387 elif x:
2389 flag = b'x'
2388 flag = b'x'
2390 self._markdirty(path, exists=True, date=dateutil.makedate(), flags=flag)
2389 self._markdirty(path, exists=True, date=dateutil.makedate(), flags=flag)
2391
2390
2392 def remove(self, path):
2391 def remove(self, path):
2393 self._markdirty(path, exists=False)
2392 self._markdirty(path, exists=False)
2394
2393
2395 def exists(self, path):
2394 def exists(self, path):
2396 """exists behaves like `lexists`, but needs to follow symlinks and
2395 """exists behaves like `lexists`, but needs to follow symlinks and
2397 return False if they are broken.
2396 return False if they are broken.
2398 """
2397 """
2399 if self.isdirty(path):
2398 if self.isdirty(path):
2400 # If this path exists and is a symlink, "follow" it by calling
2399 # If this path exists and is a symlink, "follow" it by calling
2401 # exists on the destination path.
2400 # exists on the destination path.
2402 if (
2401 if (
2403 self._cache[path][b'exists']
2402 self._cache[path][b'exists']
2404 and b'l' in self._cache[path][b'flags']
2403 and b'l' in self._cache[path][b'flags']
2405 ):
2404 ):
2406 return self.exists(self._cache[path][b'data'].strip())
2405 return self.exists(self._cache[path][b'data'].strip())
2407 else:
2406 else:
2408 return self._cache[path][b'exists']
2407 return self._cache[path][b'exists']
2409
2408
2410 return self._existsinparent(path)
2409 return self._existsinparent(path)
2411
2410
2412 def lexists(self, path):
2411 def lexists(self, path):
2413 """lexists returns True if the path exists"""
2412 """lexists returns True if the path exists"""
2414 if self.isdirty(path):
2413 if self.isdirty(path):
2415 return self._cache[path][b'exists']
2414 return self._cache[path][b'exists']
2416
2415
2417 return self._existsinparent(path)
2416 return self._existsinparent(path)
2418
2417
2419 def size(self, path):
2418 def size(self, path):
2420 if self.isdirty(path):
2419 if self.isdirty(path):
2421 if self._cache[path][b'exists']:
2420 if self._cache[path][b'exists']:
2422 return len(self._cache[path][b'data'])
2421 return len(self._cache[path][b'data'])
2423 else:
2422 else:
2424 raise error.ProgrammingError(
2423 raise error.ProgrammingError(
2425 b"No such file or directory: %s" % self._path
2424 b"No such file or directory: %s" % self._path
2426 )
2425 )
2427 return self._wrappedctx[path].size()
2426 return self._wrappedctx[path].size()
2428
2427
2429 def tomemctx(
2428 def tomemctx(
2430 self,
2429 self,
2431 text,
2430 text,
2432 branch=None,
2431 branch=None,
2433 extra=None,
2432 extra=None,
2434 date=None,
2433 date=None,
2435 parents=None,
2434 parents=None,
2436 user=None,
2435 user=None,
2437 editor=None,
2436 editor=None,
2438 ):
2437 ):
2439 """Converts this ``overlayworkingctx`` into a ``memctx`` ready to be
2438 """Converts this ``overlayworkingctx`` into a ``memctx`` ready to be
2440 committed.
2439 committed.
2441
2440
2442 ``text`` is the commit message.
2441 ``text`` is the commit message.
2443 ``parents`` (optional) are rev numbers.
2442 ``parents`` (optional) are rev numbers.
2444 """
2443 """
2445 # Default parents to the wrapped context if not passed.
2444 # Default parents to the wrapped context if not passed.
2446 if parents is None:
2445 if parents is None:
2447 parents = self.parents()
2446 parents = self.parents()
2448 if len(parents) == 1:
2447 if len(parents) == 1:
2449 parents = (parents[0], None)
2448 parents = (parents[0], None)
2450
2449
2451 # ``parents`` is passed as rev numbers; convert to ``commitctxs``.
2450 # ``parents`` is passed as rev numbers; convert to ``commitctxs``.
2452 if parents[1] is None:
2451 if parents[1] is None:
2453 parents = (self._repo[parents[0]], None)
2452 parents = (self._repo[parents[0]], None)
2454 else:
2453 else:
2455 parents = (self._repo[parents[0]], self._repo[parents[1]])
2454 parents = (self._repo[parents[0]], self._repo[parents[1]])
2456
2455
2457 files = self.files()
2456 files = self.files()
2458
2457
2459 def getfile(repo, memctx, path):
2458 def getfile(repo, memctx, path):
2460 if self._cache[path][b'exists']:
2459 if self._cache[path][b'exists']:
2461 return memfilectx(
2460 return memfilectx(
2462 repo,
2461 repo,
2463 memctx,
2462 memctx,
2464 path,
2463 path,
2465 self._cache[path][b'data'],
2464 self._cache[path][b'data'],
2466 b'l' in self._cache[path][b'flags'],
2465 b'l' in self._cache[path][b'flags'],
2467 b'x' in self._cache[path][b'flags'],
2466 b'x' in self._cache[path][b'flags'],
2468 self._cache[path][b'copied'],
2467 self._cache[path][b'copied'],
2469 )
2468 )
2470 else:
2469 else:
2471 # Returning None, but including the path in `files`, is
2470 # Returning None, but including the path in `files`, is
2472 # necessary for memctx to register a deletion.
2471 # necessary for memctx to register a deletion.
2473 return None
2472 return None
2474
2473
2475 if branch is None:
2474 if branch is None:
2476 branch = self._wrappedctx.branch()
2475 branch = self._wrappedctx.branch()
2477
2476
2478 return memctx(
2477 return memctx(
2479 self._repo,
2478 self._repo,
2480 parents,
2479 parents,
2481 text,
2480 text,
2482 files,
2481 files,
2483 getfile,
2482 getfile,
2484 date=date,
2483 date=date,
2485 extra=extra,
2484 extra=extra,
2486 user=user,
2485 user=user,
2487 branch=branch,
2486 branch=branch,
2488 editor=editor,
2487 editor=editor,
2489 )
2488 )
2490
2489
2491 def isdirty(self, path):
2490 def isdirty(self, path):
2492 return path in self._cache
2491 return path in self._cache
2493
2492
2494 def isempty(self):
2493 def isempty(self):
2495 # We need to discard any keys that are actually clean before the empty
2494 # We need to discard any keys that are actually clean before the empty
2496 # commit check.
2495 # commit check.
2497 self._compact()
2496 self._compact()
2498 return len(self._cache) == 0
2497 return len(self._cache) == 0
2499
2498
2500 def clean(self):
2499 def clean(self):
2501 self._cache = {}
2500 self._cache = {}
2502
2501
2503 def _compact(self):
2502 def _compact(self):
2504 """Removes keys from the cache that are actually clean, by comparing
2503 """Removes keys from the cache that are actually clean, by comparing
2505 them with the underlying context.
2504 them with the underlying context.
2506
2505
2507 This can occur during the merge process, e.g. by passing --tool :local
2506 This can occur during the merge process, e.g. by passing --tool :local
2508 to resolve a conflict.
2507 to resolve a conflict.
2509 """
2508 """
2510 keys = []
2509 keys = []
2511 # This won't be perfect, but can help performance significantly when
2510 # This won't be perfect, but can help performance significantly when
2512 # using things like remotefilelog.
2511 # using things like remotefilelog.
2513 scmutil.prefetchfiles(
2512 scmutil.prefetchfiles(
2514 self.repo(),
2513 self.repo(),
2515 [self.p1().rev()],
2514 [self.p1().rev()],
2516 scmutil.matchfiles(self.repo(), self._cache.keys()),
2515 scmutil.matchfiles(self.repo(), self._cache.keys()),
2517 )
2516 )
2518
2517
2519 for path in self._cache.keys():
2518 for path in self._cache.keys():
2520 cache = self._cache[path]
2519 cache = self._cache[path]
2521 try:
2520 try:
2522 underlying = self._wrappedctx[path]
2521 underlying = self._wrappedctx[path]
2523 if (
2522 if (
2524 underlying.data() == cache[b'data']
2523 underlying.data() == cache[b'data']
2525 and underlying.flags() == cache[b'flags']
2524 and underlying.flags() == cache[b'flags']
2526 ):
2525 ):
2527 keys.append(path)
2526 keys.append(path)
2528 except error.ManifestLookupError:
2527 except error.ManifestLookupError:
2529 # Path not in the underlying manifest (created).
2528 # Path not in the underlying manifest (created).
2530 continue
2529 continue
2531
2530
2532 for path in keys:
2531 for path in keys:
2533 del self._cache[path]
2532 del self._cache[path]
2534 return keys
2533 return keys
2535
2534
2536 def _markdirty(
2535 def _markdirty(
2537 self, path, exists, data=None, date=None, flags=b'', copied=None
2536 self, path, exists, data=None, date=None, flags=b'', copied=None
2538 ):
2537 ):
2539 # data not provided, let's see if we already have some; if not, let's
2538 # data not provided, let's see if we already have some; if not, let's
2540 # grab it from our underlying context, so that we always have data if
2539 # grab it from our underlying context, so that we always have data if
2541 # the file is marked as existing.
2540 # the file is marked as existing.
2542 if exists and data is None:
2541 if exists and data is None:
2543 oldentry = self._cache.get(path) or {}
2542 oldentry = self._cache.get(path) or {}
2544 data = oldentry.get(b'data')
2543 data = oldentry.get(b'data')
2545 if data is None:
2544 if data is None:
2546 data = self._wrappedctx[path].data()
2545 data = self._wrappedctx[path].data()
2547
2546
2548 self._cache[path] = {
2547 self._cache[path] = {
2549 b'exists': exists,
2548 b'exists': exists,
2550 b'data': data,
2549 b'data': data,
2551 b'date': date,
2550 b'date': date,
2552 b'flags': flags,
2551 b'flags': flags,
2553 b'copied': copied,
2552 b'copied': copied,
2554 }
2553 }
2555
2554
2556 def filectx(self, path, filelog=None):
2555 def filectx(self, path, filelog=None):
2557 return overlayworkingfilectx(
2556 return overlayworkingfilectx(
2558 self._repo, path, parent=self, filelog=filelog
2557 self._repo, path, parent=self, filelog=filelog
2559 )
2558 )
2560
2559
2561
2560
2562 class overlayworkingfilectx(committablefilectx):
2561 class overlayworkingfilectx(committablefilectx):
2563 """Wrap a ``workingfilectx`` but intercepts all writes into an in-memory
2562 """Wrap a ``workingfilectx`` but intercepts all writes into an in-memory
2564 cache, which can be flushed through later by calling ``flush()``."""
2563 cache, which can be flushed through later by calling ``flush()``."""
2565
2564
2566 def __init__(self, repo, path, filelog=None, parent=None):
2565 def __init__(self, repo, path, filelog=None, parent=None):
2567 super(overlayworkingfilectx, self).__init__(repo, path, filelog, parent)
2566 super(overlayworkingfilectx, self).__init__(repo, path, filelog, parent)
2568 self._repo = repo
2567 self._repo = repo
2569 self._parent = parent
2568 self._parent = parent
2570 self._path = path
2569 self._path = path
2571
2570
2572 def cmp(self, fctx):
2571 def cmp(self, fctx):
2573 return self.data() != fctx.data()
2572 return self.data() != fctx.data()
2574
2573
2575 def changectx(self):
2574 def changectx(self):
2576 return self._parent
2575 return self._parent
2577
2576
2578 def data(self):
2577 def data(self):
2579 return self._parent.data(self._path)
2578 return self._parent.data(self._path)
2580
2579
2581 def date(self):
2580 def date(self):
2582 return self._parent.filedate(self._path)
2581 return self._parent.filedate(self._path)
2583
2582
2584 def exists(self):
2583 def exists(self):
2585 return self.lexists()
2584 return self.lexists()
2586
2585
2587 def lexists(self):
2586 def lexists(self):
2588 return self._parent.exists(self._path)
2587 return self._parent.exists(self._path)
2589
2588
2590 def copysource(self):
2589 def copysource(self):
2591 return self._parent.copydata(self._path)
2590 return self._parent.copydata(self._path)
2592
2591
2593 def size(self):
2592 def size(self):
2594 return self._parent.size(self._path)
2593 return self._parent.size(self._path)
2595
2594
2596 def markcopied(self, origin):
2595 def markcopied(self, origin):
2597 self._parent.markcopied(self._path, origin)
2596 self._parent.markcopied(self._path, origin)
2598
2597
2599 def audit(self):
2598 def audit(self):
2600 pass
2599 pass
2601
2600
2602 def flags(self):
2601 def flags(self):
2603 return self._parent.flags(self._path)
2602 return self._parent.flags(self._path)
2604
2603
2605 def setflags(self, islink, isexec):
2604 def setflags(self, islink, isexec):
2606 return self._parent.setflags(self._path, islink, isexec)
2605 return self._parent.setflags(self._path, islink, isexec)
2607
2606
2608 def write(self, data, flags, backgroundclose=False, **kwargs):
2607 def write(self, data, flags, backgroundclose=False, **kwargs):
2609 return self._parent.write(self._path, data, flags, **kwargs)
2608 return self._parent.write(self._path, data, flags, **kwargs)
2610
2609
2611 def remove(self, ignoremissing=False):
2610 def remove(self, ignoremissing=False):
2612 return self._parent.remove(self._path)
2611 return self._parent.remove(self._path)
2613
2612
2614 def clearunknown(self):
2613 def clearunknown(self):
2615 pass
2614 pass
2616
2615
2617
2616
2618 class workingcommitctx(workingctx):
2617 class workingcommitctx(workingctx):
2619 """A workingcommitctx object makes access to data related to
2618 """A workingcommitctx object makes access to data related to
2620 the revision being committed convenient.
2619 the revision being committed convenient.
2621
2620
2622 This hides changes in the working directory, if they aren't
2621 This hides changes in the working directory, if they aren't
2623 committed in this context.
2622 committed in this context.
2624 """
2623 """
2625
2624
2626 def __init__(
2625 def __init__(
2627 self, repo, changes, text=b"", user=None, date=None, extra=None
2626 self, repo, changes, text=b"", user=None, date=None, extra=None
2628 ):
2627 ):
2629 super(workingcommitctx, self).__init__(
2628 super(workingcommitctx, self).__init__(
2630 repo, text, user, date, extra, changes
2629 repo, text, user, date, extra, changes
2631 )
2630 )
2632
2631
2633 def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
2632 def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
2634 """Return matched files only in ``self._status``
2633 """Return matched files only in ``self._status``
2635
2634
2636 Uncommitted files appear "clean" via this context, even if
2635 Uncommitted files appear "clean" via this context, even if
2637 they aren't actually so in the working directory.
2636 they aren't actually so in the working directory.
2638 """
2637 """
2639 if clean:
2638 if clean:
2640 clean = [f for f in self._manifest if f not in self._changedset]
2639 clean = [f for f in self._manifest if f not in self._changedset]
2641 else:
2640 else:
2642 clean = []
2641 clean = []
2643 return scmutil.status(
2642 return scmutil.status(
2644 [f for f in self._status.modified if match(f)],
2643 [f for f in self._status.modified if match(f)],
2645 [f for f in self._status.added if match(f)],
2644 [f for f in self._status.added if match(f)],
2646 [f for f in self._status.removed if match(f)],
2645 [f for f in self._status.removed if match(f)],
2647 [],
2646 [],
2648 [],
2647 [],
2649 [],
2648 [],
2650 clean,
2649 clean,
2651 )
2650 )
2652
2651
2653 @propertycache
2652 @propertycache
2654 def _changedset(self):
2653 def _changedset(self):
2655 """Return the set of files changed in this context
2654 """Return the set of files changed in this context
2656 """
2655 """
2657 changed = set(self._status.modified)
2656 changed = set(self._status.modified)
2658 changed.update(self._status.added)
2657 changed.update(self._status.added)
2659 changed.update(self._status.removed)
2658 changed.update(self._status.removed)
2660 return changed
2659 return changed
2661
2660
2662
2661
2663 def makecachingfilectxfn(func):
2662 def makecachingfilectxfn(func):
2664 """Create a filectxfn that caches based on the path.
2663 """Create a filectxfn that caches based on the path.
2665
2664
2666 We can't use util.cachefunc because it uses all arguments as the cache
2665 We can't use util.cachefunc because it uses all arguments as the cache
2667 key and this creates a cycle since the arguments include the repo and
2666 key and this creates a cycle since the arguments include the repo and
2668 memctx.
2667 memctx.
2669 """
2668 """
2670 cache = {}
2669 cache = {}
2671
2670
2672 def getfilectx(repo, memctx, path):
2671 def getfilectx(repo, memctx, path):
2673 if path not in cache:
2672 if path not in cache:
2674 cache[path] = func(repo, memctx, path)
2673 cache[path] = func(repo, memctx, path)
2675 return cache[path]
2674 return cache[path]
2676
2675
2677 return getfilectx
2676 return getfilectx
2678
2677
2679
2678
2680 def memfilefromctx(ctx):
2679 def memfilefromctx(ctx):
2681 """Given a context return a memfilectx for ctx[path]
2680 """Given a context return a memfilectx for ctx[path]
2682
2681
2683 This is a convenience method for building a memctx based on another
2682 This is a convenience method for building a memctx based on another
2684 context.
2683 context.
2685 """
2684 """
2686
2685
2687 def getfilectx(repo, memctx, path):
2686 def getfilectx(repo, memctx, path):
2688 fctx = ctx[path]
2687 fctx = ctx[path]
2689 copysource = fctx.copysource()
2688 copysource = fctx.copysource()
2690 return memfilectx(
2689 return memfilectx(
2691 repo,
2690 repo,
2692 memctx,
2691 memctx,
2693 path,
2692 path,
2694 fctx.data(),
2693 fctx.data(),
2695 islink=fctx.islink(),
2694 islink=fctx.islink(),
2696 isexec=fctx.isexec(),
2695 isexec=fctx.isexec(),
2697 copysource=copysource,
2696 copysource=copysource,
2698 )
2697 )
2699
2698
2700 return getfilectx
2699 return getfilectx
2701
2700
2702
2701
2703 def memfilefrompatch(patchstore):
2702 def memfilefrompatch(patchstore):
2704 """Given a patch (e.g. patchstore object) return a memfilectx
2703 """Given a patch (e.g. patchstore object) return a memfilectx
2705
2704
2706 This is a convenience method for building a memctx based on a patchstore.
2705 This is a convenience method for building a memctx based on a patchstore.
2707 """
2706 """
2708
2707
2709 def getfilectx(repo, memctx, path):
2708 def getfilectx(repo, memctx, path):
2710 data, mode, copysource = patchstore.getfile(path)
2709 data, mode, copysource = patchstore.getfile(path)
2711 if data is None:
2710 if data is None:
2712 return None
2711 return None
2713 islink, isexec = mode
2712 islink, isexec = mode
2714 return memfilectx(
2713 return memfilectx(
2715 repo,
2714 repo,
2716 memctx,
2715 memctx,
2717 path,
2716 path,
2718 data,
2717 data,
2719 islink=islink,
2718 islink=islink,
2720 isexec=isexec,
2719 isexec=isexec,
2721 copysource=copysource,
2720 copysource=copysource,
2722 )
2721 )
2723
2722
2724 return getfilectx
2723 return getfilectx
2725
2724
2726
2725
2727 class memctx(committablectx):
2726 class memctx(committablectx):
2728 """Use memctx to perform in-memory commits via localrepo.commitctx().
2727 """Use memctx to perform in-memory commits via localrepo.commitctx().
2729
2728
2730 Revision information is supplied at initialization time while
2729 Revision information is supplied at initialization time while
2731 related files data and is made available through a callback
2730 related files data and is made available through a callback
2732 mechanism. 'repo' is the current localrepo, 'parents' is a
2731 mechanism. 'repo' is the current localrepo, 'parents' is a
2733 sequence of two parent revisions identifiers (pass None for every
2732 sequence of two parent revisions identifiers (pass None for every
2734 missing parent), 'text' is the commit message and 'files' lists
2733 missing parent), 'text' is the commit message and 'files' lists
2735 names of files touched by the revision (normalized and relative to
2734 names of files touched by the revision (normalized and relative to
2736 repository root).
2735 repository root).
2737
2736
2738 filectxfn(repo, memctx, path) is a callable receiving the
2737 filectxfn(repo, memctx, path) is a callable receiving the
2739 repository, the current memctx object and the normalized path of
2738 repository, the current memctx object and the normalized path of
2740 requested file, relative to repository root. It is fired by the
2739 requested file, relative to repository root. It is fired by the
2741 commit function for every file in 'files', but calls order is
2740 commit function for every file in 'files', but calls order is
2742 undefined. If the file is available in the revision being
2741 undefined. If the file is available in the revision being
2743 committed (updated or added), filectxfn returns a memfilectx
2742 committed (updated or added), filectxfn returns a memfilectx
2744 object. If the file was removed, filectxfn return None for recent
2743 object. If the file was removed, filectxfn return None for recent
2745 Mercurial. Moved files are represented by marking the source file
2744 Mercurial. Moved files are represented by marking the source file
2746 removed and the new file added with copy information (see
2745 removed and the new file added with copy information (see
2747 memfilectx).
2746 memfilectx).
2748
2747
2749 user receives the committer name and defaults to current
2748 user receives the committer name and defaults to current
2750 repository username, date is the commit date in any format
2749 repository username, date is the commit date in any format
2751 supported by dateutil.parsedate() and defaults to current date, extra
2750 supported by dateutil.parsedate() and defaults to current date, extra
2752 is a dictionary of metadata or is left empty.
2751 is a dictionary of metadata or is left empty.
2753 """
2752 """
2754
2753
2755 # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
2754 # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
2756 # Extensions that need to retain compatibility across Mercurial 3.1 can use
2755 # Extensions that need to retain compatibility across Mercurial 3.1 can use
2757 # this field to determine what to do in filectxfn.
2756 # this field to determine what to do in filectxfn.
2758 _returnnoneformissingfiles = True
2757 _returnnoneformissingfiles = True
2759
2758
2760 def __init__(
2759 def __init__(
2761 self,
2760 self,
2762 repo,
2761 repo,
2763 parents,
2762 parents,
2764 text,
2763 text,
2765 files,
2764 files,
2766 filectxfn,
2765 filectxfn,
2767 user=None,
2766 user=None,
2768 date=None,
2767 date=None,
2769 extra=None,
2768 extra=None,
2770 branch=None,
2769 branch=None,
2771 editor=None,
2770 editor=None,
2772 ):
2771 ):
2773 super(memctx, self).__init__(
2772 super(memctx, self).__init__(
2774 repo, text, user, date, extra, branch=branch
2773 repo, text, user, date, extra, branch=branch
2775 )
2774 )
2776 self._rev = None
2775 self._rev = None
2777 self._node = None
2776 self._node = None
2778 parents = [(p or nullid) for p in parents]
2777 parents = [(p or nullid) for p in parents]
2779 p1, p2 = parents
2778 p1, p2 = parents
2780 self._parents = [self._repo[p] for p in (p1, p2)]
2779 self._parents = [self._repo[p] for p in (p1, p2)]
2781 files = sorted(set(files))
2780 files = sorted(set(files))
2782 self._files = files
2781 self._files = files
2783 self.substate = {}
2782 self.substate = {}
2784
2783
2785 if isinstance(filectxfn, patch.filestore):
2784 if isinstance(filectxfn, patch.filestore):
2786 filectxfn = memfilefrompatch(filectxfn)
2785 filectxfn = memfilefrompatch(filectxfn)
2787 elif not callable(filectxfn):
2786 elif not callable(filectxfn):
2788 # if store is not callable, wrap it in a function
2787 # if store is not callable, wrap it in a function
2789 filectxfn = memfilefromctx(filectxfn)
2788 filectxfn = memfilefromctx(filectxfn)
2790
2789
2791 # memoizing increases performance for e.g. vcs convert scenarios.
2790 # memoizing increases performance for e.g. vcs convert scenarios.
2792 self._filectxfn = makecachingfilectxfn(filectxfn)
2791 self._filectxfn = makecachingfilectxfn(filectxfn)
2793
2792
2794 if editor:
2793 if editor:
2795 self._text = editor(self._repo, self, [])
2794 self._text = editor(self._repo, self, [])
2796 self._repo.savecommitmessage(self._text)
2795 self._repo.savecommitmessage(self._text)
2797
2796
2798 def filectx(self, path, filelog=None):
2797 def filectx(self, path, filelog=None):
2799 """get a file context from the working directory
2798 """get a file context from the working directory
2800
2799
2801 Returns None if file doesn't exist and should be removed."""
2800 Returns None if file doesn't exist and should be removed."""
2802 return self._filectxfn(self._repo, self, path)
2801 return self._filectxfn(self._repo, self, path)
2803
2802
2804 def commit(self):
2803 def commit(self):
2805 """commit context to the repo"""
2804 """commit context to the repo"""
2806 return self._repo.commitctx(self)
2805 return self._repo.commitctx(self)
2807
2806
2808 @propertycache
2807 @propertycache
2809 def _manifest(self):
2808 def _manifest(self):
2810 """generate a manifest based on the return values of filectxfn"""
2809 """generate a manifest based on the return values of filectxfn"""
2811
2810
2812 # keep this simple for now; just worry about p1
2811 # keep this simple for now; just worry about p1
2813 pctx = self._parents[0]
2812 pctx = self._parents[0]
2814 man = pctx.manifest().copy()
2813 man = pctx.manifest().copy()
2815
2814
2816 for f in self._status.modified:
2815 for f in self._status.modified:
2817 man[f] = modifiednodeid
2816 man[f] = modifiednodeid
2818
2817
2819 for f in self._status.added:
2818 for f in self._status.added:
2820 man[f] = addednodeid
2819 man[f] = addednodeid
2821
2820
2822 for f in self._status.removed:
2821 for f in self._status.removed:
2823 if f in man:
2822 if f in man:
2824 del man[f]
2823 del man[f]
2825
2824
2826 return man
2825 return man
2827
2826
2828 @propertycache
2827 @propertycache
2829 def _status(self):
2828 def _status(self):
2830 """Calculate exact status from ``files`` specified at construction
2829 """Calculate exact status from ``files`` specified at construction
2831 """
2830 """
2832 man1 = self.p1().manifest()
2831 man1 = self.p1().manifest()
2833 p2 = self._parents[1]
2832 p2 = self._parents[1]
2834 # "1 < len(self._parents)" can't be used for checking
2833 # "1 < len(self._parents)" can't be used for checking
2835 # existence of the 2nd parent, because "memctx._parents" is
2834 # existence of the 2nd parent, because "memctx._parents" is
2836 # explicitly initialized by the list, of which length is 2.
2835 # explicitly initialized by the list, of which length is 2.
2837 if p2.node() != nullid:
2836 if p2.node() != nullid:
2838 man2 = p2.manifest()
2837 man2 = p2.manifest()
2839 managing = lambda f: f in man1 or f in man2
2838 managing = lambda f: f in man1 or f in man2
2840 else:
2839 else:
2841 managing = lambda f: f in man1
2840 managing = lambda f: f in man1
2842
2841
2843 modified, added, removed = [], [], []
2842 modified, added, removed = [], [], []
2844 for f in self._files:
2843 for f in self._files:
2845 if not managing(f):
2844 if not managing(f):
2846 added.append(f)
2845 added.append(f)
2847 elif self[f]:
2846 elif self[f]:
2848 modified.append(f)
2847 modified.append(f)
2849 else:
2848 else:
2850 removed.append(f)
2849 removed.append(f)
2851
2850
2852 return scmutil.status(modified, added, removed, [], [], [], [])
2851 return scmutil.status(modified, added, removed, [], [], [], [])
2853
2852
2854
2853
2855 class memfilectx(committablefilectx):
2854 class memfilectx(committablefilectx):
2856 """memfilectx represents an in-memory file to commit.
2855 """memfilectx represents an in-memory file to commit.
2857
2856
2858 See memctx and committablefilectx for more details.
2857 See memctx and committablefilectx for more details.
2859 """
2858 """
2860
2859
2861 def __init__(
2860 def __init__(
2862 self,
2861 self,
2863 repo,
2862 repo,
2864 changectx,
2863 changectx,
2865 path,
2864 path,
2866 data,
2865 data,
2867 islink=False,
2866 islink=False,
2868 isexec=False,
2867 isexec=False,
2869 copysource=None,
2868 copysource=None,
2870 ):
2869 ):
2871 """
2870 """
2872 path is the normalized file path relative to repository root.
2871 path is the normalized file path relative to repository root.
2873 data is the file content as a string.
2872 data is the file content as a string.
2874 islink is True if the file is a symbolic link.
2873 islink is True if the file is a symbolic link.
2875 isexec is True if the file is executable.
2874 isexec is True if the file is executable.
2876 copied is the source file path if current file was copied in the
2875 copied is the source file path if current file was copied in the
2877 revision being committed, or None."""
2876 revision being committed, or None."""
2878 super(memfilectx, self).__init__(repo, path, None, changectx)
2877 super(memfilectx, self).__init__(repo, path, None, changectx)
2879 self._data = data
2878 self._data = data
2880 if islink:
2879 if islink:
2881 self._flags = b'l'
2880 self._flags = b'l'
2882 elif isexec:
2881 elif isexec:
2883 self._flags = b'x'
2882 self._flags = b'x'
2884 else:
2883 else:
2885 self._flags = b''
2884 self._flags = b''
2886 self._copysource = copysource
2885 self._copysource = copysource
2887
2886
2888 def copysource(self):
2887 def copysource(self):
2889 return self._copysource
2888 return self._copysource
2890
2889
2891 def cmp(self, fctx):
2890 def cmp(self, fctx):
2892 return self.data() != fctx.data()
2891 return self.data() != fctx.data()
2893
2892
2894 def data(self):
2893 def data(self):
2895 return self._data
2894 return self._data
2896
2895
2897 def remove(self, ignoremissing=False):
2896 def remove(self, ignoremissing=False):
2898 """wraps unlink for a repo's working directory"""
2897 """wraps unlink for a repo's working directory"""
2899 # need to figure out what to do here
2898 # need to figure out what to do here
2900 del self._changectx[self._path]
2899 del self._changectx[self._path]
2901
2900
2902 def write(self, data, flags, **kwargs):
2901 def write(self, data, flags, **kwargs):
2903 """wraps repo.wwrite"""
2902 """wraps repo.wwrite"""
2904 self._data = data
2903 self._data = data
2905
2904
2906
2905
2907 class metadataonlyctx(committablectx):
2906 class metadataonlyctx(committablectx):
2908 """Like memctx but it's reusing the manifest of different commit.
2907 """Like memctx but it's reusing the manifest of different commit.
2909 Intended to be used by lightweight operations that are creating
2908 Intended to be used by lightweight operations that are creating
2910 metadata-only changes.
2909 metadata-only changes.
2911
2910
2912 Revision information is supplied at initialization time. 'repo' is the
2911 Revision information is supplied at initialization time. 'repo' is the
2913 current localrepo, 'ctx' is original revision which manifest we're reuisng
2912 current localrepo, 'ctx' is original revision which manifest we're reuisng
2914 'parents' is a sequence of two parent revisions identifiers (pass None for
2913 'parents' is a sequence of two parent revisions identifiers (pass None for
2915 every missing parent), 'text' is the commit.
2914 every missing parent), 'text' is the commit.
2916
2915
2917 user receives the committer name and defaults to current repository
2916 user receives the committer name and defaults to current repository
2918 username, date is the commit date in any format supported by
2917 username, date is the commit date in any format supported by
2919 dateutil.parsedate() and defaults to current date, extra is a dictionary of
2918 dateutil.parsedate() and defaults to current date, extra is a dictionary of
2920 metadata or is left empty.
2919 metadata or is left empty.
2921 """
2920 """
2922
2921
2923 def __init__(
2922 def __init__(
2924 self,
2923 self,
2925 repo,
2924 repo,
2926 originalctx,
2925 originalctx,
2927 parents=None,
2926 parents=None,
2928 text=None,
2927 text=None,
2929 user=None,
2928 user=None,
2930 date=None,
2929 date=None,
2931 extra=None,
2930 extra=None,
2932 editor=None,
2931 editor=None,
2933 ):
2932 ):
2934 if text is None:
2933 if text is None:
2935 text = originalctx.description()
2934 text = originalctx.description()
2936 super(metadataonlyctx, self).__init__(repo, text, user, date, extra)
2935 super(metadataonlyctx, self).__init__(repo, text, user, date, extra)
2937 self._rev = None
2936 self._rev = None
2938 self._node = None
2937 self._node = None
2939 self._originalctx = originalctx
2938 self._originalctx = originalctx
2940 self._manifestnode = originalctx.manifestnode()
2939 self._manifestnode = originalctx.manifestnode()
2941 if parents is None:
2940 if parents is None:
2942 parents = originalctx.parents()
2941 parents = originalctx.parents()
2943 else:
2942 else:
2944 parents = [repo[p] for p in parents if p is not None]
2943 parents = [repo[p] for p in parents if p is not None]
2945 parents = parents[:]
2944 parents = parents[:]
2946 while len(parents) < 2:
2945 while len(parents) < 2:
2947 parents.append(repo[nullid])
2946 parents.append(repo[nullid])
2948 p1, p2 = self._parents = parents
2947 p1, p2 = self._parents = parents
2949
2948
2950 # sanity check to ensure that the reused manifest parents are
2949 # sanity check to ensure that the reused manifest parents are
2951 # manifests of our commit parents
2950 # manifests of our commit parents
2952 mp1, mp2 = self.manifestctx().parents
2951 mp1, mp2 = self.manifestctx().parents
2953 if p1 != nullid and p1.manifestnode() != mp1:
2952 if p1 != nullid and p1.manifestnode() != mp1:
2954 raise RuntimeError(
2953 raise RuntimeError(
2955 r"can't reuse the manifest: its p1 "
2954 r"can't reuse the manifest: its p1 "
2956 r"doesn't match the new ctx p1"
2955 r"doesn't match the new ctx p1"
2957 )
2956 )
2958 if p2 != nullid and p2.manifestnode() != mp2:
2957 if p2 != nullid and p2.manifestnode() != mp2:
2959 raise RuntimeError(
2958 raise RuntimeError(
2960 r"can't reuse the manifest: "
2959 r"can't reuse the manifest: "
2961 r"its p2 doesn't match the new ctx p2"
2960 r"its p2 doesn't match the new ctx p2"
2962 )
2961 )
2963
2962
2964 self._files = originalctx.files()
2963 self._files = originalctx.files()
2965 self.substate = {}
2964 self.substate = {}
2966
2965
2967 if editor:
2966 if editor:
2968 self._text = editor(self._repo, self, [])
2967 self._text = editor(self._repo, self, [])
2969 self._repo.savecommitmessage(self._text)
2968 self._repo.savecommitmessage(self._text)
2970
2969
2971 def manifestnode(self):
2970 def manifestnode(self):
2972 return self._manifestnode
2971 return self._manifestnode
2973
2972
2974 @property
2973 @property
2975 def _manifestctx(self):
2974 def _manifestctx(self):
2976 return self._repo.manifestlog[self._manifestnode]
2975 return self._repo.manifestlog[self._manifestnode]
2977
2976
2978 def filectx(self, path, filelog=None):
2977 def filectx(self, path, filelog=None):
2979 return self._originalctx.filectx(path, filelog=filelog)
2978 return self._originalctx.filectx(path, filelog=filelog)
2980
2979
2981 def commit(self):
2980 def commit(self):
2982 """commit context to the repo"""
2981 """commit context to the repo"""
2983 return self._repo.commitctx(self)
2982 return self._repo.commitctx(self)
2984
2983
2985 @property
2984 @property
2986 def _manifest(self):
2985 def _manifest(self):
2987 return self._originalctx.manifest()
2986 return self._originalctx.manifest()
2988
2987
2989 @propertycache
2988 @propertycache
2990 def _status(self):
2989 def _status(self):
2991 """Calculate exact status from ``files`` specified in the ``origctx``
2990 """Calculate exact status from ``files`` specified in the ``origctx``
2992 and parents manifests.
2991 and parents manifests.
2993 """
2992 """
2994 man1 = self.p1().manifest()
2993 man1 = self.p1().manifest()
2995 p2 = self._parents[1]
2994 p2 = self._parents[1]
2996 # "1 < len(self._parents)" can't be used for checking
2995 # "1 < len(self._parents)" can't be used for checking
2997 # existence of the 2nd parent, because "metadataonlyctx._parents" is
2996 # existence of the 2nd parent, because "metadataonlyctx._parents" is
2998 # explicitly initialized by the list, of which length is 2.
2997 # explicitly initialized by the list, of which length is 2.
2999 if p2.node() != nullid:
2998 if p2.node() != nullid:
3000 man2 = p2.manifest()
2999 man2 = p2.manifest()
3001 managing = lambda f: f in man1 or f in man2
3000 managing = lambda f: f in man1 or f in man2
3002 else:
3001 else:
3003 managing = lambda f: f in man1
3002 managing = lambda f: f in man1
3004
3003
3005 modified, added, removed = [], [], []
3004 modified, added, removed = [], [], []
3006 for f in self._files:
3005 for f in self._files:
3007 if not managing(f):
3006 if not managing(f):
3008 added.append(f)
3007 added.append(f)
3009 elif f in self:
3008 elif f in self:
3010 modified.append(f)
3009 modified.append(f)
3011 else:
3010 else:
3012 removed.append(f)
3011 removed.append(f)
3013
3012
3014 return scmutil.status(modified, added, removed, [], [], [], [])
3013 return scmutil.status(modified, added, removed, [], [], [], [])
3015
3014
3016
3015
3017 class arbitraryfilectx(object):
3016 class arbitraryfilectx(object):
3018 """Allows you to use filectx-like functions on a file in an arbitrary
3017 """Allows you to use filectx-like functions on a file in an arbitrary
3019 location on disk, possibly not in the working directory.
3018 location on disk, possibly not in the working directory.
3020 """
3019 """
3021
3020
3022 def __init__(self, path, repo=None):
3021 def __init__(self, path, repo=None):
3023 # Repo is optional because contrib/simplemerge uses this class.
3022 # Repo is optional because contrib/simplemerge uses this class.
3024 self._repo = repo
3023 self._repo = repo
3025 self._path = path
3024 self._path = path
3026
3025
3027 def cmp(self, fctx):
3026 def cmp(self, fctx):
3028 # filecmp follows symlinks whereas `cmp` should not, so skip the fast
3027 # filecmp follows symlinks whereas `cmp` should not, so skip the fast
3029 # path if either side is a symlink.
3028 # path if either side is a symlink.
3030 symlinks = b'l' in self.flags() or b'l' in fctx.flags()
3029 symlinks = b'l' in self.flags() or b'l' in fctx.flags()
3031 if not symlinks and isinstance(fctx, workingfilectx) and self._repo:
3030 if not symlinks and isinstance(fctx, workingfilectx) and self._repo:
3032 # Add a fast-path for merge if both sides are disk-backed.
3031 # Add a fast-path for merge if both sides are disk-backed.
3033 # Note that filecmp uses the opposite return values (True if same)
3032 # Note that filecmp uses the opposite return values (True if same)
3034 # from our cmp functions (True if different).
3033 # from our cmp functions (True if different).
3035 return not filecmp.cmp(self.path(), self._repo.wjoin(fctx.path()))
3034 return not filecmp.cmp(self.path(), self._repo.wjoin(fctx.path()))
3036 return self.data() != fctx.data()
3035 return self.data() != fctx.data()
3037
3036
3038 def path(self):
3037 def path(self):
3039 return self._path
3038 return self._path
3040
3039
3041 def flags(self):
3040 def flags(self):
3042 return b''
3041 return b''
3043
3042
3044 def data(self):
3043 def data(self):
3045 return util.readfile(self._path)
3044 return util.readfile(self._path)
3046
3045
3047 def decodeddata(self):
3046 def decodeddata(self):
3048 with open(self._path, b"rb") as f:
3047 with open(self._path, b"rb") as f:
3049 return f.read()
3048 return f.read()
3050
3049
3051 def remove(self):
3050 def remove(self):
3052 util.unlink(self._path)
3051 util.unlink(self._path)
3053
3052
3054 def write(self, data, flags, **kwargs):
3053 def write(self, data, flags, **kwargs):
3055 assert not flags
3054 assert not flags
3056 with open(self._path, b"wb") as f:
3055 with open(self._path, b"wb") as f:
3057 f.write(data)
3056 f.write(data)
General Comments 0
You need to be logged in to leave comments. Login now