##// END OF EJS Templates
changectx: use unfiltered changelog to walk ancestors in annotate...
marmoute -
r44567:c4729703 default
parent child Browse files
Show More
@@ -1,3052 +1,3054 b''
1 # context.py - changeset and file context objects for mercurial
1 # context.py - changeset and file context objects for mercurial
2 #
2 #
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import errno
10 import errno
11 import filecmp
11 import filecmp
12 import os
12 import os
13 import stat
13 import stat
14
14
15 from .i18n import _
15 from .i18n import _
16 from .node import (
16 from .node import (
17 addednodeid,
17 addednodeid,
18 hex,
18 hex,
19 modifiednodeid,
19 modifiednodeid,
20 nullid,
20 nullid,
21 nullrev,
21 nullrev,
22 short,
22 short,
23 wdirfilenodeids,
23 wdirfilenodeids,
24 wdirhex,
24 wdirhex,
25 )
25 )
26 from .pycompat import (
26 from .pycompat import (
27 getattr,
27 getattr,
28 open,
28 open,
29 )
29 )
30 from . import (
30 from . import (
31 copies,
31 copies,
32 dagop,
32 dagop,
33 encoding,
33 encoding,
34 error,
34 error,
35 fileset,
35 fileset,
36 match as matchmod,
36 match as matchmod,
37 obsolete as obsmod,
37 obsolete as obsmod,
38 patch,
38 patch,
39 pathutil,
39 pathutil,
40 phases,
40 phases,
41 pycompat,
41 pycompat,
42 repoview,
42 repoview,
43 scmutil,
43 scmutil,
44 sparse,
44 sparse,
45 subrepo,
45 subrepo,
46 subrepoutil,
46 subrepoutil,
47 util,
47 util,
48 )
48 )
49 from .utils import (
49 from .utils import (
50 dateutil,
50 dateutil,
51 stringutil,
51 stringutil,
52 )
52 )
53
53
54 propertycache = util.propertycache
54 propertycache = util.propertycache
55
55
56
56
class basectx(object):
    """A basectx object represents the common logic for its children:
    changectx: read-only context that is already present in the repo,
    workingctx: a context that represents the working directory and can
    be committed,
    memctx: a context that represents changes in-memory and can also
    be committed."""

    def __init__(self, repo):
        self._repo = repo

    def __bytes__(self):
        # short hex form of this context's node id
        return short(self.node())

    __str__ = encoding.strmethod(__bytes__)

    def __repr__(self):
        return "<%s %s>" % (type(self).__name__, str(self))

    def __eq__(self, other):
        # Equal only when the concrete types match and both point at the
        # same revision; anything without a _rev never compares equal.
        try:
            return type(self) == type(other) and self._rev == other._rev
        except AttributeError:
            return False

    def __ne__(self, other):
        return not (self == other)

    def __contains__(self, key):
        # is `key` a file tracked in this revision's manifest?
        return key in self._manifest

    def __getitem__(self, key):
        # ctx[path] -> file context for `path` at this revision
        # (filectx is provided by the concrete subclass)
        return self.filectx(key)

    def __iter__(self):
        # iterate over the tracked file names
        return iter(self._manifest)

    def _buildstatusmanifest(self, status):
        """Builds a manifest that includes the given status results, if this is
        a working copy context. For non-working copy contexts, it just returns
        the normal manifest."""
        return self.manifest()

    def _matchstatus(self, other, match):
        """This internal method provides a way for child objects to override the
        match operator.
        """
        return match

    def _buildstatus(
        self, other, s, match, listignored, listclean, listunknown
    ):
        """build a status with respect to another context"""
        # Load earliest manifest first for caching reasons. More specifically,
        # if you have revisions 1000 and 1001, 1001 is probably stored as a
        # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
        # 1000 and cache it so that when you read 1001, we just need to apply a
        # delta to what's in the cache. So that's one full reconstruction + one
        # delta application.
        mf2 = None
        if self.rev() is not None and self.rev() < other.rev():
            mf2 = self._buildstatusmanifest(s)
        mf1 = other._buildstatusmanifest(s)
        if mf2 is None:
            mf2 = self._buildstatusmanifest(s)

        modified, added = [], []
        removed = []
        clean = []
        deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
        deletedset = set(deleted)
        d = mf1.diff(mf2, match=match, clean=listclean)
        for fn, value in pycompat.iteritems(d):
            # files reported deleted by the status scan take precedence
            if fn in deletedset:
                continue
            # a None value from manifest.diff means "unchanged" (only
            # present when listclean was requested)
            if value is None:
                clean.append(fn)
                continue
            (node1, flag1), (node2, flag2) = value
            if node1 is None:
                added.append(fn)
            elif node2 is None:
                removed.append(fn)
            elif flag1 != flag2:
                modified.append(fn)
            elif node2 not in wdirfilenodeids:
                # When comparing files between two commits, we save time by
                # not comparing the file contents when the nodeids differ.
                # Note that this means we incorrectly report a reverted change
                # to a file as a modification.
                modified.append(fn)
            elif self[fn].cmp(other[fn]):
                modified.append(fn)
            else:
                clean.append(fn)

        if removed:
            # need to filter files if they are already reported as removed
            unknown = [
                fn
                for fn in unknown
                if fn not in mf1 and (not match or match(fn))
            ]
            ignored = [
                fn
                for fn in ignored
                if fn not in mf1 and (not match or match(fn))
            ]
            # if they're deleted, don't report them as removed
            removed = [fn for fn in removed if fn not in deletedset]

        return scmutil.status(
            modified, added, removed, deleted, unknown, ignored, clean
        )

    @propertycache
    def substate(self):
        # parsed .hgsubstate content for this context, cached
        return subrepoutil.state(self, self._repo.ui)

    def subrev(self, subpath):
        # revision recorded for subrepo `subpath` in this context's substate
        return self.substate[subpath][1]

    def rev(self):
        """Return the revision number of this context."""
        return self._rev

    def node(self):
        """Return the binary node id of this context."""
        return self._node

    def hex(self):
        """Return the full hex node id of this context."""
        return hex(self.node())

    def manifest(self):
        """Return the manifest object for this context."""
        return self._manifest

    def manifestctx(self):
        """Return the manifest context for this context."""
        return self._manifestctx

    def repo(self):
        """Return the repository this context belongs to."""
        return self._repo

    def phasestr(self):
        """Return the phase of this context as a name (e.g. b'public')."""
        return phases.phasenames[self.phase()]

    def mutable(self):
        """True if the changeset's phase is anything above public."""
        return self.phase() > phases.public

    def matchfileset(self, cwd, expr, badfn=None):
        # build a matcher from a fileset expression evaluated in this context
        return fileset.match(self, cwd, expr, badfn=badfn)

    def obsolete(self):
        """True if the changeset is obsolete"""
        return self.rev() in obsmod.getrevs(self._repo, b'obsolete')

    def extinct(self):
        """True if the changeset is extinct"""
        return self.rev() in obsmod.getrevs(self._repo, b'extinct')

    def orphan(self):
        """True if the changeset is not obsolete, but its ancestor is"""
        return self.rev() in obsmod.getrevs(self._repo, b'orphan')

    def phasedivergent(self):
        """True if the changeset tries to be a successor of a public changeset

        Only non-public and non-obsolete changesets may be phase-divergent.
        """
        return self.rev() in obsmod.getrevs(self._repo, b'phasedivergent')

    def contentdivergent(self):
        """Is a successor of a changeset with multiple possible successor sets

        Only non-public and non-obsolete changesets may be content-divergent.
        """
        return self.rev() in obsmod.getrevs(self._repo, b'contentdivergent')

    def isunstable(self):
        """True if the changeset is either orphan, phase-divergent or
        content-divergent"""
        return self.orphan() or self.phasedivergent() or self.contentdivergent()

    def instabilities(self):
        """return the list of instabilities affecting this changeset.

        Instabilities are returned as strings. possible values are:
        - orphan,
        - phase-divergent,
        - content-divergent.
        """
        instabilities = []
        if self.orphan():
            instabilities.append(b'orphan')
        if self.phasedivergent():
            instabilities.append(b'phase-divergent')
        if self.contentdivergent():
            instabilities.append(b'content-divergent')
        return instabilities

    def parents(self):
        """return contexts for each parent changeset"""
        return self._parents

    def p1(self):
        """Return the context of the first parent."""
        return self._parents[0]

    def p2(self):
        """Return the context of the second parent (nullrev context when
        there is only one parent)."""
        parents = self._parents
        if len(parents) == 2:
            return parents[1]
        return self._repo[nullrev]

    def _fileinfo(self, path):
        """Return (filenode, flags) for `path` in this context.

        Prefers the already-loaded full manifest, then the manifest delta
        when the file was touched by this changeset, then falls back to a
        targeted lookup in the manifest log.

        Raises ManifestLookupError when the file is not in the manifest.
        """
        if '_manifest' in self.__dict__:
            try:
                return self._manifest[path], self._manifest.flags(path)
            except KeyError:
                raise error.ManifestLookupError(
                    self._node, path, _(b'not found in manifest')
                )
        if '_manifestdelta' in self.__dict__ or path in self.files():
            if path in self._manifestdelta:
                return (
                    self._manifestdelta[path],
                    self._manifestdelta.flags(path),
                )
        mfl = self._repo.manifestlog
        try:
            node, flag = mfl[self._changeset.manifest].find(path)
        except KeyError:
            raise error.ManifestLookupError(
                self._node, path, _(b'not found in manifest')
            )

        return node, flag

    def filenode(self, path):
        """Return the filelog node for `path` in this context."""
        return self._fileinfo(path)[0]

    def flags(self, path):
        """Return the flags (b'l', b'x' or b'') for `path`, or b'' when the
        file is absent from the manifest."""
        try:
            return self._fileinfo(path)[1]
        except error.LookupError:
            return b''

    @propertycache
    def _copies(self):
        # (p1copies, p2copies) dicts, computed lazily and cached
        return copies.computechangesetcopies(self)

    def p1copies(self):
        """Return the {dst: src} copies relative to the first parent."""
        return self._copies[0]

    def p2copies(self):
        """Return the {dst: src} copies relative to the second parent."""
        return self._copies[1]

    def sub(self, path, allowcreate=True):
        '''return a subrepo for the stored revision of path, never wdir()'''
        return subrepo.subrepo(self, path, allowcreate=allowcreate)

    def nullsub(self, path, pctx):
        # a subrepo at the null revision, parented to pctx
        return subrepo.nullsubrepo(self, path, pctx)

    def workingsub(self, path):
        '''return a subrepo for the stored revision, or wdir if this is a wdir
        context.
        '''
        return subrepo.subrepo(self, path, allowwdir=True)

    def match(
        self,
        pats=None,
        include=None,
        exclude=None,
        default=b'glob',
        listsubrepos=False,
        badfn=None,
        cwd=None,
    ):
        """Build a file matcher for this context from pattern lists."""
        r = self._repo
        if not cwd:
            cwd = r.getcwd()
        return matchmod.match(
            r.root,
            cwd,
            pats,
            include,
            exclude,
            default,
            auditor=r.nofsauditor,
            ctx=self,
            listsubrepos=listsubrepos,
            badfn=badfn,
        )

    def diff(
        self,
        ctx2=None,
        match=None,
        changes=None,
        opts=None,
        losedatafn=None,
        pathfn=None,
        copy=None,
        copysourcematch=None,
        hunksfilterfn=None,
    ):
        """Returns a diff generator for the given contexts and matcher"""
        # default to diffing against the first parent
        if ctx2 is None:
            ctx2 = self.p1()
        if ctx2 is not None:
            ctx2 = self._repo[ctx2]
        return patch.diff(
            self._repo,
            ctx2,
            self,
            match=match,
            changes=changes,
            opts=opts,
            losedatafn=losedatafn,
            pathfn=pathfn,
            copy=copy,
            copysourcematch=copysourcematch,
            hunksfilterfn=hunksfilterfn,
        )

    def dirs(self):
        """Return the set of directories present in the manifest."""
        return self._manifest.dirs()

    def hasdir(self, dir):
        """True if `dir` is a directory in this context's manifest."""
        return self._manifest.hasdir(dir)

    def status(
        self,
        other=None,
        match=None,
        listignored=False,
        listclean=False,
        listunknown=False,
        listsubrepos=False,
    ):
        """return status of files between two nodes or node and working
        directory.

        If other is None, compare this node with working directory.

        returns (modified, added, removed, deleted, unknown, ignored, clean)
        """

        ctx1 = self
        ctx2 = self._repo[other]

        # This next code block is, admittedly, fragile logic that tests for
        # reversing the contexts and wouldn't need to exist if it weren't for
        # the fast (and common) code path of comparing the working directory
        # with its first parent.
        #
        # What we're aiming for here is the ability to call:
        #
        # workingctx.status(parentctx)
        #
        # If we always built the manifest for each context and compared those,
        # then we'd be done. But the special case of the above call means we
        # just copy the manifest of the parent.
        reversed = False
        if not isinstance(ctx1, changectx) and isinstance(ctx2, changectx):
            reversed = True
            ctx1, ctx2 = ctx2, ctx1

        match = self._repo.narrowmatch(match)
        match = ctx2._matchstatus(ctx1, match)
        r = scmutil.status([], [], [], [], [], [], [])
        r = ctx2._buildstatus(
            ctx1, r, match, listignored, listclean, listunknown
        )

        if reversed:
            # Reverse added and removed. Clear deleted, unknown and ignored as
            # these make no sense to reverse.
            r = scmutil.status(
                r.modified, r.removed, r.added, [], [], [], r.clean
            )

        if listsubrepos:
            for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
                try:
                    rev2 = ctx2.subrev(subpath)
                except KeyError:
                    # A subrepo that existed in node1 was deleted between
                    # node1 and node2 (inclusive). Thus, ctx2's substate
                    # won't contain that subpath. The best we can do ignore it.
                    rev2 = None
                submatch = matchmod.subdirmatcher(subpath, match)
                s = sub.status(
                    rev2,
                    match=submatch,
                    ignored=listignored,
                    clean=listclean,
                    unknown=listunknown,
                    listsubrepos=True,
                )
                for k in (
                    'modified',
                    'added',
                    'removed',
                    'deleted',
                    'unknown',
                    'ignored',
                    'clean',
                ):
                    rfiles, sfiles = getattr(r, k), getattr(s, k)
                    rfiles.extend(b"%s/%s" % (subpath, f) for f in sfiles)

        r.modified.sort()
        r.added.sort()
        r.removed.sort()
        r.deleted.sort()
        r.unknown.sort()
        r.ignored.sort()
        r.clean.sort()

        return r
476
476
477
477
478 class changectx(basectx):
478 class changectx(basectx):
479 """A changecontext object makes access to data related to a particular
479 """A changecontext object makes access to data related to a particular
480 changeset convenient. It represents a read-only context already present in
480 changeset convenient. It represents a read-only context already present in
481 the repo."""
481 the repo."""
482
482
    def __init__(self, repo, rev, node, maybe_filtered=True):
        """Initialize a read-only context for revision `rev` / node `node`.

        `maybe_filtered` records whether the revision may still be hidden
        by changelog filtering (see _changeset/_parents for how it is used).
        """
        super(changectx, self).__init__(repo)
        self._rev = rev
        self._node = node
        # When maybe_filtered is True, the revision might be affected by
        # changelog filtering and operation through the filtered changelog must be used.
        #
        # When maybe_filtered is False, the revision has already been checked
        # against filtering and is not filtered. Operation through the
        # unfiltered changelog might be used in some case.
        self._maybe_filtered = maybe_filtered
494
494
495 def __hash__(self):
495 def __hash__(self):
496 try:
496 try:
497 return hash(self._rev)
497 return hash(self._rev)
498 except AttributeError:
498 except AttributeError:
499 return id(self)
499 return id(self)
500
500
    def __nonzero__(self):
        # A context is falsy only when it points at the null revision.
        return self._rev != nullrev

    # Python 3 truthiness hook; __nonzero__ kept for Python 2 compatibility.
    __bool__ = __nonzero__
505
505
    @propertycache
    def _changeset(self):
        # Parsed changelog entry for this revision, cached on first access.
        # When the revision is known to be unfiltered (_maybe_filtered is
        # False) we go through the unfiltered repo's changelog; otherwise the
        # filtered changelog must be used.
        if self._maybe_filtered:
            repo = self._repo
        else:
            repo = self._repo.unfiltered()
        return repo.changelog.changelogrevision(self.rev())
513
513
    @propertycache
    def _manifest(self):
        # full manifest for this revision, read once and cached
        return self._manifestctx.read()
517
517
    @property
    def _manifestctx(self):
        # manifest context for the manifest node recorded in this changeset
        return self._repo.manifestlog[self._changeset.manifest]
521
521
    @propertycache
    def _manifestdelta(self):
        # manifest delta for this revision (entries changed relative to the
        # delta parent); used by _fileinfo to avoid a full manifest read
        return self._manifestctx.readdelta()
525
525
526 @propertycache
526 @propertycache
527 def _parents(self):
527 def _parents(self):
528 repo = self._repo
528 repo = self._repo
529 if self._maybe_filtered:
529 if self._maybe_filtered:
530 cl = repo.changelog
530 cl = repo.changelog
531 else:
531 else:
532 cl = repo.unfiltered().changelog
532 cl = repo.unfiltered().changelog
533
533
534 p1, p2 = cl.parentrevs(self._rev)
534 p1, p2 = cl.parentrevs(self._rev)
535 if p2 == nullrev:
535 if p2 == nullrev:
536 return [repo[p1]]
536 return [repo[p1]]
537 return [repo[p1], repo[p2]]
537 return [repo[p1], repo[p2]]
538
538
    def changeset(self):
        """Return the raw changelog entry as a 6-tuple:
        (manifest node, user, date, files, description, extra)."""
        c = self._changeset
        return (
            c.manifest,
            c.user,
            c.date,
            c.files,
            c.description,
            c.extra,
        )
549
549
    def manifestnode(self):
        """Return the node id of this changeset's manifest."""
        return self._changeset.manifest

    def user(self):
        """Return the user recorded in the changelog entry."""
        return self._changeset.user

    def date(self):
        """Return the date recorded in the changelog entry."""
        return self._changeset.date

    def files(self):
        """Return the list of files touched by this changeset."""
        return self._changeset.files
561
561
562 def filesmodified(self):
562 def filesmodified(self):
563 modified = set(self.files())
563 modified = set(self.files())
564 modified.difference_update(self.filesadded())
564 modified.difference_update(self.filesadded())
565 modified.difference_update(self.filesremoved())
565 modified.difference_update(self.filesremoved())
566 return sorted(modified)
566 return sorted(modified)
567
567
    def filesadded(self):
        """Return the list of files added by this changeset.

        The value recorded in the changeset is used when the configured
        copy-tracing mode trusts it; otherwise it may be recomputed from
        the filelogs via the copies module.
        """
        filesadded = self._changeset.filesadded
        compute_on_none = True
        if self._repo.filecopiesmode == b'changeset-sidedata':
            # sidedata mode: missing data yields an empty list rather than
            # a filelog recomputation
            compute_on_none = False
        else:
            source = self._repo.ui.config(b'experimental', b'copies.read-from')
            if source == b'changeset-only':
                # trust only what the changeset recorded
                compute_on_none = False
            elif source != b'compatibility':
                # filelog mode, ignore any changelog content
                filesadded = None
        if filesadded is None:
            if compute_on_none:
                filesadded = copies.computechangesetfilesadded(self)
            else:
                filesadded = []
        return filesadded
586
586
    def filesremoved(self):
        """Return the list of files removed by this changeset.

        Mirrors filesadded(): the changeset-recorded value is used when the
        configured copy-tracing mode trusts it; otherwise it may be
        recomputed from the filelogs via the copies module.
        """
        filesremoved = self._changeset.filesremoved
        compute_on_none = True
        if self._repo.filecopiesmode == b'changeset-sidedata':
            # sidedata mode: missing data yields an empty list rather than
            # a filelog recomputation
            compute_on_none = False
        else:
            source = self._repo.ui.config(b'experimental', b'copies.read-from')
            if source == b'changeset-only':
                # trust only what the changeset recorded
                compute_on_none = False
            elif source != b'compatibility':
                # filelog mode, ignore any changelog content
                filesremoved = None
        if filesremoved is None:
            if compute_on_none:
                filesremoved = copies.computechangesetfilesremoved(self)
            else:
                filesremoved = []
        return filesremoved
605
605
    @propertycache
    def _copies(self):
        """Return a ``(p1copies, p2copies)`` pair of copy dicts.

        Each dict maps destination path -> source path relative to the
        corresponding parent.  Values come from the changeset itself or
        are recomputed from the filelogs, depending on configuration.
        """
        p1copies = self._changeset.p1copies
        p2copies = self._changeset.p2copies
        compute_on_none = True
        if self._repo.filecopiesmode == b'changeset-sidedata':
            # sidedata repositories always store the data in the changeset
            compute_on_none = False
        else:
            source = self._repo.ui.config(b'experimental', b'copies.read-from')
            # If config says to get copy metadata only from changeset, then
            # return that, defaulting to {} if there was no copy metadata. In
            # compatibility mode, we return copy data from the changeset if it
            # was recorded there, and otherwise we fall back to getting it from
            # the filelogs (below).
            #
            # If we are in compatiblity mode and there is not data in the
            # changeset), we get the copy metadata from the filelogs.
            #
            # otherwise, when config said to read only from filelog, we get the
            # copy metadata from the filelogs.
            if source == b'changeset-only':
                compute_on_none = False
            elif source != b'compatibility':
                # filelog mode, ignore any changelog content
                p1copies = p2copies = None
        if p1copies is None:
            if compute_on_none:
                # fall back to the filelog-based computation in the base class
                p1copies, p2copies = super(changectx, self)._copies
            else:
                # NOTE(review): indentation reconstructed from a garbled
                # render — verify this branch against upstream context.py
                if p1copies is None:
                    p1copies = {}
                if p2copies is None:
                    p2copies = {}
        return p1copies, p2copies
640
640
    def description(self):
        """Return the commit message of the changeset."""
        return self._changeset.description

    def branch(self):
        """Return the branch name, decoded to the local encoding."""
        return encoding.tolocal(self._changeset.extra.get(b"branch"))

    def closesbranch(self):
        """True if this changeset closes its branch."""
        return b'close' in self._changeset.extra

    def extra(self):
        """Return a dict of extra information."""
        return self._changeset.extra

    def tags(self):
        """Return a list of byte tag names"""
        return self._repo.nodetags(self._node)

    def bookmarks(self):
        """Return a list of byte bookmark names."""
        return self._repo.nodebookmarks(self._node)

    def phase(self):
        """Return the phase of the changeset (see the phases module)."""
        return self._repo._phasecache.phase(self._repo, self._rev)

    def hidden(self):
        """True if this revision is filtered out of the 'visible' view."""
        return self._rev in repoview.filterrevs(self._repo, b'visible')

    def isinmemory(self):
        # on-disk changesets are never in-memory overlays
        return False
670
670
671 def children(self):
671 def children(self):
672 """return list of changectx contexts for each child changeset.
672 """return list of changectx contexts for each child changeset.
673
673
674 This returns only the immediate child changesets. Use descendants() to
674 This returns only the immediate child changesets. Use descendants() to
675 recursively walk children.
675 recursively walk children.
676 """
676 """
677 c = self._repo.changelog.children(self._node)
677 c = self._repo.changelog.children(self._node)
678 return [self._repo[x] for x in c]
678 return [self._repo[x] for x in c]
679
679
    def ancestors(self):
        """Yield a changectx for each ancestor changeset (self excluded)."""
        for a in self._repo.changelog.ancestors([self._rev]):
            yield self._repo[a]

    def descendants(self):
        """Recursively yield all children of the changeset.

        For just the immediate children, use children()
        """
        for d in self._repo.changelog.descendants([self._rev]):
            yield self._repo[d]

    def filectx(self, path, fileid=None, filelog=None):
        """get a file context from this changeset"""
        if fileid is None:
            # default to the file node recorded in this changeset's manifest
            fileid = self.filenode(path)
        return filectx(
            self._repo, path, fileid=fileid, changectx=self, filelog=filelog
        )
699
699
    def ancestor(self, c2, warn=False):
        """return the "best" ancestor context of self and c2

        If there are multiple candidates, it will show a message and check
        merge.preferancestor configuration before falling back to the
        revlog ancestor."""
        # deal with workingctxs
        n2 = c2._node
        if n2 is None:
            # working context has no node: use its first parent instead
            n2 = c2._parents[0]._node
        cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
        if not cahs:
            # no common ancestor at all
            anc = nullid
        elif len(cahs) == 1:
            anc = cahs[0]
        else:
            # experimental config: merge.preferancestor
            for r in self._repo.ui.configlist(b'merge', b'preferancestor'):
                try:
                    ctx = scmutil.revsymbol(self._repo, r)
                except error.RepoLookupError:
                    # unknown symbol: try the next preference
                    continue
                anc = ctx.node()
                if anc in cahs:
                    break
            else:
                # no configured preference matched: use the revlog's pick
                anc = self._repo.changelog.ancestor(self._node, n2)
            if warn:
                self._repo.ui.status(
                    (
                        _(b"note: using %s as ancestor of %s and %s\n")
                        % (short(anc), short(self._node), short(n2))
                    )
                    + b''.join(
                        _(
                            b" alternatively, use --config "
                            b"merge.preferancestor=%s\n"
                        )
                        % short(n)
                        for n in sorted(cahs)
                        if n != anc
                    )
                )
        return self._repo[anc]
744
744
    def isancestorof(self, other):
        """True if this changeset is an ancestor of other"""
        return self._repo.changelog.isancestorrev(self._rev, other._rev)
748
748
749 def walk(self, match):
749 def walk(self, match):
750 '''Generates matching file names.'''
750 '''Generates matching file names.'''
751
751
752 # Wrap match.bad method to have message with nodeid
752 # Wrap match.bad method to have message with nodeid
753 def bad(fn, msg):
753 def bad(fn, msg):
754 # The manifest doesn't know about subrepos, so don't complain about
754 # The manifest doesn't know about subrepos, so don't complain about
755 # paths into valid subrepos.
755 # paths into valid subrepos.
756 if any(fn == s or fn.startswith(s + b'/') for s in self.substate):
756 if any(fn == s or fn.startswith(s + b'/') for s in self.substate):
757 return
757 return
758 match.bad(fn, _(b'no such file in rev %s') % self)
758 match.bad(fn, _(b'no such file in rev %s') % self)
759
759
760 m = matchmod.badmatch(self._repo.narrowmatch(match), bad)
760 m = matchmod.badmatch(self._repo.narrowmatch(match), bad)
761 return self._manifest.walk(m)
761 return self._manifest.walk(m)
762
762
    def matches(self, match):
        """Return the matching file names (alias for walk())."""
        return self.walk(match)
765
765
766
766
767 class basefilectx(object):
767 class basefilectx(object):
768 """A filecontext object represents the common logic for its children:
768 """A filecontext object represents the common logic for its children:
769 filectx: read-only access to a filerevision that is already present
769 filectx: read-only access to a filerevision that is already present
770 in the repo,
770 in the repo,
771 workingfilectx: a filecontext that represents files from the working
771 workingfilectx: a filecontext that represents files from the working
772 directory,
772 directory,
773 memfilectx: a filecontext that represents files in-memory,
773 memfilectx: a filecontext that represents files in-memory,
774 """
774 """
775
775
    @propertycache
    def _filelog(self):
        # the filelog (revlog) storing this file's revisions
        return self._repo.file(self._path)

    @propertycache
    def _changeid(self):
        # changelog revision this file context is associated with
        if '_changectx' in self.__dict__:
            return self._changectx.rev()
        elif '_descendantrev' in self.__dict__:
            # this file context was created from a revision with a known
            # descendant, we can (lazily) correct for linkrev aliases
            return self._adjustlinkrev(self._descendantrev)
        else:
            # fall back to the (possibly aliased) recorded linkrev
            return self._filelog.linkrev(self._filerev)

    @propertycache
    def _filenode(self):
        # filelog node id of this file revision
        if '_fileid' in self.__dict__:
            return self._filelog.lookup(self._fileid)
        else:
            return self._changectx.filenode(self._path)

    @propertycache
    def _filerev(self):
        # filelog revision number of this file revision
        return self._filelog.rev(self._filenode)

    @propertycache
    def _repopath(self):
        # path relative to the repository root
        return self._path
805
805
    def __nonzero__(self):
        """Truthiness means "this file exists in its changeset"."""
        try:
            self._filenode
            return True
        except error.LookupError:
            # file is missing
            return False

    __bool__ = __nonzero__

    def __bytes__(self):
        try:
            return b"%s@%s" % (self.path(), self._changectx)
        except error.LookupError:
            # changeset lookup failed: still show the path
            return b"%s@???" % self.path()

    __str__ = encoding.strmethod(__bytes__)

    def __repr__(self):
        return "<%s %s>" % (type(self).__name__, str(self))

    def __hash__(self):
        try:
            return hash((self._path, self._filenode))
        except AttributeError:
            # not backed by a file revision: fall back to object identity
            return id(self)

    def __eq__(self, other):
        # equal iff same concrete type, same path and same file node
        try:
            return (
                type(self) == type(other)
                and self._path == other._path
                and self._filenode == other._filenode
            )
        except AttributeError:
            return False

    def __ne__(self, other):
        return not (self == other)
845
845
    def filerev(self):
        """Filelog revision number of this file revision."""
        return self._filerev

    def filenode(self):
        """Filelog node id of this file revision."""
        return self._filenode

    @propertycache
    def _flags(self):
        # flags ('x', 'l', ...) recorded for this file in the manifest
        return self._changectx.flags(self._path)

    def flags(self):
        """Return the manifest flags for this file."""
        return self._flags

    def filelog(self):
        """Return the filelog backing this file."""
        return self._filelog

    def rev(self):
        """Changelog revision this file context is attached to."""
        return self._changeid

    def linkrev(self):
        """Raw linkrev recorded in the filelog (may be aliased)."""
        return self._filelog.linkrev(self._filerev)

    def node(self):
        """Node id of the associated changeset."""
        return self._changectx.node()

    def hex(self):
        """Hex node id of the associated changeset."""
        return self._changectx.hex()

    def user(self):
        """Author of the associated changeset."""
        return self._changectx.user()

    def date(self):
        """Date of the associated changeset."""
        return self._changectx.date()

    def files(self):
        """Files touched by the associated changeset."""
        return self._changectx.files()

    def description(self):
        """Commit message of the associated changeset."""
        return self._changectx.description()

    def branch(self):
        """Branch name of the associated changeset."""
        return self._changectx.branch()

    def extra(self):
        """Extra dict of the associated changeset."""
        return self._changectx.extra()

    def phase(self):
        """Phase of the associated changeset."""
        return self._changectx.phase()

    def phasestr(self):
        """Phase name of the associated changeset."""
        return self._changectx.phasestr()

    def obsolete(self):
        """True if the associated changeset is obsolete."""
        return self._changectx.obsolete()

    def instabilities(self):
        """Instabilities of the associated changeset."""
        return self._changectx.instabilities()

    def manifest(self):
        """Manifest of the associated changeset."""
        return self._changectx.manifest()

    def changectx(self):
        """Return the associated change context."""
        return self._changectx

    def renamed(self):
        """Return the (source path, source node) pair or a falsy value."""
        return self._copied

    def copysource(self):
        """Return the copy source path, or a falsy value if not copied."""
        return self._copied and self._copied[0]

    def repo(self):
        """Return the repository this context belongs to."""
        return self._repo

    def size(self):
        """Size of the file content in bytes."""
        return len(self.data())

    def path(self):
        """Path of the file relative to the repository root."""
        return self._path
924
924
    def isbinary(self):
        """True if the file content looks binary (best effort)."""
        try:
            return stringutil.binary(self.data())
        except IOError:
            # unreadable content: report "not binary" rather than crash
            return False

    def isexec(self):
        """True if the file carries the executable flag."""
        return b'x' in self.flags()

    def islink(self):
        """True if the file is a symbolic link."""
        return b'l' in self.flags()

    def isabsent(self):
        """whether this filectx represents a file not in self._changectx

        This is mainly for merge code to detect change/delete conflicts. This is
        expected to be True for all subclasses of basectx."""
        return False

    # subclasses with special comparison semantics set this to True so
    # cmp() defers to their implementation
    _customcmp = False
945
945
    def cmp(self, fctx):
        """compare with other file context

        returns True if different than fctx.
        """
        if fctx._customcmp:
            # the other side knows how to compare itself against us
            return fctx.cmp(self)

        if self._filenode is None:
            raise error.ProgrammingError(
                b'filectx.cmp() must be reimplemented if not backed by revlog'
            )

        if fctx._filenode is None:
            # fctx is not revlog-backed (e.g. working directory content):
            # use size heuristics to avoid reading data when possible
            if self._repo._encodefilterpats:
                # can't rely on size() because wdir content may be decoded
                return self._filelog.cmp(self._filenode, fctx.data())
            if self.size() - 4 == fctx.size():
                # size() can match:
                # if file data starts with '\1\n', empty metadata block is
                # prepended, which adds 4 bytes to filelog.size().
                return self._filelog.cmp(self._filenode, fctx.data())
            if self.size() == fctx.size():
                # size() matches: need to compare content
                return self._filelog.cmp(self._filenode, fctx.data())

        # size() differs
        return True
974
974
    def _adjustlinkrev(self, srcrev, inclusive=False, stoprev=None):
        """return the first ancestor of <srcrev> introducing <fnode>

        If the linkrev of the file revision does not point to an ancestor of
        srcrev, we'll walk down the ancestors until we find one introducing
        this file revision.

        :srcrev: the changeset revision we search ancestors from
        :inclusive: if true, the src revision will also be checked
        :stoprev: an optional revision to stop the walk at. If no introduction
                  of this file content could be found before this floor
                  revision, the function will returns "None" and stops its
                  iteration.
        """
        repo = self._repo
        # use the unfiltered changelog: linkrevs may point at revisions
        # hidden from the current view (presumably why the walk must not
        # be filtered — see the changeset that introduced this line)
        cl = repo.unfiltered().changelog
        mfl = repo.manifestlog
        # fetch the linkrev
        lkr = self.linkrev()
        if srcrev == lkr:
            # the recorded linkrev already is the searched revision
            return lkr
        # hack to reuse ancestor computation when searching for renames
        memberanc = getattr(self, '_ancestrycontext', None)
        iteranc = None
        if srcrev is None:
            # wctx case, used by workingfilectx during mergecopy
            revs = [p.rev() for p in self._repo[None].parents()]
            inclusive = True  # we skipped the real (revless) source
        else:
            revs = [srcrev]
        if memberanc is None:
            memberanc = iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
        # check if this linkrev is an ancestor of srcrev
        if lkr not in memberanc:
            if iteranc is None:
                iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
            fnode = self._filenode
            path = self._path
            for a in iteranc:
                if stoprev is not None and a < stoprev:
                    # walked past the floor without finding the introduction
                    return None
                ac = cl.read(a)  # get changeset data (we avoid object creation)
                if path in ac[3]:  # checking the 'files' field.
                    # The file has been touched, check if the content is
                    # similar to the one we search for.
                    if fnode == mfl[ac[0]].readfast().get(path):
                        return a
            # In theory, we should never get out of that loop without a result.
            # But if manifest uses a buggy file revision (not children of the
            # one it replaces) we could. Such a buggy situation will likely
            # result is crash somewhere else at to some point.
        return lkr
1027
1027
1028 def isintroducedafter(self, changelogrev):
1028 def isintroducedafter(self, changelogrev):
1029 """True if a filectx has been introduced after a given floor revision
1029 """True if a filectx has been introduced after a given floor revision
1030 """
1030 """
1031 if self.linkrev() >= changelogrev:
1031 if self.linkrev() >= changelogrev:
1032 return True
1032 return True
1033 introrev = self._introrev(stoprev=changelogrev)
1033 introrev = self._introrev(stoprev=changelogrev)
1034 if introrev is None:
1034 if introrev is None:
1035 return False
1035 return False
1036 return introrev >= changelogrev
1036 return introrev >= changelogrev
1037
1037
    def introrev(self):
        """return the rev of the changeset which introduced this file revision

        This method is different from linkrev because it take into account the
        changeset the filectx was created from. It ensures the returned
        revision is one of its ancestors. This prevents bugs from
        'linkrev-shadowing' when a file revision is used by multiple
        changesets.
        """
        return self._introrev()
1048
1048
    def _introrev(self, stoprev=None):
        """
        Same as `introrev` but, with an extra argument to limit changelog
        iteration range in some internal usecase.

        If `stoprev` is set, the `introrev` will not be searched past that
        `stoprev` revision and "None" might be returned. This is useful to
        limit the iteration range.
        """
        toprev = None
        attrs = vars(self)
        if '_changeid' in attrs:
            # We have a cached value already
            toprev = self._changeid
        elif '_changectx' in attrs:
            # We know which changelog entry we are coming from
            toprev = self._changectx.rev()

        if toprev is not None:
            return self._adjustlinkrev(toprev, inclusive=True, stoprev=stoprev)
        elif '_descendantrev' in attrs:
            introrev = self._adjustlinkrev(self._descendantrev, stoprev=stoprev)
            # be nice and cache the result of the computation
            if introrev is not None:
                self._changeid = introrev
            return introrev
        else:
            # no anchoring changeset known: trust the raw linkrev
            return self.linkrev()
1077
1077
1078 def introfilectx(self):
1078 def introfilectx(self):
1079 """Return filectx having identical contents, but pointing to the
1079 """Return filectx having identical contents, but pointing to the
1080 changeset revision where this filectx was introduced"""
1080 changeset revision where this filectx was introduced"""
1081 introrev = self.introrev()
1081 introrev = self.introrev()
1082 if self.rev() == introrev:
1082 if self.rev() == introrev:
1083 return self
1083 return self
1084 return self.filectx(self.filenode(), changeid=introrev)
1084 return self.filectx(self.filenode(), changeid=introrev)
1085
1085
    def _parentfilectx(self, path, fileid, filelog):
        """create parent filectx keeping ancestry info for _adjustlinkrev()"""
        fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
        if '_changeid' in vars(self) or '_changectx' in vars(self):
            # If self is associated with a changeset (probably explicitly
            # fed), ensure the created filectx is associated with a
            # changeset that is an ancestor of self.changectx.
            # This lets us later use _adjustlinkrev to get a correct link.
            fctx._descendantrev = self.rev()
            fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
        elif '_descendantrev' in vars(self):
            # Otherwise propagate _descendantrev if we have one associated.
            fctx._descendantrev = self._descendantrev
            fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
        return fctx
1101
1101
    def parents(self):
        """Return the parent file contexts, folding in rename metadata."""
        _path = self._path
        fl = self._filelog
        parents = self._filelog.parents(self._filenode)
        pl = [(_path, node, fl) for node in parents if node != nullid]

        r = fl.renamed(self._filenode)
        if r:
            # - In the simple rename case, both parent are nullid, pl is empty.
            # - In case of merge, only one of the parent is null id and should
            # be replaced with the rename information. This parent is -always-
            # the first one.
            #
            # As null id have always been filtered out in the previous list
            # comprehension, inserting to 0 will always result in "replacing
            # first nullid parent with rename information.
            pl.insert(0, (r[0], r[1], self._repo.file(r[0])))

        return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]
1121
1121
    def p1(self):
        """Return the first parent file context."""
        return self.parents()[0]

    def p2(self):
        """Return the second parent file context.

        When there is no second parent, a filectx for the null file
        revision (fileid=-1) of the same filelog is returned.
        """
        p = self.parents()
        if len(p) == 2:
            return p[1]
        return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
1130
1130
1131 def annotate(self, follow=False, skiprevs=None, diffopts=None):
1131 def annotate(self, follow=False, skiprevs=None, diffopts=None):
1132 """Returns a list of annotateline objects for each line in the file
1132 """Returns a list of annotateline objects for each line in the file
1133
1133
1134 - line.fctx is the filectx of the node where that line was last changed
1134 - line.fctx is the filectx of the node where that line was last changed
1135 - line.lineno is the line number at the first appearance in the managed
1135 - line.lineno is the line number at the first appearance in the managed
1136 file
1136 file
1137 - line.text is the data on that line (including newline character)
1137 - line.text is the data on that line (including newline character)
1138 """
1138 """
1139 getlog = util.lrucachefunc(lambda x: self._repo.file(x))
1139 getlog = util.lrucachefunc(lambda x: self._repo.file(x))
1140
1140
1141 def parents(f):
1141 def parents(f):
1142 # Cut _descendantrev here to mitigate the penalty of lazy linkrev
1142 # Cut _descendantrev here to mitigate the penalty of lazy linkrev
1143 # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
1143 # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
1144 # from the topmost introrev (= srcrev) down to p.linkrev() if it
1144 # from the topmost introrev (= srcrev) down to p.linkrev() if it
1145 # isn't an ancestor of the srcrev.
1145 # isn't an ancestor of the srcrev.
1146 f._changeid
1146 f._changeid
1147 pl = f.parents()
1147 pl = f.parents()
1148
1148
1149 # Don't return renamed parents if we aren't following.
1149 # Don't return renamed parents if we aren't following.
1150 if not follow:
1150 if not follow:
1151 pl = [p for p in pl if p.path() == f.path()]
1151 pl = [p for p in pl if p.path() == f.path()]
1152
1152
1153 # renamed filectx won't have a filelog yet, so set it
1153 # renamed filectx won't have a filelog yet, so set it
1154 # from the cache to save time
1154 # from the cache to save time
1155 for p in pl:
1155 for p in pl:
1156 if not '_filelog' in p.__dict__:
1156 if not '_filelog' in p.__dict__:
1157 p._filelog = getlog(p.path())
1157 p._filelog = getlog(p.path())
1158
1158
1159 return pl
1159 return pl
1160
1160
1161 # use linkrev to find the first changeset where self appeared
1161 # use linkrev to find the first changeset where self appeared
1162 base = self.introfilectx()
1162 base = self.introfilectx()
1163 if getattr(base, '_ancestrycontext', None) is None:
1163 if getattr(base, '_ancestrycontext', None) is None:
1164 cl = self._repo.changelog
1164 # it is safe to use an unfiltered repository here because we are
1165 # walking ancestors only.
1166 cl = self._repo.unfiltered().changelog
1165 if base.rev() is None:
1167 if base.rev() is None:
1166 # wctx is not inclusive, but works because _ancestrycontext
1168 # wctx is not inclusive, but works because _ancestrycontext
1167 # is used to test filelog revisions
1169 # is used to test filelog revisions
1168 ac = cl.ancestors(
1170 ac = cl.ancestors(
1169 [p.rev() for p in base.parents()], inclusive=True
1171 [p.rev() for p in base.parents()], inclusive=True
1170 )
1172 )
1171 else:
1173 else:
1172 ac = cl.ancestors([base.rev()], inclusive=True)
1174 ac = cl.ancestors([base.rev()], inclusive=True)
1173 base._ancestrycontext = ac
1175 base._ancestrycontext = ac
1174
1176
1175 return dagop.annotate(
1177 return dagop.annotate(
1176 base, parents, skiprevs=skiprevs, diffopts=diffopts
1178 base, parents, skiprevs=skiprevs, diffopts=diffopts
1177 )
1179 )
1178
1180
1179 def ancestors(self, followfirst=False):
1181 def ancestors(self, followfirst=False):
1180 visit = {}
1182 visit = {}
1181 c = self
1183 c = self
1182 if followfirst:
1184 if followfirst:
1183 cut = 1
1185 cut = 1
1184 else:
1186 else:
1185 cut = None
1187 cut = None
1186
1188
1187 while True:
1189 while True:
1188 for parent in c.parents()[:cut]:
1190 for parent in c.parents()[:cut]:
1189 visit[(parent.linkrev(), parent.filenode())] = parent
1191 visit[(parent.linkrev(), parent.filenode())] = parent
1190 if not visit:
1192 if not visit:
1191 break
1193 break
1192 c = visit.pop(max(visit))
1194 c = visit.pop(max(visit))
1193 yield c
1195 yield c
1194
1196
1195 def decodeddata(self):
1197 def decodeddata(self):
1196 """Returns `data()` after running repository decoding filters.
1198 """Returns `data()` after running repository decoding filters.
1197
1199
1198 This is often equivalent to how the data would be expressed on disk.
1200 This is often equivalent to how the data would be expressed on disk.
1199 """
1201 """
1200 return self._repo.wwritedata(self.path(), self.data())
1202 return self._repo.wwritedata(self.path(), self.data())
1201
1203
1202
1204
1203 class filectx(basefilectx):
1205 class filectx(basefilectx):
1204 """A filecontext object makes access to data related to a particular
1206 """A filecontext object makes access to data related to a particular
1205 filerevision convenient."""
1207 filerevision convenient."""
1206
1208
1207 def __init__(
1209 def __init__(
1208 self,
1210 self,
1209 repo,
1211 repo,
1210 path,
1212 path,
1211 changeid=None,
1213 changeid=None,
1212 fileid=None,
1214 fileid=None,
1213 filelog=None,
1215 filelog=None,
1214 changectx=None,
1216 changectx=None,
1215 ):
1217 ):
1216 """changeid must be a revision number, if specified.
1218 """changeid must be a revision number, if specified.
1217 fileid can be a file revision or node."""
1219 fileid can be a file revision or node."""
1218 self._repo = repo
1220 self._repo = repo
1219 self._path = path
1221 self._path = path
1220
1222
1221 assert (
1223 assert (
1222 changeid is not None or fileid is not None or changectx is not None
1224 changeid is not None or fileid is not None or changectx is not None
1223 ), (
1225 ), (
1224 b"bad args: changeid=%r, fileid=%r, changectx=%r"
1226 b"bad args: changeid=%r, fileid=%r, changectx=%r"
1225 % (changeid, fileid, changectx,)
1227 % (changeid, fileid, changectx,)
1226 )
1228 )
1227
1229
1228 if filelog is not None:
1230 if filelog is not None:
1229 self._filelog = filelog
1231 self._filelog = filelog
1230
1232
1231 if changeid is not None:
1233 if changeid is not None:
1232 self._changeid = changeid
1234 self._changeid = changeid
1233 if changectx is not None:
1235 if changectx is not None:
1234 self._changectx = changectx
1236 self._changectx = changectx
1235 if fileid is not None:
1237 if fileid is not None:
1236 self._fileid = fileid
1238 self._fileid = fileid
1237
1239
1238 @propertycache
1240 @propertycache
1239 def _changectx(self):
1241 def _changectx(self):
1240 try:
1242 try:
1241 return self._repo[self._changeid]
1243 return self._repo[self._changeid]
1242 except error.FilteredRepoLookupError:
1244 except error.FilteredRepoLookupError:
1243 # Linkrev may point to any revision in the repository. When the
1245 # Linkrev may point to any revision in the repository. When the
1244 # repository is filtered this may lead to `filectx` trying to build
1246 # repository is filtered this may lead to `filectx` trying to build
1245 # `changectx` for filtered revision. In such case we fallback to
1247 # `changectx` for filtered revision. In such case we fallback to
1246 # creating `changectx` on the unfiltered version of the reposition.
1248 # creating `changectx` on the unfiltered version of the reposition.
1247 # This fallback should not be an issue because `changectx` from
1249 # This fallback should not be an issue because `changectx` from
1248 # `filectx` are not used in complex operations that care about
1250 # `filectx` are not used in complex operations that care about
1249 # filtering.
1251 # filtering.
1250 #
1252 #
1251 # This fallback is a cheap and dirty fix that prevent several
1253 # This fallback is a cheap and dirty fix that prevent several
1252 # crashes. It does not ensure the behavior is correct. However the
1254 # crashes. It does not ensure the behavior is correct. However the
1253 # behavior was not correct before filtering either and "incorrect
1255 # behavior was not correct before filtering either and "incorrect
1254 # behavior" is seen as better as "crash"
1256 # behavior" is seen as better as "crash"
1255 #
1257 #
1256 # Linkrevs have several serious troubles with filtering that are
1258 # Linkrevs have several serious troubles with filtering that are
1257 # complicated to solve. Proper handling of the issue here should be
1259 # complicated to solve. Proper handling of the issue here should be
1258 # considered when solving linkrev issue are on the table.
1260 # considered when solving linkrev issue are on the table.
1259 return self._repo.unfiltered()[self._changeid]
1261 return self._repo.unfiltered()[self._changeid]
1260
1262
1261 def filectx(self, fileid, changeid=None):
1263 def filectx(self, fileid, changeid=None):
1262 '''opens an arbitrary revision of the file without
1264 '''opens an arbitrary revision of the file without
1263 opening a new filelog'''
1265 opening a new filelog'''
1264 return filectx(
1266 return filectx(
1265 self._repo,
1267 self._repo,
1266 self._path,
1268 self._path,
1267 fileid=fileid,
1269 fileid=fileid,
1268 filelog=self._filelog,
1270 filelog=self._filelog,
1269 changeid=changeid,
1271 changeid=changeid,
1270 )
1272 )
1271
1273
1272 def rawdata(self):
1274 def rawdata(self):
1273 return self._filelog.rawdata(self._filenode)
1275 return self._filelog.rawdata(self._filenode)
1274
1276
1275 def rawflags(self):
1277 def rawflags(self):
1276 """low-level revlog flags"""
1278 """low-level revlog flags"""
1277 return self._filelog.flags(self._filerev)
1279 return self._filelog.flags(self._filerev)
1278
1280
1279 def data(self):
1281 def data(self):
1280 try:
1282 try:
1281 return self._filelog.read(self._filenode)
1283 return self._filelog.read(self._filenode)
1282 except error.CensoredNodeError:
1284 except error.CensoredNodeError:
1283 if self._repo.ui.config(b"censor", b"policy") == b"ignore":
1285 if self._repo.ui.config(b"censor", b"policy") == b"ignore":
1284 return b""
1286 return b""
1285 raise error.Abort(
1287 raise error.Abort(
1286 _(b"censored node: %s") % short(self._filenode),
1288 _(b"censored node: %s") % short(self._filenode),
1287 hint=_(b"set censor.policy to ignore errors"),
1289 hint=_(b"set censor.policy to ignore errors"),
1288 )
1290 )
1289
1291
1290 def size(self):
1292 def size(self):
1291 return self._filelog.size(self._filerev)
1293 return self._filelog.size(self._filerev)
1292
1294
1293 @propertycache
1295 @propertycache
1294 def _copied(self):
1296 def _copied(self):
1295 """check if file was actually renamed in this changeset revision
1297 """check if file was actually renamed in this changeset revision
1296
1298
1297 If rename logged in file revision, we report copy for changeset only
1299 If rename logged in file revision, we report copy for changeset only
1298 if file revisions linkrev points back to the changeset in question
1300 if file revisions linkrev points back to the changeset in question
1299 or both changeset parents contain different file revisions.
1301 or both changeset parents contain different file revisions.
1300 """
1302 """
1301
1303
1302 renamed = self._filelog.renamed(self._filenode)
1304 renamed = self._filelog.renamed(self._filenode)
1303 if not renamed:
1305 if not renamed:
1304 return None
1306 return None
1305
1307
1306 if self.rev() == self.linkrev():
1308 if self.rev() == self.linkrev():
1307 return renamed
1309 return renamed
1308
1310
1309 name = self.path()
1311 name = self.path()
1310 fnode = self._filenode
1312 fnode = self._filenode
1311 for p in self._changectx.parents():
1313 for p in self._changectx.parents():
1312 try:
1314 try:
1313 if fnode == p.filenode(name):
1315 if fnode == p.filenode(name):
1314 return None
1316 return None
1315 except error.LookupError:
1317 except error.LookupError:
1316 pass
1318 pass
1317 return renamed
1319 return renamed
1318
1320
1319 def children(self):
1321 def children(self):
1320 # hard for renames
1322 # hard for renames
1321 c = self._filelog.children(self._filenode)
1323 c = self._filelog.children(self._filenode)
1322 return [
1324 return [
1323 filectx(self._repo, self._path, fileid=x, filelog=self._filelog)
1325 filectx(self._repo, self._path, fileid=x, filelog=self._filelog)
1324 for x in c
1326 for x in c
1325 ]
1327 ]
1326
1328
1327
1329
1328 class committablectx(basectx):
1330 class committablectx(basectx):
1329 """A committablectx object provides common functionality for a context that
1331 """A committablectx object provides common functionality for a context that
1330 wants the ability to commit, e.g. workingctx or memctx."""
1332 wants the ability to commit, e.g. workingctx or memctx."""
1331
1333
1332 def __init__(
1334 def __init__(
1333 self,
1335 self,
1334 repo,
1336 repo,
1335 text=b"",
1337 text=b"",
1336 user=None,
1338 user=None,
1337 date=None,
1339 date=None,
1338 extra=None,
1340 extra=None,
1339 changes=None,
1341 changes=None,
1340 branch=None,
1342 branch=None,
1341 ):
1343 ):
1342 super(committablectx, self).__init__(repo)
1344 super(committablectx, self).__init__(repo)
1343 self._rev = None
1345 self._rev = None
1344 self._node = None
1346 self._node = None
1345 self._text = text
1347 self._text = text
1346 if date:
1348 if date:
1347 self._date = dateutil.parsedate(date)
1349 self._date = dateutil.parsedate(date)
1348 if user:
1350 if user:
1349 self._user = user
1351 self._user = user
1350 if changes:
1352 if changes:
1351 self._status = changes
1353 self._status = changes
1352
1354
1353 self._extra = {}
1355 self._extra = {}
1354 if extra:
1356 if extra:
1355 self._extra = extra.copy()
1357 self._extra = extra.copy()
1356 if branch is not None:
1358 if branch is not None:
1357 self._extra[b'branch'] = encoding.fromlocal(branch)
1359 self._extra[b'branch'] = encoding.fromlocal(branch)
1358 if not self._extra.get(b'branch'):
1360 if not self._extra.get(b'branch'):
1359 self._extra[b'branch'] = b'default'
1361 self._extra[b'branch'] = b'default'
1360
1362
1361 def __bytes__(self):
1363 def __bytes__(self):
1362 return bytes(self._parents[0]) + b"+"
1364 return bytes(self._parents[0]) + b"+"
1363
1365
1364 __str__ = encoding.strmethod(__bytes__)
1366 __str__ = encoding.strmethod(__bytes__)
1365
1367
1366 def __nonzero__(self):
1368 def __nonzero__(self):
1367 return True
1369 return True
1368
1370
1369 __bool__ = __nonzero__
1371 __bool__ = __nonzero__
1370
1372
1371 @propertycache
1373 @propertycache
1372 def _status(self):
1374 def _status(self):
1373 return self._repo.status()
1375 return self._repo.status()
1374
1376
1375 @propertycache
1377 @propertycache
1376 def _user(self):
1378 def _user(self):
1377 return self._repo.ui.username()
1379 return self._repo.ui.username()
1378
1380
1379 @propertycache
1381 @propertycache
1380 def _date(self):
1382 def _date(self):
1381 ui = self._repo.ui
1383 ui = self._repo.ui
1382 date = ui.configdate(b'devel', b'default-date')
1384 date = ui.configdate(b'devel', b'default-date')
1383 if date is None:
1385 if date is None:
1384 date = dateutil.makedate()
1386 date = dateutil.makedate()
1385 return date
1387 return date
1386
1388
1387 def subrev(self, subpath):
1389 def subrev(self, subpath):
1388 return None
1390 return None
1389
1391
1390 def manifestnode(self):
1392 def manifestnode(self):
1391 return None
1393 return None
1392
1394
1393 def user(self):
1395 def user(self):
1394 return self._user or self._repo.ui.username()
1396 return self._user or self._repo.ui.username()
1395
1397
1396 def date(self):
1398 def date(self):
1397 return self._date
1399 return self._date
1398
1400
1399 def description(self):
1401 def description(self):
1400 return self._text
1402 return self._text
1401
1403
1402 def files(self):
1404 def files(self):
1403 return sorted(
1405 return sorted(
1404 self._status.modified + self._status.added + self._status.removed
1406 self._status.modified + self._status.added + self._status.removed
1405 )
1407 )
1406
1408
1407 def modified(self):
1409 def modified(self):
1408 return self._status.modified
1410 return self._status.modified
1409
1411
1410 def added(self):
1412 def added(self):
1411 return self._status.added
1413 return self._status.added
1412
1414
1413 def removed(self):
1415 def removed(self):
1414 return self._status.removed
1416 return self._status.removed
1415
1417
1416 def deleted(self):
1418 def deleted(self):
1417 return self._status.deleted
1419 return self._status.deleted
1418
1420
1419 filesmodified = modified
1421 filesmodified = modified
1420 filesadded = added
1422 filesadded = added
1421 filesremoved = removed
1423 filesremoved = removed
1422
1424
1423 def branch(self):
1425 def branch(self):
1424 return encoding.tolocal(self._extra[b'branch'])
1426 return encoding.tolocal(self._extra[b'branch'])
1425
1427
1426 def closesbranch(self):
1428 def closesbranch(self):
1427 return b'close' in self._extra
1429 return b'close' in self._extra
1428
1430
1429 def extra(self):
1431 def extra(self):
1430 return self._extra
1432 return self._extra
1431
1433
1432 def isinmemory(self):
1434 def isinmemory(self):
1433 return False
1435 return False
1434
1436
1435 def tags(self):
1437 def tags(self):
1436 return []
1438 return []
1437
1439
1438 def bookmarks(self):
1440 def bookmarks(self):
1439 b = []
1441 b = []
1440 for p in self.parents():
1442 for p in self.parents():
1441 b.extend(p.bookmarks())
1443 b.extend(p.bookmarks())
1442 return b
1444 return b
1443
1445
1444 def phase(self):
1446 def phase(self):
1445 phase = phases.newcommitphase(self._repo.ui)
1447 phase = phases.newcommitphase(self._repo.ui)
1446 for p in self.parents():
1448 for p in self.parents():
1447 phase = max(phase, p.phase())
1449 phase = max(phase, p.phase())
1448 return phase
1450 return phase
1449
1451
1450 def hidden(self):
1452 def hidden(self):
1451 return False
1453 return False
1452
1454
1453 def children(self):
1455 def children(self):
1454 return []
1456 return []
1455
1457
1456 def ancestor(self, c2):
1458 def ancestor(self, c2):
1457 """return the "best" ancestor context of self and c2"""
1459 """return the "best" ancestor context of self and c2"""
1458 return self._parents[0].ancestor(c2) # punt on two parents for now
1460 return self._parents[0].ancestor(c2) # punt on two parents for now
1459
1461
1460 def ancestors(self):
1462 def ancestors(self):
1461 for p in self._parents:
1463 for p in self._parents:
1462 yield p
1464 yield p
1463 for a in self._repo.changelog.ancestors(
1465 for a in self._repo.changelog.ancestors(
1464 [p.rev() for p in self._parents]
1466 [p.rev() for p in self._parents]
1465 ):
1467 ):
1466 yield self._repo[a]
1468 yield self._repo[a]
1467
1469
1468 def markcommitted(self, node):
1470 def markcommitted(self, node):
1469 """Perform post-commit cleanup necessary after committing this ctx
1471 """Perform post-commit cleanup necessary after committing this ctx
1470
1472
1471 Specifically, this updates backing stores this working context
1473 Specifically, this updates backing stores this working context
1472 wraps to reflect the fact that the changes reflected by this
1474 wraps to reflect the fact that the changes reflected by this
1473 workingctx have been committed. For example, it marks
1475 workingctx have been committed. For example, it marks
1474 modified and added files as normal in the dirstate.
1476 modified and added files as normal in the dirstate.
1475
1477
1476 """
1478 """
1477
1479
1478 def dirty(self, missing=False, merge=True, branch=True):
1480 def dirty(self, missing=False, merge=True, branch=True):
1479 return False
1481 return False
1480
1482
1481
1483
1482 class workingctx(committablectx):
1484 class workingctx(committablectx):
1483 """A workingctx object makes access to data related to
1485 """A workingctx object makes access to data related to
1484 the current working directory convenient.
1486 the current working directory convenient.
1485 date - any valid date string or (unixtime, offset), or None.
1487 date - any valid date string or (unixtime, offset), or None.
1486 user - username string, or None.
1488 user - username string, or None.
1487 extra - a dictionary of extra values, or None.
1489 extra - a dictionary of extra values, or None.
1488 changes - a list of file lists as returned by localrepo.status()
1490 changes - a list of file lists as returned by localrepo.status()
1489 or None to use the repository status.
1491 or None to use the repository status.
1490 """
1492 """
1491
1493
1492 def __init__(
1494 def __init__(
1493 self, repo, text=b"", user=None, date=None, extra=None, changes=None
1495 self, repo, text=b"", user=None, date=None, extra=None, changes=None
1494 ):
1496 ):
1495 branch = None
1497 branch = None
1496 if not extra or b'branch' not in extra:
1498 if not extra or b'branch' not in extra:
1497 try:
1499 try:
1498 branch = repo.dirstate.branch()
1500 branch = repo.dirstate.branch()
1499 except UnicodeDecodeError:
1501 except UnicodeDecodeError:
1500 raise error.Abort(_(b'branch name not in UTF-8!'))
1502 raise error.Abort(_(b'branch name not in UTF-8!'))
1501 super(workingctx, self).__init__(
1503 super(workingctx, self).__init__(
1502 repo, text, user, date, extra, changes, branch=branch
1504 repo, text, user, date, extra, changes, branch=branch
1503 )
1505 )
1504
1506
1505 def __iter__(self):
1507 def __iter__(self):
1506 d = self._repo.dirstate
1508 d = self._repo.dirstate
1507 for f in d:
1509 for f in d:
1508 if d[f] != b'r':
1510 if d[f] != b'r':
1509 yield f
1511 yield f
1510
1512
1511 def __contains__(self, key):
1513 def __contains__(self, key):
1512 return self._repo.dirstate[key] not in b"?r"
1514 return self._repo.dirstate[key] not in b"?r"
1513
1515
1514 def hex(self):
1516 def hex(self):
1515 return wdirhex
1517 return wdirhex
1516
1518
1517 @propertycache
1519 @propertycache
1518 def _parents(self):
1520 def _parents(self):
1519 p = self._repo.dirstate.parents()
1521 p = self._repo.dirstate.parents()
1520 if p[1] == nullid:
1522 if p[1] == nullid:
1521 p = p[:-1]
1523 p = p[:-1]
1522 # use unfiltered repo to delay/avoid loading obsmarkers
1524 # use unfiltered repo to delay/avoid loading obsmarkers
1523 unfi = self._repo.unfiltered()
1525 unfi = self._repo.unfiltered()
1524 return [
1526 return [
1525 changectx(
1527 changectx(
1526 self._repo, unfi.changelog.rev(n), n, maybe_filtered=False
1528 self._repo, unfi.changelog.rev(n), n, maybe_filtered=False
1527 )
1529 )
1528 for n in p
1530 for n in p
1529 ]
1531 ]
1530
1532
1531 def setparents(self, p1node, p2node=nullid):
1533 def setparents(self, p1node, p2node=nullid):
1532 dirstate = self._repo.dirstate
1534 dirstate = self._repo.dirstate
1533 with dirstate.parentchange():
1535 with dirstate.parentchange():
1534 copies = dirstate.setparents(p1node, p2node)
1536 copies = dirstate.setparents(p1node, p2node)
1535 pctx = self._repo[p1node]
1537 pctx = self._repo[p1node]
1536 if copies:
1538 if copies:
1537 # Adjust copy records, the dirstate cannot do it, it
1539 # Adjust copy records, the dirstate cannot do it, it
1538 # requires access to parents manifests. Preserve them
1540 # requires access to parents manifests. Preserve them
1539 # only for entries added to first parent.
1541 # only for entries added to first parent.
1540 for f in copies:
1542 for f in copies:
1541 if f not in pctx and copies[f] in pctx:
1543 if f not in pctx and copies[f] in pctx:
1542 dirstate.copy(copies[f], f)
1544 dirstate.copy(copies[f], f)
1543 if p2node == nullid:
1545 if p2node == nullid:
1544 for f, s in sorted(dirstate.copies().items()):
1546 for f, s in sorted(dirstate.copies().items()):
1545 if f not in pctx and s not in pctx:
1547 if f not in pctx and s not in pctx:
1546 dirstate.copy(None, f)
1548 dirstate.copy(None, f)
1547
1549
1548 def _fileinfo(self, path):
1550 def _fileinfo(self, path):
1549 # populate __dict__['_manifest'] as workingctx has no _manifestdelta
1551 # populate __dict__['_manifest'] as workingctx has no _manifestdelta
1550 self._manifest
1552 self._manifest
1551 return super(workingctx, self)._fileinfo(path)
1553 return super(workingctx, self)._fileinfo(path)
1552
1554
1553 def _buildflagfunc(self):
1555 def _buildflagfunc(self):
1554 # Create a fallback function for getting file flags when the
1556 # Create a fallback function for getting file flags when the
1555 # filesystem doesn't support them
1557 # filesystem doesn't support them
1556
1558
1557 copiesget = self._repo.dirstate.copies().get
1559 copiesget = self._repo.dirstate.copies().get
1558 parents = self.parents()
1560 parents = self.parents()
1559 if len(parents) < 2:
1561 if len(parents) < 2:
1560 # when we have one parent, it's easy: copy from parent
1562 # when we have one parent, it's easy: copy from parent
1561 man = parents[0].manifest()
1563 man = parents[0].manifest()
1562
1564
1563 def func(f):
1565 def func(f):
1564 f = copiesget(f, f)
1566 f = copiesget(f, f)
1565 return man.flags(f)
1567 return man.flags(f)
1566
1568
1567 else:
1569 else:
1568 # merges are tricky: we try to reconstruct the unstored
1570 # merges are tricky: we try to reconstruct the unstored
1569 # result from the merge (issue1802)
1571 # result from the merge (issue1802)
1570 p1, p2 = parents
1572 p1, p2 = parents
1571 pa = p1.ancestor(p2)
1573 pa = p1.ancestor(p2)
1572 m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()
1574 m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()
1573
1575
1574 def func(f):
1576 def func(f):
1575 f = copiesget(f, f) # may be wrong for merges with copies
1577 f = copiesget(f, f) # may be wrong for merges with copies
1576 fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
1578 fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
1577 if fl1 == fl2:
1579 if fl1 == fl2:
1578 return fl1
1580 return fl1
1579 if fl1 == fla:
1581 if fl1 == fla:
1580 return fl2
1582 return fl2
1581 if fl2 == fla:
1583 if fl2 == fla:
1582 return fl1
1584 return fl1
1583 return b'' # punt for conflicts
1585 return b'' # punt for conflicts
1584
1586
1585 return func
1587 return func
1586
1588
1587 @propertycache
1589 @propertycache
1588 def _flagfunc(self):
1590 def _flagfunc(self):
1589 return self._repo.dirstate.flagfunc(self._buildflagfunc)
1591 return self._repo.dirstate.flagfunc(self._buildflagfunc)
1590
1592
1591 def flags(self, path):
1593 def flags(self, path):
1592 if '_manifest' in self.__dict__:
1594 if '_manifest' in self.__dict__:
1593 try:
1595 try:
1594 return self._manifest.flags(path)
1596 return self._manifest.flags(path)
1595 except KeyError:
1597 except KeyError:
1596 return b''
1598 return b''
1597
1599
1598 try:
1600 try:
1599 return self._flagfunc(path)
1601 return self._flagfunc(path)
1600 except OSError:
1602 except OSError:
1601 return b''
1603 return b''
1602
1604
1603 def filectx(self, path, filelog=None):
1605 def filectx(self, path, filelog=None):
1604 """get a file context from the working directory"""
1606 """get a file context from the working directory"""
1605 return workingfilectx(
1607 return workingfilectx(
1606 self._repo, path, workingctx=self, filelog=filelog
1608 self._repo, path, workingctx=self, filelog=filelog
1607 )
1609 )
1608
1610
1609 def dirty(self, missing=False, merge=True, branch=True):
1611 def dirty(self, missing=False, merge=True, branch=True):
1610 """check whether a working directory is modified"""
1612 """check whether a working directory is modified"""
1611 # check subrepos first
1613 # check subrepos first
1612 for s in sorted(self.substate):
1614 for s in sorted(self.substate):
1613 if self.sub(s).dirty(missing=missing):
1615 if self.sub(s).dirty(missing=missing):
1614 return True
1616 return True
1615 # check current working dir
1617 # check current working dir
1616 return (
1618 return (
1617 (merge and self.p2())
1619 (merge and self.p2())
1618 or (branch and self.branch() != self.p1().branch())
1620 or (branch and self.branch() != self.p1().branch())
1619 or self.modified()
1621 or self.modified()
1620 or self.added()
1622 or self.added()
1621 or self.removed()
1623 or self.removed()
1622 or (missing and self.deleted())
1624 or (missing and self.deleted())
1623 )
1625 )
1624
1626
1625 def add(self, list, prefix=b""):
1627 def add(self, list, prefix=b""):
1626 with self._repo.wlock():
1628 with self._repo.wlock():
1627 ui, ds = self._repo.ui, self._repo.dirstate
1629 ui, ds = self._repo.ui, self._repo.dirstate
1628 uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
1630 uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
1629 rejected = []
1631 rejected = []
1630 lstat = self._repo.wvfs.lstat
1632 lstat = self._repo.wvfs.lstat
1631 for f in list:
1633 for f in list:
1632 # ds.pathto() returns an absolute file when this is invoked from
1634 # ds.pathto() returns an absolute file when this is invoked from
1633 # the keyword extension. That gets flagged as non-portable on
1635 # the keyword extension. That gets flagged as non-portable on
1634 # Windows, since it contains the drive letter and colon.
1636 # Windows, since it contains the drive letter and colon.
1635 scmutil.checkportable(ui, os.path.join(prefix, f))
1637 scmutil.checkportable(ui, os.path.join(prefix, f))
1636 try:
1638 try:
1637 st = lstat(f)
1639 st = lstat(f)
1638 except OSError:
1640 except OSError:
1639 ui.warn(_(b"%s does not exist!\n") % uipath(f))
1641 ui.warn(_(b"%s does not exist!\n") % uipath(f))
1640 rejected.append(f)
1642 rejected.append(f)
1641 continue
1643 continue
1642 limit = ui.configbytes(b'ui', b'large-file-limit')
1644 limit = ui.configbytes(b'ui', b'large-file-limit')
1643 if limit != 0 and st.st_size > limit:
1645 if limit != 0 and st.st_size > limit:
1644 ui.warn(
1646 ui.warn(
1645 _(
1647 _(
1646 b"%s: up to %d MB of RAM may be required "
1648 b"%s: up to %d MB of RAM may be required "
1647 b"to manage this file\n"
1649 b"to manage this file\n"
1648 b"(use 'hg revert %s' to cancel the "
1650 b"(use 'hg revert %s' to cancel the "
1649 b"pending addition)\n"
1651 b"pending addition)\n"
1650 )
1652 )
1651 % (f, 3 * st.st_size // 1000000, uipath(f))
1653 % (f, 3 * st.st_size // 1000000, uipath(f))
1652 )
1654 )
1653 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1655 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1654 ui.warn(
1656 ui.warn(
1655 _(
1657 _(
1656 b"%s not added: only files and symlinks "
1658 b"%s not added: only files and symlinks "
1657 b"supported currently\n"
1659 b"supported currently\n"
1658 )
1660 )
1659 % uipath(f)
1661 % uipath(f)
1660 )
1662 )
1661 rejected.append(f)
1663 rejected.append(f)
1662 elif ds[f] in b'amn':
1664 elif ds[f] in b'amn':
1663 ui.warn(_(b"%s already tracked!\n") % uipath(f))
1665 ui.warn(_(b"%s already tracked!\n") % uipath(f))
1664 elif ds[f] == b'r':
1666 elif ds[f] == b'r':
1665 ds.normallookup(f)
1667 ds.normallookup(f)
1666 else:
1668 else:
1667 ds.add(f)
1669 ds.add(f)
1668 return rejected
1670 return rejected
1669
1671
1670 def forget(self, files, prefix=b""):
1672 def forget(self, files, prefix=b""):
1671 with self._repo.wlock():
1673 with self._repo.wlock():
1672 ds = self._repo.dirstate
1674 ds = self._repo.dirstate
1673 uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
1675 uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
1674 rejected = []
1676 rejected = []
1675 for f in files:
1677 for f in files:
1676 if f not in ds:
1678 if f not in ds:
1677 self._repo.ui.warn(_(b"%s not tracked!\n") % uipath(f))
1679 self._repo.ui.warn(_(b"%s not tracked!\n") % uipath(f))
1678 rejected.append(f)
1680 rejected.append(f)
1679 elif ds[f] != b'a':
1681 elif ds[f] != b'a':
1680 ds.remove(f)
1682 ds.remove(f)
1681 else:
1683 else:
1682 ds.drop(f)
1684 ds.drop(f)
1683 return rejected
1685 return rejected
1684
1686
1685 def copy(self, source, dest):
1687 def copy(self, source, dest):
1686 try:
1688 try:
1687 st = self._repo.wvfs.lstat(dest)
1689 st = self._repo.wvfs.lstat(dest)
1688 except OSError as err:
1690 except OSError as err:
1689 if err.errno != errno.ENOENT:
1691 if err.errno != errno.ENOENT:
1690 raise
1692 raise
1691 self._repo.ui.warn(
1693 self._repo.ui.warn(
1692 _(b"%s does not exist!\n") % self._repo.dirstate.pathto(dest)
1694 _(b"%s does not exist!\n") % self._repo.dirstate.pathto(dest)
1693 )
1695 )
1694 return
1696 return
1695 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1697 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1696 self._repo.ui.warn(
1698 self._repo.ui.warn(
1697 _(b"copy failed: %s is not a file or a symbolic link\n")
1699 _(b"copy failed: %s is not a file or a symbolic link\n")
1698 % self._repo.dirstate.pathto(dest)
1700 % self._repo.dirstate.pathto(dest)
1699 )
1701 )
1700 else:
1702 else:
1701 with self._repo.wlock():
1703 with self._repo.wlock():
1702 ds = self._repo.dirstate
1704 ds = self._repo.dirstate
1703 if ds[dest] in b'?':
1705 if ds[dest] in b'?':
1704 ds.add(dest)
1706 ds.add(dest)
1705 elif ds[dest] in b'r':
1707 elif ds[dest] in b'r':
1706 ds.normallookup(dest)
1708 ds.normallookup(dest)
1707 ds.copy(source, dest)
1709 ds.copy(source, dest)
1708
1710
1709 def match(
1711 def match(
1710 self,
1712 self,
1711 pats=None,
1713 pats=None,
1712 include=None,
1714 include=None,
1713 exclude=None,
1715 exclude=None,
1714 default=b'glob',
1716 default=b'glob',
1715 listsubrepos=False,
1717 listsubrepos=False,
1716 badfn=None,
1718 badfn=None,
1717 cwd=None,
1719 cwd=None,
1718 ):
1720 ):
1719 r = self._repo
1721 r = self._repo
1720 if not cwd:
1722 if not cwd:
1721 cwd = r.getcwd()
1723 cwd = r.getcwd()
1722
1724
1723 # Only a case insensitive filesystem needs magic to translate user input
1725 # Only a case insensitive filesystem needs magic to translate user input
1724 # to actual case in the filesystem.
1726 # to actual case in the filesystem.
1725 icasefs = not util.fscasesensitive(r.root)
1727 icasefs = not util.fscasesensitive(r.root)
1726 return matchmod.match(
1728 return matchmod.match(
1727 r.root,
1729 r.root,
1728 cwd,
1730 cwd,
1729 pats,
1731 pats,
1730 include,
1732 include,
1731 exclude,
1733 exclude,
1732 default,
1734 default,
1733 auditor=r.auditor,
1735 auditor=r.auditor,
1734 ctx=self,
1736 ctx=self,
1735 listsubrepos=listsubrepos,
1737 listsubrepos=listsubrepos,
1736 badfn=badfn,
1738 badfn=badfn,
1737 icasefs=icasefs,
1739 icasefs=icasefs,
1738 )
1740 )
1739
1741
1740 def _filtersuspectsymlink(self, files):
1742 def _filtersuspectsymlink(self, files):
1741 if not files or self._repo.dirstate._checklink:
1743 if not files or self._repo.dirstate._checklink:
1742 return files
1744 return files
1743
1745
1744 # Symlink placeholders may get non-symlink-like contents
1746 # Symlink placeholders may get non-symlink-like contents
1745 # via user error or dereferencing by NFS or Samba servers,
1747 # via user error or dereferencing by NFS or Samba servers,
1746 # so we filter out any placeholders that don't look like a
1748 # so we filter out any placeholders that don't look like a
1747 # symlink
1749 # symlink
1748 sane = []
1750 sane = []
1749 for f in files:
1751 for f in files:
1750 if self.flags(f) == b'l':
1752 if self.flags(f) == b'l':
1751 d = self[f].data()
1753 d = self[f].data()
1752 if (
1754 if (
1753 d == b''
1755 d == b''
1754 or len(d) >= 1024
1756 or len(d) >= 1024
1755 or b'\n' in d
1757 or b'\n' in d
1756 or stringutil.binary(d)
1758 or stringutil.binary(d)
1757 ):
1759 ):
1758 self._repo.ui.debug(
1760 self._repo.ui.debug(
1759 b'ignoring suspect symlink placeholder "%s"\n' % f
1761 b'ignoring suspect symlink placeholder "%s"\n' % f
1760 )
1762 )
1761 continue
1763 continue
1762 sane.append(f)
1764 sane.append(f)
1763 return sane
1765 return sane
1764
1766
1765 def _checklookup(self, files):
1767 def _checklookup(self, files):
1766 # check for any possibly clean files
1768 # check for any possibly clean files
1767 if not files:
1769 if not files:
1768 return [], [], []
1770 return [], [], []
1769
1771
1770 modified = []
1772 modified = []
1771 deleted = []
1773 deleted = []
1772 fixup = []
1774 fixup = []
1773 pctx = self._parents[0]
1775 pctx = self._parents[0]
1774 # do a full compare of any files that might have changed
1776 # do a full compare of any files that might have changed
1775 for f in sorted(files):
1777 for f in sorted(files):
1776 try:
1778 try:
1777 # This will return True for a file that got replaced by a
1779 # This will return True for a file that got replaced by a
1778 # directory in the interim, but fixing that is pretty hard.
1780 # directory in the interim, but fixing that is pretty hard.
1779 if (
1781 if (
1780 f not in pctx
1782 f not in pctx
1781 or self.flags(f) != pctx.flags(f)
1783 or self.flags(f) != pctx.flags(f)
1782 or pctx[f].cmp(self[f])
1784 or pctx[f].cmp(self[f])
1783 ):
1785 ):
1784 modified.append(f)
1786 modified.append(f)
1785 else:
1787 else:
1786 fixup.append(f)
1788 fixup.append(f)
1787 except (IOError, OSError):
1789 except (IOError, OSError):
1788 # A file become inaccessible in between? Mark it as deleted,
1790 # A file become inaccessible in between? Mark it as deleted,
1789 # matching dirstate behavior (issue5584).
1791 # matching dirstate behavior (issue5584).
1790 # The dirstate has more complex behavior around whether a
1792 # The dirstate has more complex behavior around whether a
1791 # missing file matches a directory, etc, but we don't need to
1793 # missing file matches a directory, etc, but we don't need to
1792 # bother with that: if f has made it to this point, we're sure
1794 # bother with that: if f has made it to this point, we're sure
1793 # it's in the dirstate.
1795 # it's in the dirstate.
1794 deleted.append(f)
1796 deleted.append(f)
1795
1797
1796 return modified, deleted, fixup
1798 return modified, deleted, fixup
1797
1799
1798 def _poststatusfixup(self, status, fixup):
1800 def _poststatusfixup(self, status, fixup):
1799 """update dirstate for files that are actually clean"""
1801 """update dirstate for files that are actually clean"""
1800 poststatus = self._repo.postdsstatus()
1802 poststatus = self._repo.postdsstatus()
1801 if fixup or poststatus:
1803 if fixup or poststatus:
1802 try:
1804 try:
1803 oldid = self._repo.dirstate.identity()
1805 oldid = self._repo.dirstate.identity()
1804
1806
1805 # updating the dirstate is optional
1807 # updating the dirstate is optional
1806 # so we don't wait on the lock
1808 # so we don't wait on the lock
1807 # wlock can invalidate the dirstate, so cache normal _after_
1809 # wlock can invalidate the dirstate, so cache normal _after_
1808 # taking the lock
1810 # taking the lock
1809 with self._repo.wlock(False):
1811 with self._repo.wlock(False):
1810 if self._repo.dirstate.identity() == oldid:
1812 if self._repo.dirstate.identity() == oldid:
1811 if fixup:
1813 if fixup:
1812 normal = self._repo.dirstate.normal
1814 normal = self._repo.dirstate.normal
1813 for f in fixup:
1815 for f in fixup:
1814 normal(f)
1816 normal(f)
1815 # write changes out explicitly, because nesting
1817 # write changes out explicitly, because nesting
1816 # wlock at runtime may prevent 'wlock.release()'
1818 # wlock at runtime may prevent 'wlock.release()'
1817 # after this block from doing so for subsequent
1819 # after this block from doing so for subsequent
1818 # changing files
1820 # changing files
1819 tr = self._repo.currenttransaction()
1821 tr = self._repo.currenttransaction()
1820 self._repo.dirstate.write(tr)
1822 self._repo.dirstate.write(tr)
1821
1823
1822 if poststatus:
1824 if poststatus:
1823 for ps in poststatus:
1825 for ps in poststatus:
1824 ps(self, status)
1826 ps(self, status)
1825 else:
1827 else:
1826 # in this case, writing changes out breaks
1828 # in this case, writing changes out breaks
1827 # consistency, because .hg/dirstate was
1829 # consistency, because .hg/dirstate was
1828 # already changed simultaneously after last
1830 # already changed simultaneously after last
1829 # caching (see also issue5584 for detail)
1831 # caching (see also issue5584 for detail)
1830 self._repo.ui.debug(
1832 self._repo.ui.debug(
1831 b'skip updating dirstate: identity mismatch\n'
1833 b'skip updating dirstate: identity mismatch\n'
1832 )
1834 )
1833 except error.LockError:
1835 except error.LockError:
1834 pass
1836 pass
1835 finally:
1837 finally:
1836 # Even if the wlock couldn't be grabbed, clear out the list.
1838 # Even if the wlock couldn't be grabbed, clear out the list.
1837 self._repo.clearpostdsstatus()
1839 self._repo.clearpostdsstatus()
1838
1840
1839 def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
1841 def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
1840 '''Gets the status from the dirstate -- internal use only.'''
1842 '''Gets the status from the dirstate -- internal use only.'''
1841 subrepos = []
1843 subrepos = []
1842 if b'.hgsub' in self:
1844 if b'.hgsub' in self:
1843 subrepos = sorted(self.substate)
1845 subrepos = sorted(self.substate)
1844 cmp, s = self._repo.dirstate.status(
1846 cmp, s = self._repo.dirstate.status(
1845 match, subrepos, ignored=ignored, clean=clean, unknown=unknown
1847 match, subrepos, ignored=ignored, clean=clean, unknown=unknown
1846 )
1848 )
1847
1849
1848 # check for any possibly clean files
1850 # check for any possibly clean files
1849 fixup = []
1851 fixup = []
1850 if cmp:
1852 if cmp:
1851 modified2, deleted2, fixup = self._checklookup(cmp)
1853 modified2, deleted2, fixup = self._checklookup(cmp)
1852 s.modified.extend(modified2)
1854 s.modified.extend(modified2)
1853 s.deleted.extend(deleted2)
1855 s.deleted.extend(deleted2)
1854
1856
1855 if fixup and clean:
1857 if fixup and clean:
1856 s.clean.extend(fixup)
1858 s.clean.extend(fixup)
1857
1859
1858 self._poststatusfixup(s, fixup)
1860 self._poststatusfixup(s, fixup)
1859
1861
1860 if match.always():
1862 if match.always():
1861 # cache for performance
1863 # cache for performance
1862 if s.unknown or s.ignored or s.clean:
1864 if s.unknown or s.ignored or s.clean:
1863 # "_status" is cached with list*=False in the normal route
1865 # "_status" is cached with list*=False in the normal route
1864 self._status = scmutil.status(
1866 self._status = scmutil.status(
1865 s.modified, s.added, s.removed, s.deleted, [], [], []
1867 s.modified, s.added, s.removed, s.deleted, [], [], []
1866 )
1868 )
1867 else:
1869 else:
1868 self._status = s
1870 self._status = s
1869
1871
1870 return s
1872 return s
1871
1873
1872 @propertycache
1874 @propertycache
1873 def _copies(self):
1875 def _copies(self):
1874 p1copies = {}
1876 p1copies = {}
1875 p2copies = {}
1877 p2copies = {}
1876 parents = self._repo.dirstate.parents()
1878 parents = self._repo.dirstate.parents()
1877 p1manifest = self._repo[parents[0]].manifest()
1879 p1manifest = self._repo[parents[0]].manifest()
1878 p2manifest = self._repo[parents[1]].manifest()
1880 p2manifest = self._repo[parents[1]].manifest()
1879 changedset = set(self.added()) | set(self.modified())
1881 changedset = set(self.added()) | set(self.modified())
1880 narrowmatch = self._repo.narrowmatch()
1882 narrowmatch = self._repo.narrowmatch()
1881 for dst, src in self._repo.dirstate.copies().items():
1883 for dst, src in self._repo.dirstate.copies().items():
1882 if dst not in changedset or not narrowmatch(dst):
1884 if dst not in changedset or not narrowmatch(dst):
1883 continue
1885 continue
1884 if src in p1manifest:
1886 if src in p1manifest:
1885 p1copies[dst] = src
1887 p1copies[dst] = src
1886 elif src in p2manifest:
1888 elif src in p2manifest:
1887 p2copies[dst] = src
1889 p2copies[dst] = src
1888 return p1copies, p2copies
1890 return p1copies, p2copies
1889
1891
1890 @propertycache
1892 @propertycache
1891 def _manifest(self):
1893 def _manifest(self):
1892 """generate a manifest corresponding to the values in self._status
1894 """generate a manifest corresponding to the values in self._status
1893
1895
1894 This reuse the file nodeid from parent, but we use special node
1896 This reuse the file nodeid from parent, but we use special node
1895 identifiers for added and modified files. This is used by manifests
1897 identifiers for added and modified files. This is used by manifests
1896 merge to see that files are different and by update logic to avoid
1898 merge to see that files are different and by update logic to avoid
1897 deleting newly added files.
1899 deleting newly added files.
1898 """
1900 """
1899 return self._buildstatusmanifest(self._status)
1901 return self._buildstatusmanifest(self._status)
1900
1902
1901 def _buildstatusmanifest(self, status):
1903 def _buildstatusmanifest(self, status):
1902 """Builds a manifest that includes the given status results."""
1904 """Builds a manifest that includes the given status results."""
1903 parents = self.parents()
1905 parents = self.parents()
1904
1906
1905 man = parents[0].manifest().copy()
1907 man = parents[0].manifest().copy()
1906
1908
1907 ff = self._flagfunc
1909 ff = self._flagfunc
1908 for i, l in (
1910 for i, l in (
1909 (addednodeid, status.added),
1911 (addednodeid, status.added),
1910 (modifiednodeid, status.modified),
1912 (modifiednodeid, status.modified),
1911 ):
1913 ):
1912 for f in l:
1914 for f in l:
1913 man[f] = i
1915 man[f] = i
1914 try:
1916 try:
1915 man.setflag(f, ff(f))
1917 man.setflag(f, ff(f))
1916 except OSError:
1918 except OSError:
1917 pass
1919 pass
1918
1920
1919 for f in status.deleted + status.removed:
1921 for f in status.deleted + status.removed:
1920 if f in man:
1922 if f in man:
1921 del man[f]
1923 del man[f]
1922
1924
1923 return man
1925 return man
1924
1926
1925 def _buildstatus(
1927 def _buildstatus(
1926 self, other, s, match, listignored, listclean, listunknown
1928 self, other, s, match, listignored, listclean, listunknown
1927 ):
1929 ):
1928 """build a status with respect to another context
1930 """build a status with respect to another context
1929
1931
1930 This includes logic for maintaining the fast path of status when
1932 This includes logic for maintaining the fast path of status when
1931 comparing the working directory against its parent, which is to skip
1933 comparing the working directory against its parent, which is to skip
1932 building a new manifest if self (working directory) is not comparing
1934 building a new manifest if self (working directory) is not comparing
1933 against its parent (repo['.']).
1935 against its parent (repo['.']).
1934 """
1936 """
1935 s = self._dirstatestatus(match, listignored, listclean, listunknown)
1937 s = self._dirstatestatus(match, listignored, listclean, listunknown)
1936 # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
1938 # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
1937 # might have accidentally ended up with the entire contents of the file
1939 # might have accidentally ended up with the entire contents of the file
1938 # they are supposed to be linking to.
1940 # they are supposed to be linking to.
1939 s.modified[:] = self._filtersuspectsymlink(s.modified)
1941 s.modified[:] = self._filtersuspectsymlink(s.modified)
1940 if other != self._repo[b'.']:
1942 if other != self._repo[b'.']:
1941 s = super(workingctx, self)._buildstatus(
1943 s = super(workingctx, self)._buildstatus(
1942 other, s, match, listignored, listclean, listunknown
1944 other, s, match, listignored, listclean, listunknown
1943 )
1945 )
1944 return s
1946 return s
1945
1947
1946 def _matchstatus(self, other, match):
1948 def _matchstatus(self, other, match):
1947 """override the match method with a filter for directory patterns
1949 """override the match method with a filter for directory patterns
1948
1950
1949 We use inheritance to customize the match.bad method only in cases of
1951 We use inheritance to customize the match.bad method only in cases of
1950 workingctx since it belongs only to the working directory when
1952 workingctx since it belongs only to the working directory when
1951 comparing against the parent changeset.
1953 comparing against the parent changeset.
1952
1954
1953 If we aren't comparing against the working directory's parent, then we
1955 If we aren't comparing against the working directory's parent, then we
1954 just use the default match object sent to us.
1956 just use the default match object sent to us.
1955 """
1957 """
1956 if other != self._repo[b'.']:
1958 if other != self._repo[b'.']:
1957
1959
1958 def bad(f, msg):
1960 def bad(f, msg):
1959 # 'f' may be a directory pattern from 'match.files()',
1961 # 'f' may be a directory pattern from 'match.files()',
1960 # so 'f not in ctx1' is not enough
1962 # so 'f not in ctx1' is not enough
1961 if f not in other and not other.hasdir(f):
1963 if f not in other and not other.hasdir(f):
1962 self._repo.ui.warn(
1964 self._repo.ui.warn(
1963 b'%s: %s\n' % (self._repo.dirstate.pathto(f), msg)
1965 b'%s: %s\n' % (self._repo.dirstate.pathto(f), msg)
1964 )
1966 )
1965
1967
1966 match.bad = bad
1968 match.bad = bad
1967 return match
1969 return match
1968
1970
1969 def walk(self, match):
1971 def walk(self, match):
1970 '''Generates matching file names.'''
1972 '''Generates matching file names.'''
1971 return sorted(
1973 return sorted(
1972 self._repo.dirstate.walk(
1974 self._repo.dirstate.walk(
1973 self._repo.narrowmatch(match),
1975 self._repo.narrowmatch(match),
1974 subrepos=sorted(self.substate),
1976 subrepos=sorted(self.substate),
1975 unknown=True,
1977 unknown=True,
1976 ignored=False,
1978 ignored=False,
1977 )
1979 )
1978 )
1980 )
1979
1981
1980 def matches(self, match):
1982 def matches(self, match):
1981 match = self._repo.narrowmatch(match)
1983 match = self._repo.narrowmatch(match)
1982 ds = self._repo.dirstate
1984 ds = self._repo.dirstate
1983 return sorted(f for f in ds.matches(match) if ds[f] != b'r')
1985 return sorted(f for f in ds.matches(match) if ds[f] != b'r')
1984
1986
1985 def markcommitted(self, node):
1987 def markcommitted(self, node):
1986 with self._repo.dirstate.parentchange():
1988 with self._repo.dirstate.parentchange():
1987 for f in self.modified() + self.added():
1989 for f in self.modified() + self.added():
1988 self._repo.dirstate.normal(f)
1990 self._repo.dirstate.normal(f)
1989 for f in self.removed():
1991 for f in self.removed():
1990 self._repo.dirstate.drop(f)
1992 self._repo.dirstate.drop(f)
1991 self._repo.dirstate.setparents(node)
1993 self._repo.dirstate.setparents(node)
1992 self._repo._quick_access_changeid_invalidate()
1994 self._repo._quick_access_changeid_invalidate()
1993
1995
1994 # write changes out explicitly, because nesting wlock at
1996 # write changes out explicitly, because nesting wlock at
1995 # runtime may prevent 'wlock.release()' in 'repo.commit()'
1997 # runtime may prevent 'wlock.release()' in 'repo.commit()'
1996 # from immediately doing so for subsequent changing files
1998 # from immediately doing so for subsequent changing files
1997 self._repo.dirstate.write(self._repo.currenttransaction())
1999 self._repo.dirstate.write(self._repo.currenttransaction())
1998
2000
1999 sparse.aftercommit(self._repo, node)
2001 sparse.aftercommit(self._repo, node)
2000
2002
2001
2003
class committablefilectx(basefilectx):
    """A committablefilectx provides common functionality for a file context
    that wants the ability to commit, e.g. workingfilectx or memfilectx."""

    def __init__(self, repo, path, filelog=None, ctx=None):
        self._repo = repo
        self._path = path
        # no changeset/filelog revision yet: this context is uncommitted
        self._changeid = None
        self._filerev = self._filenode = None

        if filelog is not None:
            self._filelog = filelog
        if ctx:
            self._changectx = ctx

    def __nonzero__(self):
        # an uncommitted file context always "exists"
        return True

    __bool__ = __nonzero__

    def linkrev(self):
        # linked to self._changectx no matter if file is modified or not
        return self.rev()

    def renamed(self):
        """Return ``(source path, source filenode)`` or None if not a copy."""
        src = self.copysource()
        if not src:
            return None
        parentman = self._changectx._parents[0]._manifest
        return src, parentman.get(src, nullid)

    def parents(self):
        '''return parent filectxs, following copies if necessary'''

        def nodefor(ctx, path):
            # filenode of *path* in *ctx*, nullid when absent
            return ctx._manifest.get(path, nullid)

        path = self._path
        filelog = self._filelog
        parentctxs = self._changectx._parents
        renamed = self.renamed()

        if renamed:
            # follow the copy: the filelog must be looked up lazily
            candidates = [renamed + (None,)]
        else:
            candidates = [(path, nodefor(parentctxs[0], path), filelog)]

        for pctx in parentctxs[1:]:
            candidates.append((path, nodefor(pctx, path), filelog))

        return [
            self._parentfilectx(p, fileid=n, filelog=l)
            for p, n, l in candidates
            if n != nullid
        ]

    def children(self):
        # an uncommitted file has no descendants
        return []
2059
2061
2060
2062
class workingfilectx(committablefilectx):
    """A workingfilectx object makes access to data related to a particular
    file in the working directory convenient."""

    def __init__(self, repo, path, filelog=None, workingctx=None):
        super(workingfilectx, self).__init__(repo, path, filelog, workingctx)

    @propertycache
    def _changectx(self):
        # lazily bind to a fresh working context
        return workingctx(self._repo)

    def data(self):
        """Read the file's content from the working directory."""
        return self._repo.wread(self._path)

    def copysource(self):
        """Return the dirstate-recorded copy source, if any."""
        return self._repo.dirstate.copied(self._path)

    def size(self):
        """Size in bytes of the on-disk file."""
        return self._repo.wvfs.lstat(self._path).st_size

    def lstat(self):
        return self._repo.wvfs.lstat(self._path)

    def date(self):
        """Return (mtime, tzoffset); fall back to the changectx date when
        the file is missing."""
        t, tz = self._changectx.date()
        try:
            mtime = self._repo.wvfs.lstat(self._path)[stat.ST_MTIME]
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
            return (t, tz)
        return (mtime, tz)

    def exists(self):
        return self._repo.wvfs.exists(self._path)

    def lexists(self):
        return self._repo.wvfs.lexists(self._path)

    def audit(self):
        return self._repo.wvfs.audit(self._path)

    def cmp(self, fctx):
        """compare with other file context

        returns True if different than fctx.
        """
        # fctx should be a filectx (not a workingfilectx)
        # invert comparison to reuse the same code path
        return fctx.cmp(self)

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        rmdir = self._repo.ui.configbool(b'experimental', b'removeemptydirs')
        self._repo.wvfs.unlinkpath(
            self._path, ignoremissing=ignoremissing, rmdir=rmdir
        )

    def write(self, data, flags, backgroundclose=False, **kwargs):
        """wraps repo.wwrite"""
        return self._repo.wwrite(
            self._path, data, flags, backgroundclose=backgroundclose, **kwargs
        )

    def markcopied(self, src):
        """marks this file a copy of `src`"""
        self._repo.dirstate.copy(src, self._path)

    def clearunknown(self):
        """Removes conflicting items in the working directory so that
        ``write()`` can be called successfully.
        """
        wvfs = self._repo.wvfs
        path = self._path
        wvfs.audit(path)
        checkconflicts = self._repo.ui.configbool(
            b'experimental', b'merge.checkpathconflicts'
        )
        if checkconflicts:
            # remove files under the directory as they should already be
            # warned and backed up
            if wvfs.isdir(path) and not wvfs.islink(path):
                wvfs.rmtree(path, forcibly=True)
            for parent in reversed(list(pathutil.finddirs(path))):
                if wvfs.isfileorlink(parent):
                    wvfs.unlink(parent)
                    break
        else:
            # don't remove files if path conflicts are not processed
            if wvfs.isdir(path) and not wvfs.islink(path):
                wvfs.removedirs(path)

    def setflags(self, l, x):
        self._repo.wvfs.setflags(self._path, l, x)
2153
2155
2154
2156
2155 class overlayworkingctx(committablectx):
2157 class overlayworkingctx(committablectx):
2156 """Wraps another mutable context with a write-back cache that can be
2158 """Wraps another mutable context with a write-back cache that can be
2157 converted into a commit context.
2159 converted into a commit context.
2158
2160
2159 self._cache[path] maps to a dict with keys: {
2161 self._cache[path] maps to a dict with keys: {
2160 'exists': bool?
2162 'exists': bool?
2161 'date': date?
2163 'date': date?
2162 'data': str?
2164 'data': str?
2163 'flags': str?
2165 'flags': str?
2164 'copied': str? (path or None)
2166 'copied': str? (path or None)
2165 }
2167 }
2166 If `exists` is True, `flags` must be non-None and 'date' is non-None. If it
2168 If `exists` is True, `flags` must be non-None and 'date' is non-None. If it
2167 is `False`, the file was deleted.
2169 is `False`, the file was deleted.
2168 """
2170 """
2169
2171
2170 def __init__(self, repo):
2172 def __init__(self, repo):
2171 super(overlayworkingctx, self).__init__(repo)
2173 super(overlayworkingctx, self).__init__(repo)
2172 self.clean()
2174 self.clean()
2173
2175
2174 def setbase(self, wrappedctx):
2176 def setbase(self, wrappedctx):
2175 self._wrappedctx = wrappedctx
2177 self._wrappedctx = wrappedctx
2176 self._parents = [wrappedctx]
2178 self._parents = [wrappedctx]
2177 # Drop old manifest cache as it is now out of date.
2179 # Drop old manifest cache as it is now out of date.
2178 # This is necessary when, e.g., rebasing several nodes with one
2180 # This is necessary when, e.g., rebasing several nodes with one
2179 # ``overlayworkingctx`` (e.g. with --collapse).
2181 # ``overlayworkingctx`` (e.g. with --collapse).
2180 util.clearcachedproperty(self, b'_manifest')
2182 util.clearcachedproperty(self, b'_manifest')
2181
2183
2182 def setparents(self, p1node, p2node=nullid):
2184 def setparents(self, p1node, p2node=nullid):
2183 assert p1node == self._wrappedctx.node()
2185 assert p1node == self._wrappedctx.node()
2184 self._parents = [self._wrappedctx, self._repo.unfiltered()[p2node]]
2186 self._parents = [self._wrappedctx, self._repo.unfiltered()[p2node]]
2185
2187
2186 def data(self, path):
2188 def data(self, path):
2187 if self.isdirty(path):
2189 if self.isdirty(path):
2188 if self._cache[path][b'exists']:
2190 if self._cache[path][b'exists']:
2189 if self._cache[path][b'data'] is not None:
2191 if self._cache[path][b'data'] is not None:
2190 return self._cache[path][b'data']
2192 return self._cache[path][b'data']
2191 else:
2193 else:
2192 # Must fallback here, too, because we only set flags.
2194 # Must fallback here, too, because we only set flags.
2193 return self._wrappedctx[path].data()
2195 return self._wrappedctx[path].data()
2194 else:
2196 else:
2195 raise error.ProgrammingError(
2197 raise error.ProgrammingError(
2196 b"No such file or directory: %s" % path
2198 b"No such file or directory: %s" % path
2197 )
2199 )
2198 else:
2200 else:
2199 return self._wrappedctx[path].data()
2201 return self._wrappedctx[path].data()
2200
2202
@propertycache
def _manifest(self):
    """Build a manifest reflecting this overlay's adds/mods/removes.

    Starts from a copy of p1's manifest and applies the cached
    changes on top of it.
    """
    man = self.parents()[0].manifest().copy()

    flag = self._flagfunc
    for p in self.added():
        man[p] = addednodeid
        man.setflag(p, flag(p))
    for p in self.modified():
        man[p] = modifiednodeid
        man.setflag(p, flag(p))
    for p in self.removed():
        del man[p]
    return man
2216
2218
@propertycache
def _flagfunc(self):
    """Return a callable mapping a dirty path to its cached flags."""

    def lookup(path):
        return self._cache[path][b'flags']

    return lookup
2223
2225
def files(self):
    """All paths touched by this overlay, sorted."""
    touched = self.added() + self.modified() + self.removed()
    return sorted(touched)
2226
2228
def modified(self):
    """Dirty files that also exist in the wrapped parent."""
    return [
        f
        for f, entry in self._cache.items()
        if entry[b'exists'] and self._existsinparent(f)
    ]
2233
2235
def added(self):
    """Dirty files that do NOT exist in the wrapped parent."""
    return [
        f
        for f, entry in self._cache.items()
        if entry[b'exists'] and not self._existsinparent(f)
    ]
2240
2242
def removed(self):
    """Files deleted in the overlay that exist in the wrapped parent."""
    return [
        f
        for f, entry in self._cache.items()
        if not entry[b'exists'] and self._existsinparent(f)
    ]
2247
2249
def p1copies(self):
    """Return {dest: source} for files marked as copied in this overlay.

    Only files inside the narrow clone are reported.
    """
    copies = {}
    narrowmatch = self._repo.narrowmatch()
    for f in self._cache.keys():
        if not narrowmatch(f):
            continue
        # NOTE: the previous ``copies.pop(f, None)`` here was dead code:
        # ``copies`` starts empty and each key is visited exactly once,
        # so there is never an entry to delete.
        source = self._cache[f][b'copied']
        if source:
            copies[f] = source
    return copies
2259
2261
def p2copies(self):
    """Return {dest: source} copy information relative to p2.

    Mirrors ``p1copies``; only narrow-clone files are reported.
    """
    copies = {}
    narrowmatch = self._repo.narrowmatch()
    for f in self._cache.keys():
        if not narrowmatch(f):
            continue
        # NOTE: the previous ``copies.pop(f, None)`` here was dead code:
        # ``copies`` starts empty and each key is visited exactly once,
        # so there is never an entry to delete.
        source = self._cache[f][b'copied']
        if source:
            copies[f] = source
    return copies
2271
2273
def isinmemory(self):
    """This context never touches the filesystem."""
    return True
2274
2276
def filedate(self, path):
    """Date of ``path``: the cached one if dirty, else the wrapped
    file's date."""
    if not self.isdirty(path):
        return self._wrappedctx[path].date()
    return self._cache[path][b'date']
2280
2282
def markcopied(self, path, origin):
    """Record that ``path`` was copied from ``origin``.

    Keeps the file's current date and flags; only the copy source
    changes.
    """
    date = self.filedate(path)
    flags = self.flags(path)
    self._markdirty(
        path, exists=True, date=date, flags=flags, copied=origin
    )
2289
2291
def copydata(self, path):
    """Copy source of ``path`` when it is dirty, else None."""
    if not self.isdirty(path):
        return None
    return self._cache[path][b'copied']
2295
2297
def flags(self, path):
    """Return the flags (b'l'/b'x'/b'') for ``path``.

    Raises ProgrammingError if ``path`` was removed in this overlay.
    """
    if self.isdirty(path):
        if self._cache[path][b'exists']:
            return self._cache[path][b'flags']
        # BUG FIX: the message previously interpolated self._path, an
        # attribute this class does not define (it lives on filectx
        # objects), so an AttributeError was raised instead of the
        # intended ProgrammingError.
        raise error.ProgrammingError(
            b"No such file or directory: %s" % path
        )
    return self._wrappedctx[path].flags()
2306
2308
def __contains__(self, key):
    """Membership honors in-overlay adds/removes before consulting p1."""
    entry = self._cache.get(key)
    if entry is not None:
        return entry[b'exists']
    return key in self.p1()
2311
2313
def _existsinparent(self, path):
    """True if ``path`` exists in the wrapped (parent) context."""
    try:
        # ``commitctx`` raises a ManifestLookupError for a missing
        # path, unlike ``workingctx`` which returns a workingfilectx
        # exposing an ``exists()`` method.
        self._wrappedctx[path]
    except error.ManifestLookupError:
        return False
    return True
2321
2323
def _auditconflicts(self, path):
    """Replicate the conflict checks normally done by wvfs.write().

    In-memory merges never touch the filesystem and never call
    ``applyupdates``, so we must verify by hand that ``path`` is
    actually writable -- e.g. that no prefix of it is a file in the
    other commit, and that it is not itself a directory there.
    """

    def fail(path, component):
        # p1() is the base and we're receiving "writes" for p2()'s
        # files.
        if b'l' in self.p1()[component].flags():
            raise error.Abort(
                b"error: %s conflicts with symlink %s "
                b"in %d." % (path, component, self.p1().rev())
            )
        else:
            raise error.Abort(
                b"error: '%s' conflicts with file '%s' in "
                b"%d." % (path, component, self.p1().rev())
            )

    # Every directory that would have to be created to write ``path``
    # from p2 must not be a file in p1.
    components = path.split(b'/')
    for i in pycompat.xrange(len(components)):
        prefix = b"/".join(components[0:i])
        if prefix in self:
            fail(path, prefix)

    # And the other direction: ``path`` itself must not be a directory
    # in p1 (p1 must contain nothing matching ``path/*``).
    match = self.match([path], default=b'path')
    matches = self.p1().manifest().matches(match)
    mfiles = matches.keys()
    if len(mfiles) > 0:
        if len(mfiles) == 1 and mfiles[0] == path:
            return
        # Ignore entries deleted in the current in-memory wctx.
        mfiles = [m for m in mfiles if m in self]
        if not mfiles:
            return
        raise error.Abort(
            b"error: file '%s' cannot be written because "
            b" '%s/' is a directory in %s (containing %d "
            b"entries: %s)"
            % (path, path, self.p1(), len(mfiles), b', '.join(mfiles))
        )
2370
2372
def write(self, path, data, flags=b'', **kwargs):
    """Record new contents and flags for ``path`` in the overlay.

    ``data`` must not be None; conflicts with the other side's
    directory structure abort.
    """
    if data is None:
        raise error.ProgrammingError(b"data must be non-None")
    self._auditconflicts(path)
    self._markdirty(
        path,
        exists=True,
        data=data,
        date=dateutil.makedate(),
        flags=flags,
    )
2378
2380
def setflags(self, path, l, x):
    """Set the symlink (``l``) and executable (``x``) flags for
    ``path``; the symlink flag wins when both are requested."""
    if l:
        flag = b'l'
    elif x:
        flag = b'x'
    else:
        flag = b''
    self._markdirty(path, exists=True, date=dateutil.makedate(), flags=flag)
2386
2388
def remove(self, path):
    """Mark ``path`` as deleted in the overlay."""
    self._markdirty(path, exists=False)
2389
2391
def exists(self, path):
    """Like ``lexists`` but follows symlinks; a broken link reports
    False.

    A dirty symlink is "followed" by recursing on its target, which
    is stored as the entry's data.
    """
    if not self.isdirty(path):
        return self._existsinparent(path)
    entry = self._cache[path]
    if entry[b'exists'] and b'l' in entry[b'flags']:
        # Chase the link target.
        return self.exists(entry[b'data'].strip())
    return entry[b'exists']
2406
2408
def lexists(self, path):
    """True if ``path`` exists, without following symlinks."""
    if not self.isdirty(path):
        return self._existsinparent(path)
    return self._cache[path][b'exists']
2413
2415
def size(self, path):
    """Size in bytes of ``path`` within this overlay.

    Raises ProgrammingError if ``path`` was removed in this overlay.
    """
    if self.isdirty(path):
        if self._cache[path][b'exists']:
            return len(self._cache[path][b'data'])
        # BUG FIX: the message previously interpolated self._path, an
        # attribute this class does not define (it lives on filectx
        # objects), so an AttributeError was raised instead of the
        # intended ProgrammingError.
        raise error.ProgrammingError(
            b"No such file or directory: %s" % path
        )
    return self._wrappedctx[path].size()
2423
2425
def tomemctx(
    self,
    text,
    branch=None,
    extra=None,
    date=None,
    parents=None,
    user=None,
    editor=None,
):
    """Convert this ``overlayworkingctx`` into a committable ``memctx``.

    ``text`` is the commit message.
    ``parents`` (optional) are rev numbers; they default to the
    wrapped context's parents.
    """
    if parents is None:
        parents = self.parents()
        if len(parents) == 1:
            parents = (parents[0], None)

    # ``parents`` is passed as rev numbers; resolve to changectxs.
    p1 = self._repo[parents[0]]
    p2 = None if parents[1] is None else self._repo[parents[1]]
    parents = (p1, p2)

    files = self.files()

    def getfile(repo, memctx, path):
        entry = self._cache[path]
        if not entry[b'exists']:
            # Returning None while listing the path in ``files`` is
            # how memctx records a deletion.
            return None
        return memfilectx(
            repo,
            memctx,
            path,
            entry[b'data'],
            b'l' in entry[b'flags'],
            b'x' in entry[b'flags'],
            entry[b'copied'],
        )

    if branch is None:
        branch = self._wrappedctx.branch()

    return memctx(
        self._repo,
        parents,
        text,
        files,
        getfile,
        date=date,
        extra=extra,
        user=user,
        branch=branch,
        editor=editor,
    )
2485
2487
def isdirty(self, path):
    """True if ``path`` has an in-memory modification."""
    return path in self._cache
2488
2490
def isempty(self):
    """True if, once clean entries are discarded, nothing is dirty."""
    # Entries that turn out to match the underlying context must be
    # dropped before the empty-commit check.
    self._compact()
    return not self._cache
2494
2496
def clean(self):
    """Discard every in-memory modification."""
    self._cache = {}
2497
2499
def _compact(self):
    """Drop cache entries identical to the underlying context.

    Such entries can appear during the merge process, e.g. when
    resolving a conflict with ``--tool :local``. Returns the list of
    discarded paths.
    """
    cleanpaths = []
    # Warm the file cache up front; not perfect, but a significant
    # win with e.g. remotefilelog.
    scmutil.prefetchfiles(
        self.repo(),
        [self.p1().rev()],
        scmutil.matchfiles(self.repo(), self._cache.keys()),
    )

    for path in self._cache.keys():
        entry = self._cache[path]
        try:
            underlying = self._wrappedctx[path]
            if (
                underlying.data() == entry[b'data']
                and underlying.flags() == entry[b'flags']
            ):
                cleanpaths.append(path)
        except error.ManifestLookupError:
            # Not in the underlying manifest: a created file.
            continue

    for path in cleanpaths:
        del self._cache[path]
    return cleanpaths
2530
2532
def _markdirty(
    self, path, exists, data=None, date=None, flags=b'', copied=None
):
    """Store an in-memory edit for ``path``.

    When the file is marked as existing but no data was supplied,
    reuse any previously cached data, falling back to the wrapped
    context, so that a dirty-and-existing entry always carries data.
    """
    if exists and data is None:
        previous = self._cache.get(path) or {}
        data = previous.get(b'data')
        if data is None:
            data = self._wrappedctx[path].data()

    self._cache[path] = {
        b'exists': exists,
        b'data': data,
        b'date': date,
        b'flags': flags,
        b'copied': copied,
    }
2550
2552
def filectx(self, path, filelog=None):
    """Return an overlay-aware file context for ``path``."""
    return overlayworkingfilectx(
        self._repo, path, parent=self, filelog=filelog
    )
2555
2557
2556
2558
class overlayworkingfilectx(committablefilectx):
    """A file context whose reads and writes all go through the
    in-memory cache of its parent ``overlayworkingctx``; nothing is
    written out until the parent is flushed via ``flush()``."""

    def __init__(self, repo, path, filelog=None, parent=None):
        super(overlayworkingfilectx, self).__init__(repo, path, filelog, parent)
        self._repo = repo
        self._parent = parent
        self._path = path

    def cmp(self, fctx):
        """True when this file's contents differ from ``fctx``'s."""
        return self.data() != fctx.data()

    def changectx(self):
        # The parent overlay context plays the role of the changectx.
        return self._parent

    def data(self):
        return self._parent.data(self._path)

    def date(self):
        return self._parent.filedate(self._path)

    def exists(self):
        return self.lexists()

    def lexists(self):
        return self._parent.exists(self._path)

    def copysource(self):
        return self._parent.copydata(self._path)

    def size(self):
        return self._parent.size(self._path)

    def markcopied(self, origin):
        self._parent.markcopied(self._path, origin)

    def audit(self):
        # Nothing to audit: no on-disk path is ever touched.
        pass

    def flags(self):
        return self._parent.flags(self._path)

    def setflags(self, islink, isexec):
        return self._parent.setflags(self._path, islink, isexec)

    def write(self, data, flags, backgroundclose=False, **kwargs):
        return self._parent.write(self._path, data, flags, **kwargs)

    def remove(self, ignoremissing=False):
        return self._parent.remove(self._path)

    def clearunknown(self):
        # No filesystem state, so there is never anything to clear.
        pass
2611
2613
2612
2614
class workingcommitctx(workingctx):
    """Convenient access to the data of the revision being committed.

    Working-directory changes that are not part of this commit are
    hidden from status queries made through this context.
    """

    def __init__(
        self, repo, changes, text=b"", user=None, date=None, extra=None
    ):
        super(workingcommitctx, self).__init__(
            repo, text, user, date, extra, changes
        )

    def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
        """Return only the matched files recorded in ``self._status``.

        Files left uncommitted appear "clean" here even when the
        working directory says otherwise.
        """
        if clean:
            cleanfiles = [
                f for f in self._manifest if f not in self._changedset
            ]
        else:
            cleanfiles = []
        modified = [f for f in self._status.modified if match(f)]
        added = [f for f in self._status.added if match(f)]
        removed = [f for f in self._status.removed if match(f)]
        return scmutil.status(
            modified, added, removed, [], [], [], cleanfiles
        )

    @propertycache
    def _changedset(self):
        """The set of files changed in this context."""
        changed = set(self._status.modified)
        changed.update(self._status.added)
        changed.update(self._status.removed)
        return changed
2656
2658
2657
2659
def makecachingfilectxfn(func):
    """Wrap ``func`` in a filectxfn that memoizes per path.

    ``util.cachefunc`` is unsuitable here: it would key the cache on
    every argument, and the repo and memctx arguments would create a
    reference cycle.
    """
    cache = {}

    def getfilectx(repo, memctx, path):
        try:
            return cache[path]
        except KeyError:
            value = cache[path] = func(repo, memctx, path)
            return value

    return getfilectx
2673
2675
2674
2676
def memfilefromctx(ctx):
    """Return a filectxfn serving ctx[path] as a memfilectx.

    Convenience helper for building a memctx based on another
    context.
    """

    def getfilectx(repo, memctx, path):
        fctx = ctx[path]
        return memfilectx(
            repo,
            memctx,
            path,
            fctx.data(),
            islink=fctx.islink(),
            isexec=fctx.isexec(),
            copysource=fctx.copysource(),
        )

    return getfilectx
2696
2698
2697
2699
def memfilefrompatch(patchstore):
    """Return a filectxfn serving files out of a patch store.

    Convenience helper for building a memctx based on a patchstore
    object. A missing file yields None, which memctx records as a
    deletion.
    """

    def getfilectx(repo, memctx, path):
        data, mode, copysource = patchstore.getfile(path)
        if data is None:
            return None
        islink, isexec = mode
        return memfilectx(
            repo,
            memctx,
            path,
            data,
            islink=islink,
            isexec=isexec,
            copysource=copysource,
        )

    return getfilectx
2720
2722
2721
2723
2722 class memctx(committablectx):
2724 class memctx(committablectx):
2723 """Use memctx to perform in-memory commits via localrepo.commitctx().
2725 """Use memctx to perform in-memory commits via localrepo.commitctx().
2724
2726
2725 Revision information is supplied at initialization time while
2727 Revision information is supplied at initialization time while
2726 related files data and is made available through a callback
2728 related files data and is made available through a callback
2727 mechanism. 'repo' is the current localrepo, 'parents' is a
2729 mechanism. 'repo' is the current localrepo, 'parents' is a
2728 sequence of two parent revisions identifiers (pass None for every
2730 sequence of two parent revisions identifiers (pass None for every
2729 missing parent), 'text' is the commit message and 'files' lists
2731 missing parent), 'text' is the commit message and 'files' lists
2730 names of files touched by the revision (normalized and relative to
2732 names of files touched by the revision (normalized and relative to
2731 repository root).
2733 repository root).
2732
2734
2733 filectxfn(repo, memctx, path) is a callable receiving the
2735 filectxfn(repo, memctx, path) is a callable receiving the
2734 repository, the current memctx object and the normalized path of
2736 repository, the current memctx object and the normalized path of
2735 requested file, relative to repository root. It is fired by the
2737 requested file, relative to repository root. It is fired by the
2736 commit function for every file in 'files', but calls order is
2738 commit function for every file in 'files', but calls order is
2737 undefined. If the file is available in the revision being
2739 undefined. If the file is available in the revision being
2738 committed (updated or added), filectxfn returns a memfilectx
2740 committed (updated or added), filectxfn returns a memfilectx
2739 object. If the file was removed, filectxfn return None for recent
2741 object. If the file was removed, filectxfn return None for recent
2740 Mercurial. Moved files are represented by marking the source file
2742 Mercurial. Moved files are represented by marking the source file
2741 removed and the new file added with copy information (see
2743 removed and the new file added with copy information (see
2742 memfilectx).
2744 memfilectx).
2743
2745
2744 user receives the committer name and defaults to current
2746 user receives the committer name and defaults to current
2745 repository username, date is the commit date in any format
2747 repository username, date is the commit date in any format
2746 supported by dateutil.parsedate() and defaults to current date, extra
2748 supported by dateutil.parsedate() and defaults to current date, extra
2747 is a dictionary of metadata or is left empty.
2749 is a dictionary of metadata or is left empty.
2748 """
2750 """
2749
2751
2750 # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
2752 # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
2751 # Extensions that need to retain compatibility across Mercurial 3.1 can use
2753 # Extensions that need to retain compatibility across Mercurial 3.1 can use
2752 # this field to determine what to do in filectxfn.
2754 # this field to determine what to do in filectxfn.
2753 _returnnoneformissingfiles = True
2755 _returnnoneformissingfiles = True
2754
2756
def __init__(
    self,
    repo,
    parents,
    text,
    files,
    filectxfn,
    user=None,
    date=None,
    extra=None,
    branch=None,
    editor=None,
):
    """Initialize an in-memory commit.

    ``parents`` is a pair of parent identifiers (None stands for the
    null revision). ``filectxfn`` may be a callable, a patch store,
    or another context; non-callables are wrapped appropriately.
    """
    super(memctx, self).__init__(
        repo, text, user, date, extra, branch=branch
    )
    self._rev = None
    self._node = None
    p1, p2 = [(p or nullid) for p in parents]
    self._parents = [self._repo[p] for p in (p1, p2)]
    self._files = sorted(set(files))
    self.substate = {}

    if isinstance(filectxfn, patch.filestore):
        filectxfn = memfilefrompatch(filectxfn)
    elif not callable(filectxfn):
        # A plain context: wrap it so it can serve its files.
        filectxfn = memfilefromctx(filectxfn)

    # Memoize per path; a big win for e.g. vcs convert scenarios.
    self._filectxfn = makecachingfilectxfn(filectxfn)

    if editor:
        self._text = editor(self._repo, self, [])
        self._repo.savecommitmessage(self._text)
2792
2794
2793 def filectx(self, path, filelog=None):
2795 def filectx(self, path, filelog=None):
2794 """get a file context from the working directory
2796 """get a file context from the working directory
2795
2797
2796 Returns None if file doesn't exist and should be removed."""
2798 Returns None if file doesn't exist and should be removed."""
2797 return self._filectxfn(self._repo, self, path)
2799 return self._filectxfn(self._repo, self, path)
2798
2800
2799 def commit(self):
2801 def commit(self):
2800 """commit context to the repo"""
2802 """commit context to the repo"""
2801 return self._repo.commitctx(self)
2803 return self._repo.commitctx(self)
2802
2804
2803 @propertycache
2805 @propertycache
2804 def _manifest(self):
2806 def _manifest(self):
2805 """generate a manifest based on the return values of filectxfn"""
2807 """generate a manifest based on the return values of filectxfn"""
2806
2808
2807 # keep this simple for now; just worry about p1
2809 # keep this simple for now; just worry about p1
2808 pctx = self._parents[0]
2810 pctx = self._parents[0]
2809 man = pctx.manifest().copy()
2811 man = pctx.manifest().copy()
2810
2812
2811 for f in self._status.modified:
2813 for f in self._status.modified:
2812 man[f] = modifiednodeid
2814 man[f] = modifiednodeid
2813
2815
2814 for f in self._status.added:
2816 for f in self._status.added:
2815 man[f] = addednodeid
2817 man[f] = addednodeid
2816
2818
2817 for f in self._status.removed:
2819 for f in self._status.removed:
2818 if f in man:
2820 if f in man:
2819 del man[f]
2821 del man[f]
2820
2822
2821 return man
2823 return man
2822
2824
2823 @propertycache
2825 @propertycache
2824 def _status(self):
2826 def _status(self):
2825 """Calculate exact status from ``files`` specified at construction
2827 """Calculate exact status from ``files`` specified at construction
2826 """
2828 """
2827 man1 = self.p1().manifest()
2829 man1 = self.p1().manifest()
2828 p2 = self._parents[1]
2830 p2 = self._parents[1]
2829 # "1 < len(self._parents)" can't be used for checking
2831 # "1 < len(self._parents)" can't be used for checking
2830 # existence of the 2nd parent, because "memctx._parents" is
2832 # existence of the 2nd parent, because "memctx._parents" is
2831 # explicitly initialized by the list, of which length is 2.
2833 # explicitly initialized by the list, of which length is 2.
2832 if p2.node() != nullid:
2834 if p2.node() != nullid:
2833 man2 = p2.manifest()
2835 man2 = p2.manifest()
2834 managing = lambda f: f in man1 or f in man2
2836 managing = lambda f: f in man1 or f in man2
2835 else:
2837 else:
2836 managing = lambda f: f in man1
2838 managing = lambda f: f in man1
2837
2839
2838 modified, added, removed = [], [], []
2840 modified, added, removed = [], [], []
2839 for f in self._files:
2841 for f in self._files:
2840 if not managing(f):
2842 if not managing(f):
2841 added.append(f)
2843 added.append(f)
2842 elif self[f]:
2844 elif self[f]:
2843 modified.append(f)
2845 modified.append(f)
2844 else:
2846 else:
2845 removed.append(f)
2847 removed.append(f)
2846
2848
2847 return scmutil.status(modified, added, removed, [], [], [], [])
2849 return scmutil.status(modified, added, removed, [], [], [], [])
2848
2850
2849
2851
2850 class memfilectx(committablefilectx):
2852 class memfilectx(committablefilectx):
2851 """memfilectx represents an in-memory file to commit.
2853 """memfilectx represents an in-memory file to commit.
2852
2854
2853 See memctx and committablefilectx for more details.
2855 See memctx and committablefilectx for more details.
2854 """
2856 """
2855
2857
2856 def __init__(
2858 def __init__(
2857 self,
2859 self,
2858 repo,
2860 repo,
2859 changectx,
2861 changectx,
2860 path,
2862 path,
2861 data,
2863 data,
2862 islink=False,
2864 islink=False,
2863 isexec=False,
2865 isexec=False,
2864 copysource=None,
2866 copysource=None,
2865 ):
2867 ):
2866 """
2868 """
2867 path is the normalized file path relative to repository root.
2869 path is the normalized file path relative to repository root.
2868 data is the file content as a string.
2870 data is the file content as a string.
2869 islink is True if the file is a symbolic link.
2871 islink is True if the file is a symbolic link.
2870 isexec is True if the file is executable.
2872 isexec is True if the file is executable.
2871 copied is the source file path if current file was copied in the
2873 copied is the source file path if current file was copied in the
2872 revision being committed, or None."""
2874 revision being committed, or None."""
2873 super(memfilectx, self).__init__(repo, path, None, changectx)
2875 super(memfilectx, self).__init__(repo, path, None, changectx)
2874 self._data = data
2876 self._data = data
2875 if islink:
2877 if islink:
2876 self._flags = b'l'
2878 self._flags = b'l'
2877 elif isexec:
2879 elif isexec:
2878 self._flags = b'x'
2880 self._flags = b'x'
2879 else:
2881 else:
2880 self._flags = b''
2882 self._flags = b''
2881 self._copysource = copysource
2883 self._copysource = copysource
2882
2884
2883 def copysource(self):
2885 def copysource(self):
2884 return self._copysource
2886 return self._copysource
2885
2887
2886 def cmp(self, fctx):
2888 def cmp(self, fctx):
2887 return self.data() != fctx.data()
2889 return self.data() != fctx.data()
2888
2890
2889 def data(self):
2891 def data(self):
2890 return self._data
2892 return self._data
2891
2893
2892 def remove(self, ignoremissing=False):
2894 def remove(self, ignoremissing=False):
2893 """wraps unlink for a repo's working directory"""
2895 """wraps unlink for a repo's working directory"""
2894 # need to figure out what to do here
2896 # need to figure out what to do here
2895 del self._changectx[self._path]
2897 del self._changectx[self._path]
2896
2898
2897 def write(self, data, flags, **kwargs):
2899 def write(self, data, flags, **kwargs):
2898 """wraps repo.wwrite"""
2900 """wraps repo.wwrite"""
2899 self._data = data
2901 self._data = data
2900
2902
2901
2903
2902 class metadataonlyctx(committablectx):
2904 class metadataonlyctx(committablectx):
2903 """Like memctx but it's reusing the manifest of different commit.
2905 """Like memctx but it's reusing the manifest of different commit.
2904 Intended to be used by lightweight operations that are creating
2906 Intended to be used by lightweight operations that are creating
2905 metadata-only changes.
2907 metadata-only changes.
2906
2908
2907 Revision information is supplied at initialization time. 'repo' is the
2909 Revision information is supplied at initialization time. 'repo' is the
2908 current localrepo, 'ctx' is original revision which manifest we're reuisng
2910 current localrepo, 'ctx' is original revision which manifest we're reuisng
2909 'parents' is a sequence of two parent revisions identifiers (pass None for
2911 'parents' is a sequence of two parent revisions identifiers (pass None for
2910 every missing parent), 'text' is the commit.
2912 every missing parent), 'text' is the commit.
2911
2913
2912 user receives the committer name and defaults to current repository
2914 user receives the committer name and defaults to current repository
2913 username, date is the commit date in any format supported by
2915 username, date is the commit date in any format supported by
2914 dateutil.parsedate() and defaults to current date, extra is a dictionary of
2916 dateutil.parsedate() and defaults to current date, extra is a dictionary of
2915 metadata or is left empty.
2917 metadata or is left empty.
2916 """
2918 """
2917
2919
2918 def __init__(
2920 def __init__(
2919 self,
2921 self,
2920 repo,
2922 repo,
2921 originalctx,
2923 originalctx,
2922 parents=None,
2924 parents=None,
2923 text=None,
2925 text=None,
2924 user=None,
2926 user=None,
2925 date=None,
2927 date=None,
2926 extra=None,
2928 extra=None,
2927 editor=None,
2929 editor=None,
2928 ):
2930 ):
2929 if text is None:
2931 if text is None:
2930 text = originalctx.description()
2932 text = originalctx.description()
2931 super(metadataonlyctx, self).__init__(repo, text, user, date, extra)
2933 super(metadataonlyctx, self).__init__(repo, text, user, date, extra)
2932 self._rev = None
2934 self._rev = None
2933 self._node = None
2935 self._node = None
2934 self._originalctx = originalctx
2936 self._originalctx = originalctx
2935 self._manifestnode = originalctx.manifestnode()
2937 self._manifestnode = originalctx.manifestnode()
2936 if parents is None:
2938 if parents is None:
2937 parents = originalctx.parents()
2939 parents = originalctx.parents()
2938 else:
2940 else:
2939 parents = [repo[p] for p in parents if p is not None]
2941 parents = [repo[p] for p in parents if p is not None]
2940 parents = parents[:]
2942 parents = parents[:]
2941 while len(parents) < 2:
2943 while len(parents) < 2:
2942 parents.append(repo[nullid])
2944 parents.append(repo[nullid])
2943 p1, p2 = self._parents = parents
2945 p1, p2 = self._parents = parents
2944
2946
2945 # sanity check to ensure that the reused manifest parents are
2947 # sanity check to ensure that the reused manifest parents are
2946 # manifests of our commit parents
2948 # manifests of our commit parents
2947 mp1, mp2 = self.manifestctx().parents
2949 mp1, mp2 = self.manifestctx().parents
2948 if p1 != nullid and p1.manifestnode() != mp1:
2950 if p1 != nullid and p1.manifestnode() != mp1:
2949 raise RuntimeError(
2951 raise RuntimeError(
2950 r"can't reuse the manifest: its p1 "
2952 r"can't reuse the manifest: its p1 "
2951 r"doesn't match the new ctx p1"
2953 r"doesn't match the new ctx p1"
2952 )
2954 )
2953 if p2 != nullid and p2.manifestnode() != mp2:
2955 if p2 != nullid and p2.manifestnode() != mp2:
2954 raise RuntimeError(
2956 raise RuntimeError(
2955 r"can't reuse the manifest: "
2957 r"can't reuse the manifest: "
2956 r"its p2 doesn't match the new ctx p2"
2958 r"its p2 doesn't match the new ctx p2"
2957 )
2959 )
2958
2960
2959 self._files = originalctx.files()
2961 self._files = originalctx.files()
2960 self.substate = {}
2962 self.substate = {}
2961
2963
2962 if editor:
2964 if editor:
2963 self._text = editor(self._repo, self, [])
2965 self._text = editor(self._repo, self, [])
2964 self._repo.savecommitmessage(self._text)
2966 self._repo.savecommitmessage(self._text)
2965
2967
2966 def manifestnode(self):
2968 def manifestnode(self):
2967 return self._manifestnode
2969 return self._manifestnode
2968
2970
2969 @property
2971 @property
2970 def _manifestctx(self):
2972 def _manifestctx(self):
2971 return self._repo.manifestlog[self._manifestnode]
2973 return self._repo.manifestlog[self._manifestnode]
2972
2974
2973 def filectx(self, path, filelog=None):
2975 def filectx(self, path, filelog=None):
2974 return self._originalctx.filectx(path, filelog=filelog)
2976 return self._originalctx.filectx(path, filelog=filelog)
2975
2977
2976 def commit(self):
2978 def commit(self):
2977 """commit context to the repo"""
2979 """commit context to the repo"""
2978 return self._repo.commitctx(self)
2980 return self._repo.commitctx(self)
2979
2981
2980 @property
2982 @property
2981 def _manifest(self):
2983 def _manifest(self):
2982 return self._originalctx.manifest()
2984 return self._originalctx.manifest()
2983
2985
2984 @propertycache
2986 @propertycache
2985 def _status(self):
2987 def _status(self):
2986 """Calculate exact status from ``files`` specified in the ``origctx``
2988 """Calculate exact status from ``files`` specified in the ``origctx``
2987 and parents manifests.
2989 and parents manifests.
2988 """
2990 """
2989 man1 = self.p1().manifest()
2991 man1 = self.p1().manifest()
2990 p2 = self._parents[1]
2992 p2 = self._parents[1]
2991 # "1 < len(self._parents)" can't be used for checking
2993 # "1 < len(self._parents)" can't be used for checking
2992 # existence of the 2nd parent, because "metadataonlyctx._parents" is
2994 # existence of the 2nd parent, because "metadataonlyctx._parents" is
2993 # explicitly initialized by the list, of which length is 2.
2995 # explicitly initialized by the list, of which length is 2.
2994 if p2.node() != nullid:
2996 if p2.node() != nullid:
2995 man2 = p2.manifest()
2997 man2 = p2.manifest()
2996 managing = lambda f: f in man1 or f in man2
2998 managing = lambda f: f in man1 or f in man2
2997 else:
2999 else:
2998 managing = lambda f: f in man1
3000 managing = lambda f: f in man1
2999
3001
3000 modified, added, removed = [], [], []
3002 modified, added, removed = [], [], []
3001 for f in self._files:
3003 for f in self._files:
3002 if not managing(f):
3004 if not managing(f):
3003 added.append(f)
3005 added.append(f)
3004 elif f in self:
3006 elif f in self:
3005 modified.append(f)
3007 modified.append(f)
3006 else:
3008 else:
3007 removed.append(f)
3009 removed.append(f)
3008
3010
3009 return scmutil.status(modified, added, removed, [], [], [], [])
3011 return scmutil.status(modified, added, removed, [], [], [], [])
3010
3012
3011
3013
3012 class arbitraryfilectx(object):
3014 class arbitraryfilectx(object):
3013 """Allows you to use filectx-like functions on a file in an arbitrary
3015 """Allows you to use filectx-like functions on a file in an arbitrary
3014 location on disk, possibly not in the working directory.
3016 location on disk, possibly not in the working directory.
3015 """
3017 """
3016
3018
3017 def __init__(self, path, repo=None):
3019 def __init__(self, path, repo=None):
3018 # Repo is optional because contrib/simplemerge uses this class.
3020 # Repo is optional because contrib/simplemerge uses this class.
3019 self._repo = repo
3021 self._repo = repo
3020 self._path = path
3022 self._path = path
3021
3023
3022 def cmp(self, fctx):
3024 def cmp(self, fctx):
3023 # filecmp follows symlinks whereas `cmp` should not, so skip the fast
3025 # filecmp follows symlinks whereas `cmp` should not, so skip the fast
3024 # path if either side is a symlink.
3026 # path if either side is a symlink.
3025 symlinks = b'l' in self.flags() or b'l' in fctx.flags()
3027 symlinks = b'l' in self.flags() or b'l' in fctx.flags()
3026 if not symlinks and isinstance(fctx, workingfilectx) and self._repo:
3028 if not symlinks and isinstance(fctx, workingfilectx) and self._repo:
3027 # Add a fast-path for merge if both sides are disk-backed.
3029 # Add a fast-path for merge if both sides are disk-backed.
3028 # Note that filecmp uses the opposite return values (True if same)
3030 # Note that filecmp uses the opposite return values (True if same)
3029 # from our cmp functions (True if different).
3031 # from our cmp functions (True if different).
3030 return not filecmp.cmp(self.path(), self._repo.wjoin(fctx.path()))
3032 return not filecmp.cmp(self.path(), self._repo.wjoin(fctx.path()))
3031 return self.data() != fctx.data()
3033 return self.data() != fctx.data()
3032
3034
3033 def path(self):
3035 def path(self):
3034 return self._path
3036 return self._path
3035
3037
3036 def flags(self):
3038 def flags(self):
3037 return b''
3039 return b''
3038
3040
3039 def data(self):
3041 def data(self):
3040 return util.readfile(self._path)
3042 return util.readfile(self._path)
3041
3043
3042 def decodeddata(self):
3044 def decodeddata(self):
3043 with open(self._path, b"rb") as f:
3045 with open(self._path, b"rb") as f:
3044 return f.read()
3046 return f.read()
3045
3047
3046 def remove(self):
3048 def remove(self):
3047 util.unlink(self._path)
3049 util.unlink(self._path)
3048
3050
3049 def write(self, data, flags, **kwargs):
3051 def write(self, data, flags, **kwargs):
3050 assert not flags
3052 assert not flags
3051 with open(self._path, b"wb") as f:
3053 with open(self._path, b"wb") as f:
3052 f.write(data)
3054 f.write(data)
@@ -1,148 +1,146 b''
1 ===================================
1 ===================================
2 Test repository filtering avoidance
2 Test repository filtering avoidance
3 ===================================
3 ===================================
4
4
5 This test file is a bit special as he does not check feature, but performance related internal code path.
5 This test file is a bit special as he does not check feature, but performance related internal code path.
6
6
7 Right now, filtering a repository comes with a cost that might be significant.
7 Right now, filtering a repository comes with a cost that might be significant.
8 Until this get better, ther are various operation that try hard not to trigger
8 Until this get better, ther are various operation that try hard not to trigger
9 a filtering computation. This test file make sure we don't reintroduce code that trigger the filtering for these operation:
9 a filtering computation. This test file make sure we don't reintroduce code that trigger the filtering for these operation:
10
10
11 Setup
11 Setup
12 -----
12 -----
13 $ hg init test-repo
13 $ hg init test-repo
14 $ cd test-repo
14 $ cd test-repo
15 $ echo "some line" > z
15 $ echo "some line" > z
16 $ echo a > a
16 $ echo a > a
17 $ hg commit -Am a
17 $ hg commit -Am a
18 adding a
18 adding a
19 adding z
19 adding z
20 $ echo "in a" >> z
20 $ echo "in a" >> z
21 $ echo b > b
21 $ echo b > b
22 $ hg commit -Am b
22 $ hg commit -Am b
23 adding b
23 adding b
24 $ echo "file" >> z
24 $ echo "file" >> z
25 $ echo c > c
25 $ echo c > c
26 $ hg commit -Am c
26 $ hg commit -Am c
27 adding c
27 adding c
28 $ hg rm a
28 $ hg rm a
29 $ echo c1 > c
29 $ echo c1 > c
30 $ hg add c
30 $ hg add c
31 c already tracked!
31 c already tracked!
32 $ echo d > d
32 $ echo d > d
33 $ hg add d
33 $ hg add d
34 $ rm b
34 $ rm b
35
35
36 $ cat << EOF >> $HGRCPATH
36 $ cat << EOF >> $HGRCPATH
37 > [devel]
37 > [devel]
38 > debug.repo-filters = yes
38 > debug.repo-filters = yes
39 > [ui]
39 > [ui]
40 > debug = yes
40 > debug = yes
41 > EOF
41 > EOF
42
42
43
43
44 tests
44 tests
45 -----
45 -----
46
46
47 Getting the node of `null`
47 Getting the node of `null`
48
48
49 $ hg log -r null -T "{node}\n"
49 $ hg log -r null -T "{node}\n"
50 0000000000000000000000000000000000000000
50 0000000000000000000000000000000000000000
51
51
52 Getting basic changeset inforation about `null`
52 Getting basic changeset inforation about `null`
53
53
54 $ hg log -r null -T "{node}\n{date}\n"
54 $ hg log -r null -T "{node}\n{date}\n"
55 0000000000000000000000000000000000000000
55 0000000000000000000000000000000000000000
56 0.00
56 0.00
57
57
58 Getting status of null
58 Getting status of null
59
59
60 $ hg status --change null
60 $ hg status --change null
61
61
62 Getting status of working copy
62 Getting status of working copy
63
63
64 $ hg status
64 $ hg status
65 M c
65 M c
66 A d
66 A d
67 R a
67 R a
68 ! b
68 ! b
69
69
70 Getting data about the working copy parent
70 Getting data about the working copy parent
71
71
72 $ hg log -r '.' -T "{node}\n{date}\n"
72 $ hg log -r '.' -T "{node}\n{date}\n"
73 c2932ca7786be30b67154d541a8764fae5532261
73 c2932ca7786be30b67154d541a8764fae5532261
74 0.00
74 0.00
75
75
76 Getting working copy diff
76 Getting working copy diff
77
77
78 $ hg diff
78 $ hg diff
79 diff -r c2932ca7786be30b67154d541a8764fae5532261 a
79 diff -r c2932ca7786be30b67154d541a8764fae5532261 a
80 --- a/a Thu Jan 01 00:00:00 1970 +0000
80 --- a/a Thu Jan 01 00:00:00 1970 +0000
81 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000
81 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000
82 @@ -1,1 +0,0 @@
82 @@ -1,1 +0,0 @@
83 -a
83 -a
84 diff -r c2932ca7786be30b67154d541a8764fae5532261 c
84 diff -r c2932ca7786be30b67154d541a8764fae5532261 c
85 --- a/c Thu Jan 01 00:00:00 1970 +0000
85 --- a/c Thu Jan 01 00:00:00 1970 +0000
86 +++ b/c Thu Jan 01 00:00:00 1970 +0000
86 +++ b/c Thu Jan 01 00:00:00 1970 +0000
87 @@ -1,1 +1,1 @@
87 @@ -1,1 +1,1 @@
88 -c
88 -c
89 +c1
89 +c1
90 diff -r c2932ca7786be30b67154d541a8764fae5532261 d
90 diff -r c2932ca7786be30b67154d541a8764fae5532261 d
91 --- /dev/null Thu Jan 01 00:00:00 1970 +0000
91 --- /dev/null Thu Jan 01 00:00:00 1970 +0000
92 +++ b/d Thu Jan 01 00:00:00 1970 +0000
92 +++ b/d Thu Jan 01 00:00:00 1970 +0000
93 @@ -0,0 +1,1 @@
93 @@ -0,0 +1,1 @@
94 +d
94 +d
95 $ hg diff --change .
95 $ hg diff --change .
96 diff -r 05293e5dd8d1ae4f84a8520a11c6f97cad26deca -r c2932ca7786be30b67154d541a8764fae5532261 c
96 diff -r 05293e5dd8d1ae4f84a8520a11c6f97cad26deca -r c2932ca7786be30b67154d541a8764fae5532261 c
97 --- /dev/null Thu Jan 01 00:00:00 1970 +0000
97 --- /dev/null Thu Jan 01 00:00:00 1970 +0000
98 +++ b/c Thu Jan 01 00:00:00 1970 +0000
98 +++ b/c Thu Jan 01 00:00:00 1970 +0000
99 @@ -0,0 +1,1 @@
99 @@ -0,0 +1,1 @@
100 +c
100 +c
101 diff -r 05293e5dd8d1ae4f84a8520a11c6f97cad26deca -r c2932ca7786be30b67154d541a8764fae5532261 z
101 diff -r 05293e5dd8d1ae4f84a8520a11c6f97cad26deca -r c2932ca7786be30b67154d541a8764fae5532261 z
102 --- a/z Thu Jan 01 00:00:00 1970 +0000
102 --- a/z Thu Jan 01 00:00:00 1970 +0000
103 +++ b/z Thu Jan 01 00:00:00 1970 +0000
103 +++ b/z Thu Jan 01 00:00:00 1970 +0000
104 @@ -1,2 +1,3 @@
104 @@ -1,2 +1,3 @@
105 some line
105 some line
106 in a
106 in a
107 +file
107 +file
108
108
109 exporting the current changeset
109 exporting the current changeset
110
110
111 $ hg export
111 $ hg export
112 exporting patch:
112 exporting patch:
113 # HG changeset patch
113 # HG changeset patch
114 # User test
114 # User test
115 # Date 0 0
115 # Date 0 0
116 # Thu Jan 01 00:00:00 1970 +0000
116 # Thu Jan 01 00:00:00 1970 +0000
117 # Node ID c2932ca7786be30b67154d541a8764fae5532261
117 # Node ID c2932ca7786be30b67154d541a8764fae5532261
118 # Parent 05293e5dd8d1ae4f84a8520a11c6f97cad26deca
118 # Parent 05293e5dd8d1ae4f84a8520a11c6f97cad26deca
119 c
119 c
120
120
121 diff -r 05293e5dd8d1ae4f84a8520a11c6f97cad26deca -r c2932ca7786be30b67154d541a8764fae5532261 c
121 diff -r 05293e5dd8d1ae4f84a8520a11c6f97cad26deca -r c2932ca7786be30b67154d541a8764fae5532261 c
122 --- /dev/null Thu Jan 01 00:00:00 1970 +0000
122 --- /dev/null Thu Jan 01 00:00:00 1970 +0000
123 +++ b/c Thu Jan 01 00:00:00 1970 +0000
123 +++ b/c Thu Jan 01 00:00:00 1970 +0000
124 @@ -0,0 +1,1 @@
124 @@ -0,0 +1,1 @@
125 +c
125 +c
126 diff -r 05293e5dd8d1ae4f84a8520a11c6f97cad26deca -r c2932ca7786be30b67154d541a8764fae5532261 z
126 diff -r 05293e5dd8d1ae4f84a8520a11c6f97cad26deca -r c2932ca7786be30b67154d541a8764fae5532261 z
127 --- a/z Thu Jan 01 00:00:00 1970 +0000
127 --- a/z Thu Jan 01 00:00:00 1970 +0000
128 +++ b/z Thu Jan 01 00:00:00 1970 +0000
128 +++ b/z Thu Jan 01 00:00:00 1970 +0000
129 @@ -1,2 +1,3 @@
129 @@ -1,2 +1,3 @@
130 some line
130 some line
131 in a
131 in a
132 +file
132 +file
133
133
134 using annotate
134 using annotate
135
135
136 - file with a single change
136 - file with a single change
137
137
138 $ hg annotate a
138 $ hg annotate a
139 debug.filters: computing revision filter for "visible"
140 0: a
139 0: a
141
140
142 - file with multiple change
141 - file with multiple change
143
142
144 $ hg annotate z
143 $ hg annotate z
145 debug.filters: computing revision filter for "visible"
146 0: some line
144 0: some line
147 1: in a
145 1: in a
148 2: file
146 2: file
General Comments 0
You need to be logged in to leave comments. Login now