context: simplify call to icase matcher in 'match()'...
Pierre-Yves David - r31464:0e7a6279 default
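
The description refers to basectx.match(), which appears unchanged in the context lines below: it forwards its arguments straight to matchmod.match(). As a rough, hedged usage sketch (not part of this changeset; the repository path and patterns are hypothetical), the matcher it returns can be driven like this:

    # Illustrative sketch only; assumes a Mercurial 4.x package is importable
    # and a repository exists at the made-up path /tmp/repo.
    from mercurial import hg, ui as uimod

    repo = hg.repository(uimod.ui(), '/tmp/repo')  # newer releases prefer ui.load()
    ctx = repo['.']                    # changectx for the working directory parent
    m = ctx.match(['glob:*.py'])       # built via matchmod.match(), as shown below
    print(sorted(ctx.walk(m)))         # files in that changeset matching *.py
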
@@ -1,2116 +1,2114 @@
# context.py - changeset and file context objects for mercurial
#
# Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno
import os
import re
import stat

from .i18n import _
from .node import (
    addednodeid,
    bin,
    hex,
    modifiednodeid,
    nullid,
    nullrev,
    short,
    wdirid,
    wdirnodes,
)
from . import (
    encoding,
    error,
    fileset,
    match as matchmod,
    mdiff,
    obsolete as obsmod,
    patch,
    phases,
    pycompat,
    repoview,
    revlog,
    scmutil,
    subrepo,
    util,
)

propertycache = util.propertycache

nonascii = re.compile(r'[^\x21-\x7f]').search

class basectx(object):
    """A basectx object represents the common logic for its children:
    changectx: read-only context that is already present in the repo,
    workingctx: a context that represents the working directory and can
                be committed,
    memctx: a context that represents changes in-memory and can also
            be committed."""
    def __new__(cls, repo, changeid='', *args, **kwargs):
        if isinstance(changeid, basectx):
            return changeid

        o = super(basectx, cls).__new__(cls)

        o._repo = repo
        o._rev = nullrev
        o._node = nullid

        return o

    def __str__(self):
        r = short(self.node())
        if pycompat.ispy3:
            return r.decode('ascii')
        return r

    def __bytes__(self):
        return short(self.node())

    def __int__(self):
        return self.rev()

    def __repr__(self):
        return "<%s %s>" % (type(self).__name__, str(self))

    def __eq__(self, other):
        try:
            return type(self) == type(other) and self._rev == other._rev
        except AttributeError:
            return False

    def __ne__(self, other):
        return not (self == other)

    def __contains__(self, key):
        return key in self._manifest

    def __getitem__(self, key):
        return self.filectx(key)

    def __iter__(self):
        return iter(self._manifest)

    def _buildstatusmanifest(self, status):
        """Builds a manifest that includes the given status results, if this is
        a working copy context. For non-working copy contexts, it just returns
        the normal manifest."""
        return self.manifest()

    def _matchstatus(self, other, match):
        """return match.always if match is none

        This internal method provides a way for child objects to override the
        match operator.
        """
        return match or matchmod.always(self._repo.root, self._repo.getcwd())

    def _buildstatus(self, other, s, match, listignored, listclean,
                     listunknown):
        """build a status with respect to another context"""
        # Load earliest manifest first for caching reasons. More specifically,
        # if you have revisions 1000 and 1001, 1001 is probably stored as a
        # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
        # 1000 and cache it so that when you read 1001, we just need to apply a
        # delta to what's in the cache. So that's one full reconstruction + one
        # delta application.
        mf2 = None
        if self.rev() is not None and self.rev() < other.rev():
            mf2 = self._buildstatusmanifest(s)
        mf1 = other._buildstatusmanifest(s)
        if mf2 is None:
            mf2 = self._buildstatusmanifest(s)

        modified, added = [], []
        removed = []
        clean = []
        deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
        deletedset = set(deleted)
        d = mf1.diff(mf2, match=match, clean=listclean)
        for fn, value in d.iteritems():
            if fn in deletedset:
                continue
            if value is None:
                clean.append(fn)
                continue
            (node1, flag1), (node2, flag2) = value
            if node1 is None:
                added.append(fn)
            elif node2 is None:
                removed.append(fn)
            elif flag1 != flag2:
                modified.append(fn)
            elif node2 not in wdirnodes:
                # When comparing files between two commits, we save time by
                # not comparing the file contents when the nodeids differ.
                # Note that this means we incorrectly report a reverted change
                # to a file as a modification.
                modified.append(fn)
            elif self[fn].cmp(other[fn]):
                modified.append(fn)
            else:
                clean.append(fn)

        if removed:
            # need to filter files if they are already reported as removed
            unknown = [fn for fn in unknown if fn not in mf1 and
                       (not match or match(fn))]
            ignored = [fn for fn in ignored if fn not in mf1 and
                       (not match or match(fn))]
            # if they're deleted, don't report them as removed
            removed = [fn for fn in removed if fn not in deletedset]

        return scmutil.status(modified, added, removed, deleted, unknown,
                              ignored, clean)

    @propertycache
    def substate(self):
        return subrepo.state(self, self._repo.ui)

    def subrev(self, subpath):
        return self.substate[subpath][1]

    def rev(self):
        return self._rev
    def node(self):
        return self._node
    def hex(self):
        return hex(self.node())
    def manifest(self):
        return self._manifest
    def manifestctx(self):
        return self._manifestctx
    def repo(self):
        return self._repo
    def phasestr(self):
        return phases.phasenames[self.phase()]
    def mutable(self):
        return self.phase() > phases.public

    def getfileset(self, expr):
        return fileset.getfileset(self, expr)

    def obsolete(self):
        """True if the changeset is obsolete"""
        return self.rev() in obsmod.getrevs(self._repo, 'obsolete')

    def extinct(self):
        """True if the changeset is extinct"""
        return self.rev() in obsmod.getrevs(self._repo, 'extinct')

    def unstable(self):
        """True if the changeset is not obsolete but it's ancestor are"""
        return self.rev() in obsmod.getrevs(self._repo, 'unstable')

    def bumped(self):
        """True if the changeset try to be a successor of a public changeset

        Only non-public and non-obsolete changesets may be bumped.
        """
        return self.rev() in obsmod.getrevs(self._repo, 'bumped')

    def divergent(self):
        """Is a successors of a changeset with multiple possible successors set

        Only non-public and non-obsolete changesets may be divergent.
        """
        return self.rev() in obsmod.getrevs(self._repo, 'divergent')

    def troubled(self):
        """True if the changeset is either unstable, bumped or divergent"""
        return self.unstable() or self.bumped() or self.divergent()

    def troubles(self):
        """return the list of troubles affecting this changesets.

        Troubles are returned as strings. possible values are:
        - unstable,
        - bumped,
        - divergent.
        """
        troubles = []
        if self.unstable():
            troubles.append('unstable')
        if self.bumped():
            troubles.append('bumped')
        if self.divergent():
            troubles.append('divergent')
        return troubles

    def parents(self):
        """return contexts for each parent changeset"""
        return self._parents

    def p1(self):
        return self._parents[0]

    def p2(self):
        parents = self._parents
        if len(parents) == 2:
            return parents[1]
        return changectx(self._repo, nullrev)

    def _fileinfo(self, path):
        if '_manifest' in self.__dict__:
            try:
                return self._manifest[path], self._manifest.flags(path)
            except KeyError:
                raise error.ManifestLookupError(self._node, path,
                                                _('not found in manifest'))
        if '_manifestdelta' in self.__dict__ or path in self.files():
            if path in self._manifestdelta:
                return (self._manifestdelta[path],
                        self._manifestdelta.flags(path))
        mfl = self._repo.manifestlog
        try:
            node, flag = mfl[self._changeset.manifest].find(path)
        except KeyError:
            raise error.ManifestLookupError(self._node, path,
                                            _('not found in manifest'))

        return node, flag

    def filenode(self, path):
        return self._fileinfo(path)[0]

    def flags(self, path):
        try:
            return self._fileinfo(path)[1]
        except error.LookupError:
            return ''

    def sub(self, path, allowcreate=True):
        '''return a subrepo for the stored revision of path, never wdir()'''
        return subrepo.subrepo(self, path, allowcreate=allowcreate)

    def nullsub(self, path, pctx):
        return subrepo.nullsubrepo(self, path, pctx)

    def workingsub(self, path):
        '''return a subrepo for the stored revision, or wdir if this is a wdir
        context.
        '''
        return subrepo.subrepo(self, path, allowwdir=True)

    def match(self, pats=None, include=None, exclude=None, default='glob',
              listsubrepos=False, badfn=None):
        if pats is None:
            pats = []
        r = self._repo
        return matchmod.match(r.root, r.getcwd(), pats,
                              include, exclude, default,
                              auditor=r.nofsauditor, ctx=self,
                              listsubrepos=listsubrepos, badfn=badfn)

    def diff(self, ctx2=None, match=None, **opts):
        """Returns a diff generator for the given contexts and matcher"""
        if ctx2 is None:
            ctx2 = self.p1()
        if ctx2 is not None:
            ctx2 = self._repo[ctx2]
        diffopts = patch.diffopts(self._repo.ui, opts)
        return patch.diff(self._repo, ctx2, self, match=match, opts=diffopts)

    def dirs(self):
        return self._manifest.dirs()

    def hasdir(self, dir):
        return self._manifest.hasdir(dir)

    def dirty(self, missing=False, merge=True, branch=True):
        return False

    def status(self, other=None, match=None, listignored=False,
               listclean=False, listunknown=False, listsubrepos=False):
        """return status of files between two nodes or node and working
        directory.

        If other is None, compare this node with working directory.

        returns (modified, added, removed, deleted, unknown, ignored, clean)
        """

        ctx1 = self
        ctx2 = self._repo[other]

        # This next code block is, admittedly, fragile logic that tests for
        # reversing the contexts and wouldn't need to exist if it weren't for
        # the fast (and common) code path of comparing the working directory
        # with its first parent.
        #
        # What we're aiming for here is the ability to call:
        #
        # workingctx.status(parentctx)
        #
        # If we always built the manifest for each context and compared those,
        # then we'd be done. But the special case of the above call means we
        # just copy the manifest of the parent.
        reversed = False
        if (not isinstance(ctx1, changectx)
            and isinstance(ctx2, changectx)):
            reversed = True
            ctx1, ctx2 = ctx2, ctx1

        match = ctx2._matchstatus(ctx1, match)
        r = scmutil.status([], [], [], [], [], [], [])
        r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
                              listunknown)

        if reversed:
            # Reverse added and removed. Clear deleted, unknown and ignored as
            # these make no sense to reverse.
            r = scmutil.status(r.modified, r.removed, r.added, [], [], [],
                               r.clean)

        if listsubrepos:
            for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
                try:
                    rev2 = ctx2.subrev(subpath)
                except KeyError:
                    # A subrepo that existed in node1 was deleted between
                    # node1 and node2 (inclusive). Thus, ctx2's substate
                    # won't contain that subpath. The best we can do ignore it.
                    rev2 = None
                submatch = matchmod.subdirmatcher(subpath, match)
                s = sub.status(rev2, match=submatch, ignored=listignored,
                               clean=listclean, unknown=listunknown,
                               listsubrepos=True)
                for rfiles, sfiles in zip(r, s):
                    rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)

        for l in r:
            l.sort()

        return r


def makememctx(repo, parents, text, user, date, branch, files, store,
               editor=None, extra=None):
    def getfilectx(repo, memctx, path):
        data, mode, copied = store.getfile(path)
        if data is None:
            return None
        islink, isexec = mode
        return memfilectx(repo, path, data, islink=islink, isexec=isexec,
                          copied=copied, memctx=memctx)
    if extra is None:
        extra = {}
    if branch:
        extra['branch'] = encoding.fromlocal(branch)
    ctx = memctx(repo, parents, text, files, getfilectx, user,
                 date, extra, editor)
    return ctx

class changectx(basectx):
    """A changecontext object makes access to data related to a particular
    changeset convenient. It represents a read-only context already present in
    the repo."""
    def __init__(self, repo, changeid=''):
        """changeid is a revision number, node, or tag"""

        # since basectx.__new__ already took care of copying the object, we
        # don't need to do anything in __init__, so we just exit here
        if isinstance(changeid, basectx):
            return

        if changeid == '':
            changeid = '.'
        self._repo = repo

        try:
            if isinstance(changeid, int):
                self._node = repo.changelog.node(changeid)
                self._rev = changeid
                return
            if not pycompat.ispy3 and isinstance(changeid, long):
                changeid = str(changeid)
            if changeid == 'null':
                self._node = nullid
                self._rev = nullrev
                return
            if changeid == 'tip':
                self._node = repo.changelog.tip()
                self._rev = repo.changelog.rev(self._node)
                return
            if changeid == '.' or changeid == repo.dirstate.p1():
                # this is a hack to delay/avoid loading obsmarkers
                # when we know that '.' won't be hidden
                self._node = repo.dirstate.p1()
                self._rev = repo.unfiltered().changelog.rev(self._node)
                return
            if len(changeid) == 20:
                try:
                    self._node = changeid
                    self._rev = repo.changelog.rev(changeid)
                    return
                except error.FilteredRepoLookupError:
                    raise
                except LookupError:
                    pass

            try:
                r = int(changeid)
                if '%d' % r != changeid:
                    raise ValueError
                l = len(repo.changelog)
                if r < 0:
                    r += l
                if r < 0 or r >= l:
                    raise ValueError
                self._rev = r
                self._node = repo.changelog.node(r)
                return
            except error.FilteredIndexError:
                raise
            except (ValueError, OverflowError, IndexError):
                pass

            if len(changeid) == 40:
                try:
                    self._node = bin(changeid)
                    self._rev = repo.changelog.rev(self._node)
                    return
                except error.FilteredLookupError:
                    raise
                except (TypeError, LookupError):
                    pass

            # lookup bookmarks through the name interface
            try:
                self._node = repo.names.singlenode(repo, changeid)
                self._rev = repo.changelog.rev(self._node)
                return
            except KeyError:
                pass
            except error.FilteredRepoLookupError:
                raise
            except error.RepoLookupError:
                pass

            self._node = repo.unfiltered().changelog._partialmatch(changeid)
            if self._node is not None:
                self._rev = repo.changelog.rev(self._node)
                return

            # lookup failed
            # check if it might have come from damaged dirstate
            #
            # XXX we could avoid the unfiltered if we had a recognizable
            # exception for filtered changeset access
            if changeid in repo.unfiltered().dirstate.parents():
                msg = _("working directory has unknown parent '%s'!")
                raise error.Abort(msg % short(changeid))
            try:
                if len(changeid) == 20 and nonascii(changeid):
                    changeid = hex(changeid)
            except TypeError:
                pass
        except (error.FilteredIndexError, error.FilteredLookupError,
                error.FilteredRepoLookupError):
            if repo.filtername.startswith('visible'):
                msg = _("hidden revision '%s'") % changeid
                hint = _('use --hidden to access hidden revisions')
                raise error.FilteredRepoLookupError(msg, hint=hint)
            msg = _("filtered revision '%s' (not in '%s' subset)")
            msg %= (changeid, repo.filtername)
            raise error.FilteredRepoLookupError(msg)
        except IndexError:
            pass
        raise error.RepoLookupError(
            _("unknown revision '%s'") % changeid)

    def __hash__(self):
        try:
            return hash(self._rev)
        except AttributeError:
            return id(self)

    def __nonzero__(self):
        return self._rev != nullrev

    @propertycache
    def _changeset(self):
        return self._repo.changelog.changelogrevision(self.rev())

    @propertycache
    def _manifest(self):
        return self._manifestctx.read()

    @propertycache
    def _manifestctx(self):
        return self._repo.manifestlog[self._changeset.manifest]

    @propertycache
    def _manifestdelta(self):
        return self._manifestctx.readdelta()

    @propertycache
    def _parents(self):
        repo = self._repo
        p1, p2 = repo.changelog.parentrevs(self._rev)
        if p2 == nullrev:
            return [changectx(repo, p1)]
        return [changectx(repo, p1), changectx(repo, p2)]

    def changeset(self):
        c = self._changeset
        return (
            c.manifest,
            c.user,
            c.date,
            c.files,
            c.description,
            c.extra,
        )
    def manifestnode(self):
        return self._changeset.manifest

    def user(self):
        return self._changeset.user
    def date(self):
        return self._changeset.date
    def files(self):
        return self._changeset.files
    def description(self):
        return self._changeset.description
    def branch(self):
        return encoding.tolocal(self._changeset.extra.get("branch"))
    def closesbranch(self):
        return 'close' in self._changeset.extra
    def extra(self):
        return self._changeset.extra
    def tags(self):
        return self._repo.nodetags(self._node)
    def bookmarks(self):
        return self._repo.nodebookmarks(self._node)
    def phase(self):
        return self._repo._phasecache.phase(self._repo, self._rev)
    def hidden(self):
        return self._rev in repoview.filterrevs(self._repo, 'visible')

    def children(self):
        """return contexts for each child changeset"""
        c = self._repo.changelog.children(self._node)
        return [changectx(self._repo, x) for x in c]

    def ancestors(self):
        for a in self._repo.changelog.ancestors([self._rev]):
            yield changectx(self._repo, a)

    def descendants(self):
        for d in self._repo.changelog.descendants([self._rev]):
            yield changectx(self._repo, d)

    def filectx(self, path, fileid=None, filelog=None):
        """get a file context from this changeset"""
        if fileid is None:
            fileid = self.filenode(path)
        return filectx(self._repo, path, fileid=fileid,
                       changectx=self, filelog=filelog)

    def ancestor(self, c2, warn=False):
        """return the "best" ancestor context of self and c2

        If there are multiple candidates, it will show a message and check
        merge.preferancestor configuration before falling back to the
        revlog ancestor."""
        # deal with workingctxs
        n2 = c2._node
        if n2 is None:
            n2 = c2._parents[0]._node
        cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
        if not cahs:
            anc = nullid
        elif len(cahs) == 1:
            anc = cahs[0]
        else:
            # experimental config: merge.preferancestor
            for r in self._repo.ui.configlist('merge', 'preferancestor', ['*']):
                try:
                    ctx = changectx(self._repo, r)
                except error.RepoLookupError:
                    continue
                anc = ctx.node()
                if anc in cahs:
                    break
            else:
                anc = self._repo.changelog.ancestor(self._node, n2)
            if warn:
                self._repo.ui.status(
                    (_("note: using %s as ancestor of %s and %s\n") %
                     (short(anc), short(self._node), short(n2))) +
                    ''.join(_(" alternatively, use --config "
                              "merge.preferancestor=%s\n") %
                            short(n) for n in sorted(cahs) if n != anc))
        return changectx(self._repo, anc)

    def descendant(self, other):
        """True if other is descendant of this changeset"""
        return self._repo.changelog.descendant(self._rev, other._rev)

    def walk(self, match):
        '''Generates matching file names.'''

        # Wrap match.bad method to have message with nodeid
        def bad(fn, msg):
            # The manifest doesn't know about subrepos, so don't complain about
            # paths into valid subrepos.
            if any(fn == s or fn.startswith(s + '/')
                   for s in self.substate):
                return
            match.bad(fn, _('no such file in rev %s') % self)

        m = matchmod.badmatch(match, bad)
        return self._manifest.walk(m)

    def matches(self, match):
        return self.walk(match)

class basefilectx(object):
    """A filecontext object represents the common logic for its children:
    filectx: read-only access to a filerevision that is already present
             in the repo,
    workingfilectx: a filecontext that represents files from the working
                    directory,
    memfilectx: a filecontext that represents files in-memory."""
    def __new__(cls, repo, path, *args, **kwargs):
        return super(basefilectx, cls).__new__(cls)

    @propertycache
    def _filelog(self):
        return self._repo.file(self._path)

    @propertycache
    def _changeid(self):
        if '_changeid' in self.__dict__:
            return self._changeid
        elif '_changectx' in self.__dict__:
            return self._changectx.rev()
        elif '_descendantrev' in self.__dict__:
            # this file context was created from a revision with a known
            # descendant, we can (lazily) correct for linkrev aliases
            return self._adjustlinkrev(self._descendantrev)
        else:
            return self._filelog.linkrev(self._filerev)

    @propertycache
    def _filenode(self):
        if '_fileid' in self.__dict__:
            return self._filelog.lookup(self._fileid)
        else:
            return self._changectx.filenode(self._path)

    @propertycache
    def _filerev(self):
        return self._filelog.rev(self._filenode)

    @propertycache
    def _repopath(self):
        return self._path

    def __nonzero__(self):
        try:
            self._filenode
            return True
        except error.LookupError:
            # file is missing
            return False

    def __str__(self):
        try:
            return "%s@%s" % (self.path(), self._changectx)
        except error.LookupError:
            return "%s@???" % self.path()

    def __repr__(self):
        return "<%s %s>" % (type(self).__name__, str(self))

    def __hash__(self):
        try:
            return hash((self._path, self._filenode))
        except AttributeError:
            return id(self)

    def __eq__(self, other):
        try:
            return (type(self) == type(other) and self._path == other._path
                    and self._filenode == other._filenode)
        except AttributeError:
            return False

    def __ne__(self, other):
        return not (self == other)

    def filerev(self):
        return self._filerev
    def filenode(self):
        return self._filenode
    def flags(self):
        return self._changectx.flags(self._path)
    def filelog(self):
        return self._filelog
    def rev(self):
        return self._changeid
    def linkrev(self):
        return self._filelog.linkrev(self._filerev)
    def node(self):
        return self._changectx.node()
    def hex(self):
        return self._changectx.hex()
    def user(self):
        return self._changectx.user()
    def date(self):
        return self._changectx.date()
    def files(self):
        return self._changectx.files()
    def description(self):
        return self._changectx.description()
    def branch(self):
        return self._changectx.branch()
    def extra(self):
        return self._changectx.extra()
    def phase(self):
        return self._changectx.phase()
    def phasestr(self):
        return self._changectx.phasestr()
    def manifest(self):
        return self._changectx.manifest()
    def changectx(self):
        return self._changectx
    def repo(self):
        return self._repo

    def path(self):
        return self._path

    def isbinary(self):
        try:
            return util.binary(self.data())
        except IOError:
            return False
    def isexec(self):
        return 'x' in self.flags()
    def islink(self):
        return 'l' in self.flags()

    def isabsent(self):
        """whether this filectx represents a file not in self._changectx

        This is mainly for merge code to detect change/delete conflicts. This is
        expected to be True for all subclasses of basectx."""
        return False

    _customcmp = False
    def cmp(self, fctx):
        """compare with other file context

        returns True if different than fctx.
        """
        if fctx._customcmp:
            return fctx.cmp(self)

        if (fctx._filenode is None
            and (self._repo._encodefilterpats
                 # if file data starts with '\1\n', empty metadata block is
                 # prepended, which adds 4 bytes to filelog.size().
                 or self.size() - 4 == fctx.size())
            or self.size() == fctx.size()):
            return self._filelog.cmp(self._filenode, fctx.data())

        return True

    def _adjustlinkrev(self, srcrev, inclusive=False):
        """return the first ancestor of <srcrev> introducing <fnode>

        If the linkrev of the file revision does not point to an ancestor of
        srcrev, we'll walk down the ancestors until we find one introducing
        this file revision.

        :srcrev: the changeset revision we search ancestors from
        :inclusive: if true, the src revision will also be checked
        """
        repo = self._repo
        cl = repo.unfiltered().changelog
        mfl = repo.manifestlog
        # fetch the linkrev
        lkr = self.linkrev()
        # hack to reuse ancestor computation when searching for renames
        memberanc = getattr(self, '_ancestrycontext', None)
        iteranc = None
        if srcrev is None:
            # wctx case, used by workingfilectx during mergecopy
            revs = [p.rev() for p in self._repo[None].parents()]
            inclusive = True # we skipped the real (revless) source
        else:
            revs = [srcrev]
        if memberanc is None:
            memberanc = iteranc = cl.ancestors(revs, lkr,
                                               inclusive=inclusive)
        # check if this linkrev is an ancestor of srcrev
        if lkr not in memberanc:
            if iteranc is None:
                iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
            fnode = self._filenode
            path = self._path
            for a in iteranc:
                ac = cl.read(a) # get changeset data (we avoid object creation)
                if path in ac[3]: # checking the 'files' field.
                    # The file has been touched, check if the content is
                    # similar to the one we search for.
                    if fnode == mfl[ac[0]].readfast().get(path):
                        return a
            # In theory, we should never get out of that loop without a result.
            # But if manifest uses a buggy file revision (not children of the
            # one it replaces) we could. Such a buggy situation will likely
            # result is crash somewhere else at to some point.
        return lkr

    def introrev(self):
        """return the rev of the changeset which introduced this file revision

        This method is different from linkrev because it take into account the
        changeset the filectx was created from. It ensures the returned
        revision is one of its ancestors. This prevents bugs from
        'linkrev-shadowing' when a file revision is used by multiple
        changesets.
        """
        lkr = self.linkrev()
        attrs = vars(self)
        noctx = not ('_changeid' in attrs or '_changectx' in attrs)
        if noctx or self.rev() == lkr:
            return self.linkrev()
        return self._adjustlinkrev(self.rev(), inclusive=True)

    def _parentfilectx(self, path, fileid, filelog):
        """create parent filectx keeping ancestry info for _adjustlinkrev()"""
        fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
        if '_changeid' in vars(self) or '_changectx' in vars(self):
            # If self is associated with a changeset (probably explicitly
            # fed), ensure the created filectx is associated with a
            # changeset that is an ancestor of self.changectx.
            # This lets us later use _adjustlinkrev to get a correct link.
            fctx._descendantrev = self.rev()
            fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
        elif '_descendantrev' in vars(self):
            # Otherwise propagate _descendantrev if we have one associated.
            fctx._descendantrev = self._descendantrev
            fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
        return fctx

    def parents(self):
        _path = self._path
        fl = self._filelog
        parents = self._filelog.parents(self._filenode)
        pl = [(_path, node, fl) for node in parents if node != nullid]

        r = fl.renamed(self._filenode)
        if r:
            # - In the simple rename case, both parent are nullid, pl is empty.
            # - In case of merge, only one of the parent is null id and should
            # be replaced with the rename information. This parent is -always-
            # the first one.
            #
            # As null id have always been filtered out in the previous list
            # comprehension, inserting to 0 will always result in "replacing
            # first nullid parent with rename information.
            pl.insert(0, (r[0], r[1], self._repo.file(r[0])))

        return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]

    def p1(self):
        return self.parents()[0]

    def p2(self):
        p = self.parents()
        if len(p) == 2:
            return p[1]
        return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)

    def annotate(self, follow=False, linenumber=False, diffopts=None):
        '''returns a list of tuples of ((ctx, number), line) for each line
        in the file, where ctx is the filectx of the node where
        that line was last changed; if linenumber parameter is true, number is
        the line number at the first appearance in the managed file, otherwise,
        number has a fixed value of False.
        '''

        def lines(text):
            if text.endswith("\n"):
                return text.count("\n")
            return text.count("\n") + int(bool(text))

        if linenumber:
            def decorate(text, rev):
                return ([(rev, i) for i in xrange(1, lines(text) + 1)], text)
        else:
            def decorate(text, rev):
                return ([(rev, False)] * lines(text), text)

        def pair(parent, child):
            blocks = mdiff.allblocks(parent[1], child[1], opts=diffopts)
            for (a1, a2, b1, b2), t in blocks:
                # Changed blocks ('!') or blocks made only of blank lines ('~')
                # belong to the child.
                if t == '=':
                    child[0][b1:b2] = parent[0][a1:a2]
            return child

        getlog = util.lrucachefunc(lambda x: self._repo.file(x))

        def parents(f):
            # Cut _descendantrev here to mitigate the penalty of lazy linkrev
            # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
            # from the topmost introrev (= srcrev) down to p.linkrev() if it
            # isn't an ancestor of the srcrev.
            f._changeid
            pl = f.parents()

            # Don't return renamed parents if we aren't following.
            if not follow:
                pl = [p for p in pl if p.path() == f.path()]

            # renamed filectx won't have a filelog yet, so set it
978 # renamed filectx won't have a filelog yet, so set it
979 # from the cache to save time
979 # from the cache to save time
980 for p in pl:
980 for p in pl:
981 if '_filelog' not in p.__dict__:
981 if '_filelog' not in p.__dict__:
982 p._filelog = getlog(p.path())
982 p._filelog = getlog(p.path())
983
983
984 return pl
984 return pl
985
985
986 # use linkrev to find the first changeset where self appeared
986 # use linkrev to find the first changeset where self appeared
987 base = self
987 base = self
988 introrev = self.introrev()
988 introrev = self.introrev()
989 if self.rev() != introrev:
989 if self.rev() != introrev:
990 base = self.filectx(self.filenode(), changeid=introrev)
990 base = self.filectx(self.filenode(), changeid=introrev)
991 if getattr(base, '_ancestrycontext', None) is None:
991 if getattr(base, '_ancestrycontext', None) is None:
992 cl = self._repo.changelog
992 cl = self._repo.changelog
993 if introrev is None:
993 if introrev is None:
994 # wctx is not inclusive, but works because _ancestrycontext
994 # wctx is not inclusive, but works because _ancestrycontext
995 # is used to test filelog revisions
995 # is used to test filelog revisions
996 ac = cl.ancestors([p.rev() for p in base.parents()],
996 ac = cl.ancestors([p.rev() for p in base.parents()],
997 inclusive=True)
997 inclusive=True)
998 else:
998 else:
999 ac = cl.ancestors([introrev], inclusive=True)
999 ac = cl.ancestors([introrev], inclusive=True)
1000 base._ancestrycontext = ac
1000 base._ancestrycontext = ac
1001
1001
1002 # This algorithm would prefer to be recursive, but Python is a
1002 # This algorithm would prefer to be recursive, but Python is a
1003 # bit recursion-hostile. Instead we do an iterative
1003 # bit recursion-hostile. Instead we do an iterative
1004 # depth-first search.
1004 # depth-first search.
1005
1005
1006 # 1st DFS pre-calculates pcache and needed
1006 # 1st DFS pre-calculates pcache and needed
1007 visit = [base]
1007 visit = [base]
1008 pcache = {}
1008 pcache = {}
1009 needed = {base: 1}
1009 needed = {base: 1}
1010 while visit:
1010 while visit:
1011 f = visit.pop()
1011 f = visit.pop()
1012 if f in pcache:
1012 if f in pcache:
1013 continue
1013 continue
1014 pl = parents(f)
1014 pl = parents(f)
1015 pcache[f] = pl
1015 pcache[f] = pl
1016 for p in pl:
1016 for p in pl:
1017 needed[p] = needed.get(p, 0) + 1
1017 needed[p] = needed.get(p, 0) + 1
1018 if p not in pcache:
1018 if p not in pcache:
1019 visit.append(p)
1019 visit.append(p)
1020
1020
1021 # 2nd DFS does the actual annotate
1021 # 2nd DFS does the actual annotate
1022 visit[:] = [base]
1022 visit[:] = [base]
1023 hist = {}
1023 hist = {}
1024 while visit:
1024 while visit:
1025 f = visit[-1]
1025 f = visit[-1]
1026 if f in hist:
1026 if f in hist:
1027 visit.pop()
1027 visit.pop()
1028 continue
1028 continue
1029
1029
1030 ready = True
1030 ready = True
1031 pl = pcache[f]
1031 pl = pcache[f]
1032 for p in pl:
1032 for p in pl:
1033 if p not in hist:
1033 if p not in hist:
1034 ready = False
1034 ready = False
1035 visit.append(p)
1035 visit.append(p)
1036 if ready:
1036 if ready:
1037 visit.pop()
1037 visit.pop()
1038 curr = decorate(f.data(), f)
1038 curr = decorate(f.data(), f)
1039 for p in pl:
1039 for p in pl:
1040 curr = pair(hist[p], curr)
1040 curr = pair(hist[p], curr)
1041 if needed[p] == 1:
1041 if needed[p] == 1:
1042 del hist[p]
1042 del hist[p]
1043 del needed[p]
1043 del needed[p]
1044 else:
1044 else:
1045 needed[p] -= 1
1045 needed[p] -= 1
1046
1046
1047 hist[f] = curr
1047 hist[f] = curr
1048 del pcache[f]
1048 del pcache[f]
1049
1049
1050 return zip(hist[base][0], hist[base][1].splitlines(True))
1050 return zip(hist[base][0], hist[base][1].splitlines(True))
1051
1051
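# --- editorial sketch, not part of context.py ----------------------------
# annotate() above works in two passes: decorate() tags every line of a file
# revision with its context, and pair() copies the tags of unchanged ('=')
# diff blocks down from the parent so only new or changed lines keep the
# child's tag.  A minimal standalone model of those two steps, using difflib
# in place of mdiff (everything below is illustrative, not Mercurial API):

import difflib

def toy_decorate(lines, rev):
    return [(rev, l) for l in lines]

def toy_pair(parent, child):
    a = [l for _, l in parent]
    b = [l for _, l in child]
    for m in difflib.SequenceMatcher(None, a, b).get_matching_blocks():
        # equal blocks inherit the parent's attribution, like the '=' case above
        child[m.b:m.b + m.size] = parent[m.a:m.a + m.size]
    return child

if __name__ == '__main__':
    p = toy_decorate(["int x;", "return x;"], rev=1)
    c = toy_pair(p, toy_decorate(["int x;", "x += 1;", "return x;"], rev=2))
    assert [rev for rev, _ in c] == [1, 2, 1]
# --------------------------------------------------------------------------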
1052 def ancestors(self, followfirst=False):
1052 def ancestors(self, followfirst=False):
1053 visit = {}
1053 visit = {}
1054 c = self
1054 c = self
1055 if followfirst:
1055 if followfirst:
1056 cut = 1
1056 cut = 1
1057 else:
1057 else:
1058 cut = None
1058 cut = None
1059
1059
1060 while True:
1060 while True:
1061 for parent in c.parents()[:cut]:
1061 for parent in c.parents()[:cut]:
1062 visit[(parent.linkrev(), parent.filenode())] = parent
1062 visit[(parent.linkrev(), parent.filenode())] = parent
1063 if not visit:
1063 if not visit:
1064 break
1064 break
1065 c = visit.pop(max(visit))
1065 c = visit.pop(max(visit))
1066 yield c
1066 yield c
1067
1067
1068 class filectx(basefilectx):
1068 class filectx(basefilectx):
1069 """A filecontext object makes access to data related to a particular
1069 """A filecontext object makes access to data related to a particular
1070 filerevision convenient."""
1070 filerevision convenient."""
1071 def __init__(self, repo, path, changeid=None, fileid=None,
1071 def __init__(self, repo, path, changeid=None, fileid=None,
1072 filelog=None, changectx=None):
1072 filelog=None, changectx=None):
1073 """changeid can be a changeset revision, node, or tag.
1073 """changeid can be a changeset revision, node, or tag.
1074 fileid can be a file revision or node."""
1074 fileid can be a file revision or node."""
1075 self._repo = repo
1075 self._repo = repo
1076 self._path = path
1076 self._path = path
1077
1077
1078 assert (changeid is not None
1078 assert (changeid is not None
1079 or fileid is not None
1079 or fileid is not None
1080 or changectx is not None), \
1080 or changectx is not None), \
1081 ("bad args: changeid=%r, fileid=%r, changectx=%r"
1081 ("bad args: changeid=%r, fileid=%r, changectx=%r"
1082 % (changeid, fileid, changectx))
1082 % (changeid, fileid, changectx))
1083
1083
1084 if filelog is not None:
1084 if filelog is not None:
1085 self._filelog = filelog
1085 self._filelog = filelog
1086
1086
1087 if changeid is not None:
1087 if changeid is not None:
1088 self._changeid = changeid
1088 self._changeid = changeid
1089 if changectx is not None:
1089 if changectx is not None:
1090 self._changectx = changectx
1090 self._changectx = changectx
1091 if fileid is not None:
1091 if fileid is not None:
1092 self._fileid = fileid
1092 self._fileid = fileid
1093
1093
1094 @propertycache
1094 @propertycache
1095 def _changectx(self):
1095 def _changectx(self):
1096 try:
1096 try:
1097 return changectx(self._repo, self._changeid)
1097 return changectx(self._repo, self._changeid)
1098 except error.FilteredRepoLookupError:
1098 except error.FilteredRepoLookupError:
1099 # Linkrev may point to any revision in the repository. When the
1099 # Linkrev may point to any revision in the repository. When the
1100 # repository is filtered this may lead to `filectx` trying to build
1100 # repository is filtered this may lead to `filectx` trying to build
1101 # `changectx` for filtered revision. In such case we fallback to
1101 # `changectx` for filtered revision. In such case we fallback to
1102 # creating `changectx` on the unfiltered version of the repository.
1102 # creating `changectx` on the unfiltered version of the repository.
1103 # This fallback should not be an issue because `changectx` from
1103 # This fallback should not be an issue because `changectx` from
1104 # `filectx` are not used in complex operations that care about
1104 # `filectx` are not used in complex operations that care about
1105 # filtering.
1105 # filtering.
1106 #
1106 #
1107 # This fallback is a cheap and dirty fix that prevents several
1107 # This fallback is a cheap and dirty fix that prevents several
1108 # crashes. It does not ensure the behavior is correct. However the
1108 # crashes. It does not ensure the behavior is correct. However the
1109 # behavior was not correct before filtering either and "incorrect
1109 # behavior was not correct before filtering either and "incorrect
1110 # behavior" is seen as better than "crash".
1110 # behavior" is seen as better than "crash".
1111 #
1111 #
1112 # Linkrevs have several serious troubles with filtering that are
1112 # Linkrevs have several serious troubles with filtering that are
1113 # complicated to solve. Proper handling of the issue here should be
1113 # complicated to solve. Proper handling of the issue here should be
1114 # revisited when fixing the linkrev issues is on the table.
1114 # revisited when fixing the linkrev issues is on the table.
1115 return changectx(self._repo.unfiltered(), self._changeid)
1115 return changectx(self._repo.unfiltered(), self._changeid)
1116
1116
1117 def filectx(self, fileid, changeid=None):
1117 def filectx(self, fileid, changeid=None):
1118 '''opens an arbitrary revision of the file without
1118 '''opens an arbitrary revision of the file without
1119 opening a new filelog'''
1119 opening a new filelog'''
1120 return filectx(self._repo, self._path, fileid=fileid,
1120 return filectx(self._repo, self._path, fileid=fileid,
1121 filelog=self._filelog, changeid=changeid)
1121 filelog=self._filelog, changeid=changeid)
1122
1122
1123 def rawdata(self):
1123 def rawdata(self):
1124 return self._filelog.revision(self._filenode, raw=True)
1124 return self._filelog.revision(self._filenode, raw=True)
1125
1125
1126 def data(self):
1126 def data(self):
1127 try:
1127 try:
1128 return self._filelog.read(self._filenode)
1128 return self._filelog.read(self._filenode)
1129 except error.CensoredNodeError:
1129 except error.CensoredNodeError:
1130 if self._repo.ui.config("censor", "policy", "abort") == "ignore":
1130 if self._repo.ui.config("censor", "policy", "abort") == "ignore":
1131 return ""
1131 return ""
1132 raise error.Abort(_("censored node: %s") % short(self._filenode),
1132 raise error.Abort(_("censored node: %s") % short(self._filenode),
1133 hint=_("set censor.policy to ignore errors"))
1133 hint=_("set censor.policy to ignore errors"))
1134
1134
1135 def size(self):
1135 def size(self):
1136 return self._filelog.size(self._filerev)
1136 return self._filelog.size(self._filerev)
1137
1137
1138 def renamed(self):
1138 def renamed(self):
1139 """check if file was actually renamed in this changeset revision
1139 """check if file was actually renamed in this changeset revision
1140
1140
1141 If a rename is logged in the file revision, we report a copy for the
1141 If a rename is logged in the file revision, we report a copy for the
1142 changeset only if the file revision's linkrev points back to the changeset
1142 changeset only if the file revision's linkrev points back to the changeset
1143 in question or both changeset parents contain different file revisions.
1143 in question or both changeset parents contain different file revisions.
1144 """
1144 """
1145
1145
1146 renamed = self._filelog.renamed(self._filenode)
1146 renamed = self._filelog.renamed(self._filenode)
1147 if not renamed:
1147 if not renamed:
1148 return renamed
1148 return renamed
1149
1149
1150 if self.rev() == self.linkrev():
1150 if self.rev() == self.linkrev():
1151 return renamed
1151 return renamed
1152
1152
1153 name = self.path()
1153 name = self.path()
1154 fnode = self._filenode
1154 fnode = self._filenode
1155 for p in self._changectx.parents():
1155 for p in self._changectx.parents():
1156 try:
1156 try:
1157 if fnode == p.filenode(name):
1157 if fnode == p.filenode(name):
1158 return None
1158 return None
1159 except error.LookupError:
1159 except error.LookupError:
1160 pass
1160 pass
1161 return renamed
1161 return renamed
1162
1162
1163 def children(self):
1163 def children(self):
1164 # hard for renames
1164 # hard for renames
1165 c = self._filelog.children(self._filenode)
1165 c = self._filelog.children(self._filenode)
1166 return [filectx(self._repo, self._path, fileid=x,
1166 return [filectx(self._repo, self._path, fileid=x,
1167 filelog=self._filelog) for x in c]
1167 filelog=self._filelog) for x in c]
1168
1168
1169 def _changesrange(fctx1, fctx2, linerange2, diffopts):
1169 def _changesrange(fctx1, fctx2, linerange2, diffopts):
1170 """Return `(diffinrange, linerange1)` where `diffinrange` is True
1170 """Return `(diffinrange, linerange1)` where `diffinrange` is True
1171 if diff from fctx2 to fctx1 has changes in linerange2 and
1171 if diff from fctx2 to fctx1 has changes in linerange2 and
1172 `linerange1` is the new line range for fctx1.
1172 `linerange1` is the new line range for fctx1.
1173 """
1173 """
1174 blocks = mdiff.allblocks(fctx1.data(), fctx2.data(), diffopts)
1174 blocks = mdiff.allblocks(fctx1.data(), fctx2.data(), diffopts)
1175 filteredblocks, linerange1 = mdiff.blocksinrange(blocks, linerange2)
1175 filteredblocks, linerange1 = mdiff.blocksinrange(blocks, linerange2)
1176 diffinrange = any(stype == '!' for _, stype in filteredblocks)
1176 diffinrange = any(stype == '!' for _, stype in filteredblocks)
1177 return diffinrange, linerange1
1177 return diffinrange, linerange1
1178
1178
1179 def blockancestors(fctx, fromline, toline, followfirst=False):
1179 def blockancestors(fctx, fromline, toline, followfirst=False):
1180 """Yield ancestors of `fctx` with respect to the block of lines within
1180 """Yield ancestors of `fctx` with respect to the block of lines within
1181 `fromline`-`toline` range.
1181 `fromline`-`toline` range.
1182 """
1182 """
1183 diffopts = patch.diffopts(fctx._repo.ui)
1183 diffopts = patch.diffopts(fctx._repo.ui)
1184 visit = {(fctx.linkrev(), fctx.filenode()): (fctx, (fromline, toline))}
1184 visit = {(fctx.linkrev(), fctx.filenode()): (fctx, (fromline, toline))}
1185 while visit:
1185 while visit:
1186 c, linerange2 = visit.pop(max(visit))
1186 c, linerange2 = visit.pop(max(visit))
1187 pl = c.parents()
1187 pl = c.parents()
1188 if followfirst:
1188 if followfirst:
1189 pl = pl[:1]
1189 pl = pl[:1]
1190 if not pl:
1190 if not pl:
1191 # The block originates from the initial revision.
1191 # The block originates from the initial revision.
1192 yield c, linerange2
1192 yield c, linerange2
1193 continue
1193 continue
1194 inrange = False
1194 inrange = False
1195 for p in pl:
1195 for p in pl:
1196 inrangep, linerange1 = _changesrange(p, c, linerange2, diffopts)
1196 inrangep, linerange1 = _changesrange(p, c, linerange2, diffopts)
1197 inrange = inrange or inrangep
1197 inrange = inrange or inrangep
1198 if linerange1[0] == linerange1[1]:
1198 if linerange1[0] == linerange1[1]:
1199 # Parent's linerange is empty, meaning that the block got
1199 # Parent's linerange is empty, meaning that the block got
1200 # introduced in this revision; no need to go further in this
1200 # introduced in this revision; no need to go further in this
1201 # branch.
1201 # branch.
1202 continue
1202 continue
1203 visit[p.linkrev(), p.filenode()] = p, linerange1
1203 visit[p.linkrev(), p.filenode()] = p, linerange1
1204 if inrange:
1204 if inrange:
1205 yield c, linerange2
1205 yield c, linerange2
1206
1206
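# --- editorial sketch, not part of context.py ----------------------------
# blockancestors() above keeps pending ancestors in a plain dict keyed by
# (linkrev, filenode) and pops the largest key each time, so ancestors are
# visited newest-first without a heap; a revision reachable along two paths
# is stored only once because the dict keys deduplicate it.  A standalone
# model of that loop over a toy parent map (revision numbers only, assuming
# parents always have smaller numbers than their children):

def toy_newest_first(start, parentmap):
    visit = {start: start}
    while visit:
        rev = visit.pop(max(visit))
        yield rev
        for p in parentmap.get(rev, ()):
            visit[p] = p

if __name__ == '__main__':
    parentmap = {5: [3, 4], 4: [2], 3: [2], 2: [1], 1: []}
    assert list(toy_newest_first(5, parentmap)) == [5, 4, 3, 2, 1]
# --------------------------------------------------------------------------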
1207 class committablectx(basectx):
1207 class committablectx(basectx):
1208 """A committablectx object provides common functionality for a context that
1208 """A committablectx object provides common functionality for a context that
1209 wants the ability to commit, e.g. workingctx or memctx."""
1209 wants the ability to commit, e.g. workingctx or memctx."""
1210 def __init__(self, repo, text="", user=None, date=None, extra=None,
1210 def __init__(self, repo, text="", user=None, date=None, extra=None,
1211 changes=None):
1211 changes=None):
1212 self._repo = repo
1212 self._repo = repo
1213 self._rev = None
1213 self._rev = None
1214 self._node = None
1214 self._node = None
1215 self._text = text
1215 self._text = text
1216 if date:
1216 if date:
1217 self._date = util.parsedate(date)
1217 self._date = util.parsedate(date)
1218 if user:
1218 if user:
1219 self._user = user
1219 self._user = user
1220 if changes:
1220 if changes:
1221 self._status = changes
1221 self._status = changes
1222
1222
1223 self._extra = {}
1223 self._extra = {}
1224 if extra:
1224 if extra:
1225 self._extra = extra.copy()
1225 self._extra = extra.copy()
1226 if 'branch' not in self._extra:
1226 if 'branch' not in self._extra:
1227 try:
1227 try:
1228 branch = encoding.fromlocal(self._repo.dirstate.branch())
1228 branch = encoding.fromlocal(self._repo.dirstate.branch())
1229 except UnicodeDecodeError:
1229 except UnicodeDecodeError:
1230 raise error.Abort(_('branch name not in UTF-8!'))
1230 raise error.Abort(_('branch name not in UTF-8!'))
1231 self._extra['branch'] = branch
1231 self._extra['branch'] = branch
1232 if self._extra['branch'] == '':
1232 if self._extra['branch'] == '':
1233 self._extra['branch'] = 'default'
1233 self._extra['branch'] = 'default'
1234
1234
1235 def __str__(self):
1235 def __str__(self):
1236 return str(self._parents[0]) + "+"
1236 return str(self._parents[0]) + "+"
1237
1237
1238 def __nonzero__(self):
1238 def __nonzero__(self):
1239 return True
1239 return True
1240
1240
1241 def _buildflagfunc(self):
1241 def _buildflagfunc(self):
1242 # Create a fallback function for getting file flags when the
1242 # Create a fallback function for getting file flags when the
1243 # filesystem doesn't support them
1243 # filesystem doesn't support them
1244
1244
1245 copiesget = self._repo.dirstate.copies().get
1245 copiesget = self._repo.dirstate.copies().get
1246 parents = self.parents()
1246 parents = self.parents()
1247 if len(parents) < 2:
1247 if len(parents) < 2:
1248 # when we have one parent, it's easy: copy from parent
1248 # when we have one parent, it's easy: copy from parent
1249 man = parents[0].manifest()
1249 man = parents[0].manifest()
1250 def func(f):
1250 def func(f):
1251 f = copiesget(f, f)
1251 f = copiesget(f, f)
1252 return man.flags(f)
1252 return man.flags(f)
1253 else:
1253 else:
1254 # merges are tricky: we try to reconstruct the unstored
1254 # merges are tricky: we try to reconstruct the unstored
1255 # result from the merge (issue1802)
1255 # result from the merge (issue1802)
1256 p1, p2 = parents
1256 p1, p2 = parents
1257 pa = p1.ancestor(p2)
1257 pa = p1.ancestor(p2)
1258 m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()
1258 m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()
1259
1259
1260 def func(f):
1260 def func(f):
1261 f = copiesget(f, f) # may be wrong for merges with copies
1261 f = copiesget(f, f) # may be wrong for merges with copies
1262 fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
1262 fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
1263 if fl1 == fl2:
1263 if fl1 == fl2:
1264 return fl1
1264 return fl1
1265 if fl1 == fla:
1265 if fl1 == fla:
1266 return fl2
1266 return fl2
1267 if fl2 == fla:
1267 if fl2 == fla:
1268 return fl1
1268 return fl1
1269 return '' # punt for conflicts
1269 return '' # punt for conflicts
1270
1270
1271 return func
1271 return func
1272
1272
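# --- editorial sketch, not part of context.py ----------------------------
# The merge branch of _buildflagfunc() above resolves file flags with a
# classic three-way rule: keep whichever side changed relative to the
# ancestor, and punt to '' when both sides changed it differently.  The same
# rule as a standalone helper:

def toy_mergeflags(fl1, fl2, fla):
    if fl1 == fl2:   # both parents agree
        return fl1
    if fl1 == fla:   # only the second parent changed the flag
        return fl2
    if fl2 == fla:   # only the first parent changed the flag
        return fl1
    return ''        # conflicting changes: punt

if __name__ == '__main__':
    assert toy_mergeflags('x', 'x', '') == 'x'   # both made it executable
    assert toy_mergeflags('', 'l', '') == 'l'    # only p2 made it a symlink
    assert toy_mergeflags('x', 'l', '') == ''    # conflict
# --------------------------------------------------------------------------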
1273 @propertycache
1273 @propertycache
1274 def _flagfunc(self):
1274 def _flagfunc(self):
1275 return self._repo.dirstate.flagfunc(self._buildflagfunc)
1275 return self._repo.dirstate.flagfunc(self._buildflagfunc)
1276
1276
1277 @propertycache
1277 @propertycache
1278 def _status(self):
1278 def _status(self):
1279 return self._repo.status()
1279 return self._repo.status()
1280
1280
1281 @propertycache
1281 @propertycache
1282 def _user(self):
1282 def _user(self):
1283 return self._repo.ui.username()
1283 return self._repo.ui.username()
1284
1284
1285 @propertycache
1285 @propertycache
1286 def _date(self):
1286 def _date(self):
1287 return util.makedate()
1287 return util.makedate()
1288
1288
1289 def subrev(self, subpath):
1289 def subrev(self, subpath):
1290 return None
1290 return None
1291
1291
1292 def manifestnode(self):
1292 def manifestnode(self):
1293 return None
1293 return None
1294 def user(self):
1294 def user(self):
1295 return self._user or self._repo.ui.username()
1295 return self._user or self._repo.ui.username()
1296 def date(self):
1296 def date(self):
1297 return self._date
1297 return self._date
1298 def description(self):
1298 def description(self):
1299 return self._text
1299 return self._text
1300 def files(self):
1300 def files(self):
1301 return sorted(self._status.modified + self._status.added +
1301 return sorted(self._status.modified + self._status.added +
1302 self._status.removed)
1302 self._status.removed)
1303
1303
1304 def modified(self):
1304 def modified(self):
1305 return self._status.modified
1305 return self._status.modified
1306 def added(self):
1306 def added(self):
1307 return self._status.added
1307 return self._status.added
1308 def removed(self):
1308 def removed(self):
1309 return self._status.removed
1309 return self._status.removed
1310 def deleted(self):
1310 def deleted(self):
1311 return self._status.deleted
1311 return self._status.deleted
1312 def branch(self):
1312 def branch(self):
1313 return encoding.tolocal(self._extra['branch'])
1313 return encoding.tolocal(self._extra['branch'])
1314 def closesbranch(self):
1314 def closesbranch(self):
1315 return 'close' in self._extra
1315 return 'close' in self._extra
1316 def extra(self):
1316 def extra(self):
1317 return self._extra
1317 return self._extra
1318
1318
1319 def tags(self):
1319 def tags(self):
1320 return []
1320 return []
1321
1321
1322 def bookmarks(self):
1322 def bookmarks(self):
1323 b = []
1323 b = []
1324 for p in self.parents():
1324 for p in self.parents():
1325 b.extend(p.bookmarks())
1325 b.extend(p.bookmarks())
1326 return b
1326 return b
1327
1327
1328 def phase(self):
1328 def phase(self):
1329 phase = phases.draft # default phase to draft
1329 phase = phases.draft # default phase to draft
1330 for p in self.parents():
1330 for p in self.parents():
1331 phase = max(phase, p.phase())
1331 phase = max(phase, p.phase())
1332 return phase
1332 return phase
1333
1333
1334 def hidden(self):
1334 def hidden(self):
1335 return False
1335 return False
1336
1336
1337 def children(self):
1337 def children(self):
1338 return []
1338 return []
1339
1339
1340 def flags(self, path):
1340 def flags(self, path):
1341 if '_manifest' in self.__dict__:
1341 if '_manifest' in self.__dict__:
1342 try:
1342 try:
1343 return self._manifest.flags(path)
1343 return self._manifest.flags(path)
1344 except KeyError:
1344 except KeyError:
1345 return ''
1345 return ''
1346
1346
1347 try:
1347 try:
1348 return self._flagfunc(path)
1348 return self._flagfunc(path)
1349 except OSError:
1349 except OSError:
1350 return ''
1350 return ''
1351
1351
1352 def ancestor(self, c2):
1352 def ancestor(self, c2):
1353 """return the "best" ancestor context of self and c2"""
1353 """return the "best" ancestor context of self and c2"""
1354 return self._parents[0].ancestor(c2) # punt on two parents for now
1354 return self._parents[0].ancestor(c2) # punt on two parents for now
1355
1355
1356 def walk(self, match):
1356 def walk(self, match):
1357 '''Generates matching file names.'''
1357 '''Generates matching file names.'''
1358 return sorted(self._repo.dirstate.walk(match, sorted(self.substate),
1358 return sorted(self._repo.dirstate.walk(match, sorted(self.substate),
1359 True, False))
1359 True, False))
1360
1360
1361 def matches(self, match):
1361 def matches(self, match):
1362 return sorted(self._repo.dirstate.matches(match))
1362 return sorted(self._repo.dirstate.matches(match))
1363
1363
1364 def ancestors(self):
1364 def ancestors(self):
1365 for p in self._parents:
1365 for p in self._parents:
1366 yield p
1366 yield p
1367 for a in self._repo.changelog.ancestors(
1367 for a in self._repo.changelog.ancestors(
1368 [p.rev() for p in self._parents]):
1368 [p.rev() for p in self._parents]):
1369 yield changectx(self._repo, a)
1369 yield changectx(self._repo, a)
1370
1370
1371 def markcommitted(self, node):
1371 def markcommitted(self, node):
1372 """Perform post-commit cleanup necessary after committing this ctx
1372 """Perform post-commit cleanup necessary after committing this ctx
1373
1373
1374 Specifically, this updates the backing stores that this working context
1374 Specifically, this updates the backing stores that this working context
1375 wraps to reflect the fact that the changes reflected by this
1375 wraps to reflect the fact that the changes reflected by this
1376 workingctx have been committed. For example, it marks
1376 workingctx have been committed. For example, it marks
1377 modified and added files as normal in the dirstate.
1377 modified and added files as normal in the dirstate.
1378
1378
1379 """
1379 """
1380
1380
1381 self._repo.dirstate.beginparentchange()
1381 self._repo.dirstate.beginparentchange()
1382 for f in self.modified() + self.added():
1382 for f in self.modified() + self.added():
1383 self._repo.dirstate.normal(f)
1383 self._repo.dirstate.normal(f)
1384 for f in self.removed():
1384 for f in self.removed():
1385 self._repo.dirstate.drop(f)
1385 self._repo.dirstate.drop(f)
1386 self._repo.dirstate.setparents(node)
1386 self._repo.dirstate.setparents(node)
1387 self._repo.dirstate.endparentchange()
1387 self._repo.dirstate.endparentchange()
1388
1388
1389 # write changes out explicitly, because nesting wlock at
1389 # write changes out explicitly, because nesting wlock at
1390 # runtime may prevent 'wlock.release()' in 'repo.commit()'
1390 # runtime may prevent 'wlock.release()' in 'repo.commit()'
1391 # from immediately doing so for subsequent changing files
1391 # from immediately doing so for subsequent changing files
1392 self._repo.dirstate.write(self._repo.currenttransaction())
1392 self._repo.dirstate.write(self._repo.currenttransaction())
1393
1393
1394 class workingctx(committablectx):
1394 class workingctx(committablectx):
1395 """A workingctx object makes access to data related to
1395 """A workingctx object makes access to data related to
1396 the current working directory convenient.
1396 the current working directory convenient.
1397 date - any valid date string or (unixtime, offset), or None.
1397 date - any valid date string or (unixtime, offset), or None.
1398 user - username string, or None.
1398 user - username string, or None.
1399 extra - a dictionary of extra values, or None.
1399 extra - a dictionary of extra values, or None.
1400 changes - a list of file lists as returned by localrepo.status()
1400 changes - a list of file lists as returned by localrepo.status()
1401 or None to use the repository status.
1401 or None to use the repository status.
1402 """
1402 """
1403 def __init__(self, repo, text="", user=None, date=None, extra=None,
1403 def __init__(self, repo, text="", user=None, date=None, extra=None,
1404 changes=None):
1404 changes=None):
1405 super(workingctx, self).__init__(repo, text, user, date, extra, changes)
1405 super(workingctx, self).__init__(repo, text, user, date, extra, changes)
1406
1406
1407 def __iter__(self):
1407 def __iter__(self):
1408 d = self._repo.dirstate
1408 d = self._repo.dirstate
1409 for f in d:
1409 for f in d:
1410 if d[f] != 'r':
1410 if d[f] != 'r':
1411 yield f
1411 yield f
1412
1412
1413 def __contains__(self, key):
1413 def __contains__(self, key):
1414 return self._repo.dirstate[key] not in "?r"
1414 return self._repo.dirstate[key] not in "?r"
1415
1415
1416 def hex(self):
1416 def hex(self):
1417 return hex(wdirid)
1417 return hex(wdirid)
1418
1418
1419 @propertycache
1419 @propertycache
1420 def _parents(self):
1420 def _parents(self):
1421 p = self._repo.dirstate.parents()
1421 p = self._repo.dirstate.parents()
1422 if p[1] == nullid:
1422 if p[1] == nullid:
1423 p = p[:-1]
1423 p = p[:-1]
1424 return [changectx(self._repo, x) for x in p]
1424 return [changectx(self._repo, x) for x in p]
1425
1425
1426 def filectx(self, path, filelog=None):
1426 def filectx(self, path, filelog=None):
1427 """get a file context from the working directory"""
1427 """get a file context from the working directory"""
1428 return workingfilectx(self._repo, path, workingctx=self,
1428 return workingfilectx(self._repo, path, workingctx=self,
1429 filelog=filelog)
1429 filelog=filelog)
1430
1430
1431 def dirty(self, missing=False, merge=True, branch=True):
1431 def dirty(self, missing=False, merge=True, branch=True):
1432 "check whether a working directory is modified"
1432 "check whether a working directory is modified"
1433 # check subrepos first
1433 # check subrepos first
1434 for s in sorted(self.substate):
1434 for s in sorted(self.substate):
1435 if self.sub(s).dirty():
1435 if self.sub(s).dirty():
1436 return True
1436 return True
1437 # check current working dir
1437 # check current working dir
1438 return ((merge and self.p2()) or
1438 return ((merge and self.p2()) or
1439 (branch and self.branch() != self.p1().branch()) or
1439 (branch and self.branch() != self.p1().branch()) or
1440 self.modified() or self.added() or self.removed() or
1440 self.modified() or self.added() or self.removed() or
1441 (missing and self.deleted()))
1441 (missing and self.deleted()))
1442
1442
1443 def add(self, list, prefix=""):
1443 def add(self, list, prefix=""):
1444 join = lambda f: os.path.join(prefix, f)
1444 join = lambda f: os.path.join(prefix, f)
1445 with self._repo.wlock():
1445 with self._repo.wlock():
1446 ui, ds = self._repo.ui, self._repo.dirstate
1446 ui, ds = self._repo.ui, self._repo.dirstate
1447 rejected = []
1447 rejected = []
1448 lstat = self._repo.wvfs.lstat
1448 lstat = self._repo.wvfs.lstat
1449 for f in list:
1449 for f in list:
1450 scmutil.checkportable(ui, join(f))
1450 scmutil.checkportable(ui, join(f))
1451 try:
1451 try:
1452 st = lstat(f)
1452 st = lstat(f)
1453 except OSError:
1453 except OSError:
1454 ui.warn(_("%s does not exist!\n") % join(f))
1454 ui.warn(_("%s does not exist!\n") % join(f))
1455 rejected.append(f)
1455 rejected.append(f)
1456 continue
1456 continue
1457 if st.st_size > 10000000:
1457 if st.st_size > 10000000:
1458 ui.warn(_("%s: up to %d MB of RAM may be required "
1458 ui.warn(_("%s: up to %d MB of RAM may be required "
1459 "to manage this file\n"
1459 "to manage this file\n"
1460 "(use 'hg revert %s' to cancel the "
1460 "(use 'hg revert %s' to cancel the "
1461 "pending addition)\n")
1461 "pending addition)\n")
1462 % (f, 3 * st.st_size // 1000000, join(f)))
1462 % (f, 3 * st.st_size // 1000000, join(f)))
1463 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1463 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1464 ui.warn(_("%s not added: only files and symlinks "
1464 ui.warn(_("%s not added: only files and symlinks "
1465 "supported currently\n") % join(f))
1465 "supported currently\n") % join(f))
1466 rejected.append(f)
1466 rejected.append(f)
1467 elif ds[f] in 'amn':
1467 elif ds[f] in 'amn':
1468 ui.warn(_("%s already tracked!\n") % join(f))
1468 ui.warn(_("%s already tracked!\n") % join(f))
1469 elif ds[f] == 'r':
1469 elif ds[f] == 'r':
1470 ds.normallookup(f)
1470 ds.normallookup(f)
1471 else:
1471 else:
1472 ds.add(f)
1472 ds.add(f)
1473 return rejected
1473 return rejected
1474
1474
1475 def forget(self, files, prefix=""):
1475 def forget(self, files, prefix=""):
1476 join = lambda f: os.path.join(prefix, f)
1476 join = lambda f: os.path.join(prefix, f)
1477 with self._repo.wlock():
1477 with self._repo.wlock():
1478 rejected = []
1478 rejected = []
1479 for f in files:
1479 for f in files:
1480 if f not in self._repo.dirstate:
1480 if f not in self._repo.dirstate:
1481 self._repo.ui.warn(_("%s not tracked!\n") % join(f))
1481 self._repo.ui.warn(_("%s not tracked!\n") % join(f))
1482 rejected.append(f)
1482 rejected.append(f)
1483 elif self._repo.dirstate[f] != 'a':
1483 elif self._repo.dirstate[f] != 'a':
1484 self._repo.dirstate.remove(f)
1484 self._repo.dirstate.remove(f)
1485 else:
1485 else:
1486 self._repo.dirstate.drop(f)
1486 self._repo.dirstate.drop(f)
1487 return rejected
1487 return rejected
1488
1488
1489 def undelete(self, list):
1489 def undelete(self, list):
1490 pctxs = self.parents()
1490 pctxs = self.parents()
1491 with self._repo.wlock():
1491 with self._repo.wlock():
1492 for f in list:
1492 for f in list:
1493 if self._repo.dirstate[f] != 'r':
1493 if self._repo.dirstate[f] != 'r':
1494 self._repo.ui.warn(_("%s not removed!\n") % f)
1494 self._repo.ui.warn(_("%s not removed!\n") % f)
1495 else:
1495 else:
1496 fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f]
1496 fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f]
1497 t = fctx.data()
1497 t = fctx.data()
1498 self._repo.wwrite(f, t, fctx.flags())
1498 self._repo.wwrite(f, t, fctx.flags())
1499 self._repo.dirstate.normal(f)
1499 self._repo.dirstate.normal(f)
1500
1500
1501 def copy(self, source, dest):
1501 def copy(self, source, dest):
1502 try:
1502 try:
1503 st = self._repo.wvfs.lstat(dest)
1503 st = self._repo.wvfs.lstat(dest)
1504 except OSError as err:
1504 except OSError as err:
1505 if err.errno != errno.ENOENT:
1505 if err.errno != errno.ENOENT:
1506 raise
1506 raise
1507 self._repo.ui.warn(_("%s does not exist!\n") % dest)
1507 self._repo.ui.warn(_("%s does not exist!\n") % dest)
1508 return
1508 return
1509 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1509 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1510 self._repo.ui.warn(_("copy failed: %s is not a file or a "
1510 self._repo.ui.warn(_("copy failed: %s is not a file or a "
1511 "symbolic link\n") % dest)
1511 "symbolic link\n") % dest)
1512 else:
1512 else:
1513 with self._repo.wlock():
1513 with self._repo.wlock():
1514 if self._repo.dirstate[dest] in '?':
1514 if self._repo.dirstate[dest] in '?':
1515 self._repo.dirstate.add(dest)
1515 self._repo.dirstate.add(dest)
1516 elif self._repo.dirstate[dest] in 'r':
1516 elif self._repo.dirstate[dest] in 'r':
1517 self._repo.dirstate.normallookup(dest)
1517 self._repo.dirstate.normallookup(dest)
1518 self._repo.dirstate.copy(source, dest)
1518 self._repo.dirstate.copy(source, dest)
1519
1519
1520 def match(self, pats=None, include=None, exclude=None, default='glob',
1520 def match(self, pats=None, include=None, exclude=None, default='glob',
1521 listsubrepos=False, badfn=None):
1521 listsubrepos=False, badfn=None):
1522 if pats is None:
1522 if pats is None:
1523 pats = []
1523 pats = []
1524 r = self._repo
1524 r = self._repo
1525
1525
1526 # Only a case insensitive filesystem needs magic to translate user input
1526 # Only a case insensitive filesystem needs magic to translate user input
1527 # to actual case in the filesystem.
1527 # to actual case in the filesystem.
(removed by this changeset, old lines 1528-1536: two separate matcher calls)
1528 if not util.fscasesensitive(r.root):
1529 return matchmod.icasefsmatcher(r.root, r.getcwd(), pats,
1530 include, exclude, default, r.auditor,
1531 self, listsubrepos=listsubrepos,
1532 badfn=badfn)
1533 return matchmod.match(r.root, r.getcwd(), pats,
1534 include, exclude, default,
1535 auditor=r.auditor, ctx=self,
1536 listsubrepos=listsubrepos, badfn=badfn)
(added by this changeset, new lines 1528-1534: pick the matcher factory once,
then make a single call)
1528 matcherfunc = matchmod.match
1529 if not util.fscasesensitive(r.root):
1530 matcherfunc = matchmod.icasefsmatcher
1531 return matcherfunc(r.root, r.getcwd(), pats,
1532 include, exclude, default,
1533 auditor=r.auditor, ctx=self,
1534 listsubrepos=listsubrepos, badfn=badfn)
1537
1535
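# --- editorial sketch, not part of context.py ----------------------------
# The change above (the subject of this changeset) keeps one call site and
# only swaps the matcher factory when the filesystem is case insensitive.
# A self-contained model of that "pick the factory, then call once" shape;
# the helper names below are invented for illustration and are not
# Mercurial APIs:

def _sensitive_matcher(root, patterns):
    return lambda path: path in patterns

def _insensitive_matcher(root, patterns):
    lowered = {p.lower() for p in patterns}
    return lambda path: path.lower() in lowered

def build_matcher(root, patterns, fs_is_case_sensitive):
    factory = _sensitive_matcher
    if not fs_is_case_sensitive:
        factory = _insensitive_matcher
    # one call with the shared argument list, whichever factory was chosen
    return factory(root, patterns)

if __name__ == '__main__':
    m = build_matcher('/repo', ['README.txt'], fs_is_case_sensitive=False)
    assert m('readme.TXT') and not m('CHANGELOG')
# --------------------------------------------------------------------------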
1538 def _filtersuspectsymlink(self, files):
1536 def _filtersuspectsymlink(self, files):
1539 if not files or self._repo.dirstate._checklink:
1537 if not files or self._repo.dirstate._checklink:
1540 return files
1538 return files
1541
1539
1542 # Symlink placeholders may get non-symlink-like contents
1540 # Symlink placeholders may get non-symlink-like contents
1543 # via user error or dereferencing by NFS or Samba servers,
1541 # via user error or dereferencing by NFS or Samba servers,
1544 # so we filter out any placeholders that don't look like a
1542 # so we filter out any placeholders that don't look like a
1545 # symlink
1543 # symlink
1546 sane = []
1544 sane = []
1547 for f in files:
1545 for f in files:
1548 if self.flags(f) == 'l':
1546 if self.flags(f) == 'l':
1549 d = self[f].data()
1547 d = self[f].data()
1550 if d == '' or len(d) >= 1024 or '\n' in d or util.binary(d):
1548 if d == '' or len(d) >= 1024 or '\n' in d or util.binary(d):
1551 self._repo.ui.debug('ignoring suspect symlink placeholder'
1549 self._repo.ui.debug('ignoring suspect symlink placeholder'
1552 ' "%s"\n' % f)
1550 ' "%s"\n' % f)
1553 continue
1551 continue
1554 sane.append(f)
1552 sane.append(f)
1555 return sane
1553 return sane
1556
1554
1557 def _checklookup(self, files):
1555 def _checklookup(self, files):
1558 # check for any possibly clean files
1556 # check for any possibly clean files
1559 if not files:
1557 if not files:
1560 return [], []
1558 return [], []
1561
1559
1562 modified = []
1560 modified = []
1563 fixup = []
1561 fixup = []
1564 pctx = self._parents[0]
1562 pctx = self._parents[0]
1565 # do a full compare of any files that might have changed
1563 # do a full compare of any files that might have changed
1566 for f in sorted(files):
1564 for f in sorted(files):
1567 if (f not in pctx or self.flags(f) != pctx.flags(f)
1565 if (f not in pctx or self.flags(f) != pctx.flags(f)
1568 or pctx[f].cmp(self[f])):
1566 or pctx[f].cmp(self[f])):
1569 modified.append(f)
1567 modified.append(f)
1570 else:
1568 else:
1571 fixup.append(f)
1569 fixup.append(f)
1572
1570
1573 # update dirstate for files that are actually clean
1571 # update dirstate for files that are actually clean
1574 if fixup:
1572 if fixup:
1575 try:
1573 try:
1576 # updating the dirstate is optional
1574 # updating the dirstate is optional
1577 # so we don't wait on the lock
1575 # so we don't wait on the lock
1578 # wlock can invalidate the dirstate, so cache normal _after_
1576 # wlock can invalidate the dirstate, so cache normal _after_
1579 # taking the lock
1577 # taking the lock
1580 with self._repo.wlock(False):
1578 with self._repo.wlock(False):
1581 normal = self._repo.dirstate.normal
1579 normal = self._repo.dirstate.normal
1582 for f in fixup:
1580 for f in fixup:
1583 normal(f)
1581 normal(f)
1584 # write changes out explicitly, because nesting
1582 # write changes out explicitly, because nesting
1585 # wlock at runtime may prevent 'wlock.release()'
1583 # wlock at runtime may prevent 'wlock.release()'
1586 # after this block from doing so for subsequent
1584 # after this block from doing so for subsequent
1587 # changing files
1585 # changing files
1588 self._repo.dirstate.write(self._repo.currenttransaction())
1586 self._repo.dirstate.write(self._repo.currenttransaction())
1589 except error.LockError:
1587 except error.LockError:
1590 pass
1588 pass
1591 return modified, fixup
1589 return modified, fixup
1592
1590
1593 def _dirstatestatus(self, match=None, ignored=False, clean=False,
1591 def _dirstatestatus(self, match=None, ignored=False, clean=False,
1594 unknown=False):
1592 unknown=False):
1595 '''Gets the status from the dirstate -- internal use only.'''
1593 '''Gets the status from the dirstate -- internal use only.'''
1596 listignored, listclean, listunknown = ignored, clean, unknown
1594 listignored, listclean, listunknown = ignored, clean, unknown
1597 match = match or matchmod.always(self._repo.root, self._repo.getcwd())
1595 match = match or matchmod.always(self._repo.root, self._repo.getcwd())
1598 subrepos = []
1596 subrepos = []
1599 if '.hgsub' in self:
1597 if '.hgsub' in self:
1600 subrepos = sorted(self.substate)
1598 subrepos = sorted(self.substate)
1601 cmp, s = self._repo.dirstate.status(match, subrepos, listignored,
1599 cmp, s = self._repo.dirstate.status(match, subrepos, listignored,
1602 listclean, listunknown)
1600 listclean, listunknown)
1603
1601
1604 # check for any possibly clean files
1602 # check for any possibly clean files
1605 if cmp:
1603 if cmp:
1606 modified2, fixup = self._checklookup(cmp)
1604 modified2, fixup = self._checklookup(cmp)
1607 s.modified.extend(modified2)
1605 s.modified.extend(modified2)
1608
1606
1609 # update dirstate for files that are actually clean
1607 # update dirstate for files that are actually clean
1610 if fixup and listclean:
1608 if fixup and listclean:
1611 s.clean.extend(fixup)
1609 s.clean.extend(fixup)
1612
1610
1613 if match.always():
1611 if match.always():
1614 # cache for performance
1612 # cache for performance
1615 if s.unknown or s.ignored or s.clean:
1613 if s.unknown or s.ignored or s.clean:
1616 # "_status" is cached with list*=False in the normal route
1614 # "_status" is cached with list*=False in the normal route
1617 self._status = scmutil.status(s.modified, s.added, s.removed,
1615 self._status = scmutil.status(s.modified, s.added, s.removed,
1618 s.deleted, [], [], [])
1616 s.deleted, [], [], [])
1619 else:
1617 else:
1620 self._status = s
1618 self._status = s
1621
1619
1622 return s
1620 return s
1623
1621
1624 @propertycache
1622 @propertycache
1625 def _manifest(self):
1623 def _manifest(self):
1626 """generate a manifest corresponding to the values in self._status
1624 """generate a manifest corresponding to the values in self._status
1627
1625
1628 This reuses the file nodeids from the parent, but we use special node
1626 This reuses the file nodeids from the parent, but we use special node
1629 identifiers for added and modified files. This is used by manifest
1627 identifiers for added and modified files. This is used by manifest
1630 merge to see that files are different and by update logic to avoid
1628 merge to see that files are different and by update logic to avoid
1631 deleting newly added files.
1629 deleting newly added files.
1632 """
1630 """
1633 return self._buildstatusmanifest(self._status)
1631 return self._buildstatusmanifest(self._status)
1634
1632
1635 def _buildstatusmanifest(self, status):
1633 def _buildstatusmanifest(self, status):
1636 """Builds a manifest that includes the given status results."""
1634 """Builds a manifest that includes the given status results."""
1637 parents = self.parents()
1635 parents = self.parents()
1638
1636
1639 man = parents[0].manifest().copy()
1637 man = parents[0].manifest().copy()
1640
1638
1641 ff = self._flagfunc
1639 ff = self._flagfunc
1642 for i, l in ((addednodeid, status.added),
1640 for i, l in ((addednodeid, status.added),
1643 (modifiednodeid, status.modified)):
1641 (modifiednodeid, status.modified)):
1644 for f in l:
1642 for f in l:
1645 man[f] = i
1643 man[f] = i
1646 try:
1644 try:
1647 man.setflag(f, ff(f))
1645 man.setflag(f, ff(f))
1648 except OSError:
1646 except OSError:
1649 pass
1647 pass
1650
1648
1651 for f in status.deleted + status.removed:
1649 for f in status.deleted + status.removed:
1652 if f in man:
1650 if f in man:
1653 del man[f]
1651 del man[f]
1654
1652
1655 return man
1653 return man
1656
1654
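# --- editorial sketch, not part of context.py ----------------------------
# _buildstatusmanifest() above overlays the working-directory status on a
# copy of the first parent's manifest: added/modified files get sentinel
# node ids, removed/deleted files are dropped.  A dict-based model of that
# overlay (the sentinel strings and status dict below are illustrative):

def toy_statusmanifest(parent_manifest, status):
    man = dict(parent_manifest)
    for sentinel, files in (('added', status['added']),
                            ('modified', status['modified'])):
        for f in files:
            man[f] = sentinel
    for f in status['deleted'] + status['removed']:
        man.pop(f, None)   # tolerate files already absent from the parent
    return man

if __name__ == '__main__':
    parent = {'a': 'node-a', 'b': 'node-b'}
    status = {'added': ['c'], 'modified': ['a'], 'removed': ['b'], 'deleted': []}
    assert toy_statusmanifest(parent, status) == {'a': 'modified', 'c': 'added'}
# --------------------------------------------------------------------------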
1657 def _buildstatus(self, other, s, match, listignored, listclean,
1655 def _buildstatus(self, other, s, match, listignored, listclean,
1658 listunknown):
1656 listunknown):
1659 """build a status with respect to another context
1657 """build a status with respect to another context
1660
1658
1661 This includes logic for maintaining the fast path of status when
1659 This includes logic for maintaining the fast path of status when
1662 comparing the working directory against its parent, which is to skip
1660 comparing the working directory against its parent, which is to skip
1663 building a new manifest if self (working directory) is not comparing
1661 building a new manifest if self (working directory) is not comparing
1664 against its parent (repo['.']).
1662 against its parent (repo['.']).
1665 """
1663 """
1666 s = self._dirstatestatus(match, listignored, listclean, listunknown)
1664 s = self._dirstatestatus(match, listignored, listclean, listunknown)
1667 # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
1665 # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
1668 # might have accidentally ended up with the entire contents of the file
1666 # might have accidentally ended up with the entire contents of the file
1669 # they are supposed to be linking to.
1667 # they are supposed to be linking to.
1670 s.modified[:] = self._filtersuspectsymlink(s.modified)
1668 s.modified[:] = self._filtersuspectsymlink(s.modified)
1671 if other != self._repo['.']:
1669 if other != self._repo['.']:
1672 s = super(workingctx, self)._buildstatus(other, s, match,
1670 s = super(workingctx, self)._buildstatus(other, s, match,
1673 listignored, listclean,
1671 listignored, listclean,
1674 listunknown)
1672 listunknown)
1675 return s
1673 return s
1676
1674
1677 def _matchstatus(self, other, match):
1675 def _matchstatus(self, other, match):
1678 """override the match method with a filter for directory patterns
1676 """override the match method with a filter for directory patterns
1679
1677
1680 We use inheritance to customize the match.bad method only in cases of
1678 We use inheritance to customize the match.bad method only in cases of
1681 workingctx since it belongs only to the working directory when
1679 workingctx since it belongs only to the working directory when
1682 comparing against the parent changeset.
1680 comparing against the parent changeset.
1683
1681
1684 If we aren't comparing against the working directory's parent, then we
1682 If we aren't comparing against the working directory's parent, then we
1685 just use the default match object sent to us.
1683 just use the default match object sent to us.
1686 """
1684 """
1687 superself = super(workingctx, self)
1685 superself = super(workingctx, self)
1688 match = superself._matchstatus(other, match)
1686 match = superself._matchstatus(other, match)
1689 if other != self._repo['.']:
1687 if other != self._repo['.']:
1690 def bad(f, msg):
1688 def bad(f, msg):
1691 # 'f' may be a directory pattern from 'match.files()',
1689 # 'f' may be a directory pattern from 'match.files()',
1692 # so 'f not in ctx1' is not enough
1690 # so 'f not in ctx1' is not enough
1693 if f not in other and not other.hasdir(f):
1691 if f not in other and not other.hasdir(f):
1694 self._repo.ui.warn('%s: %s\n' %
1692 self._repo.ui.warn('%s: %s\n' %
1695 (self._repo.dirstate.pathto(f), msg))
1693 (self._repo.dirstate.pathto(f), msg))
1696 match.bad = bad
1694 match.bad = bad
1697 return match
1695 return match
1698
1696
1699 class committablefilectx(basefilectx):
1697 class committablefilectx(basefilectx):
1700 """A committablefilectx provides common functionality for a file context
1698 """A committablefilectx provides common functionality for a file context
1701 that wants the ability to commit, e.g. workingfilectx or memfilectx."""
1699 that wants the ability to commit, e.g. workingfilectx or memfilectx."""
1702 def __init__(self, repo, path, filelog=None, ctx=None):
1700 def __init__(self, repo, path, filelog=None, ctx=None):
1703 self._repo = repo
1701 self._repo = repo
1704 self._path = path
1702 self._path = path
1705 self._changeid = None
1703 self._changeid = None
1706 self._filerev = self._filenode = None
1704 self._filerev = self._filenode = None
1707
1705
1708 if filelog is not None:
1706 if filelog is not None:
1709 self._filelog = filelog
1707 self._filelog = filelog
1710 if ctx:
1708 if ctx:
1711 self._changectx = ctx
1709 self._changectx = ctx
1712
1710
1713 def __nonzero__(self):
1711 def __nonzero__(self):
1714 return True
1712 return True
1715
1713
1716 def linkrev(self):
1714 def linkrev(self):
1717 # linked to self._changectx no matter if file is modified or not
1715 # linked to self._changectx no matter if file is modified or not
1718 return self.rev()
1716 return self.rev()
1719
1717
1720 def parents(self):
1718 def parents(self):
1721 '''return parent filectxs, following copies if necessary'''
1719 '''return parent filectxs, following copies if necessary'''
1722 def filenode(ctx, path):
1720 def filenode(ctx, path):
1723 return ctx._manifest.get(path, nullid)
1721 return ctx._manifest.get(path, nullid)
1724
1722
1725 path = self._path
1723 path = self._path
1726 fl = self._filelog
1724 fl = self._filelog
1727 pcl = self._changectx._parents
1725 pcl = self._changectx._parents
1728 renamed = self.renamed()
1726 renamed = self.renamed()
1729
1727
1730 if renamed:
1728 if renamed:
1731 pl = [renamed + (None,)]
1729 pl = [renamed + (None,)]
1732 else:
1730 else:
1733 pl = [(path, filenode(pcl[0], path), fl)]
1731 pl = [(path, filenode(pcl[0], path), fl)]
1734
1732
1735 for pc in pcl[1:]:
1733 for pc in pcl[1:]:
1736 pl.append((path, filenode(pc, path), fl))
1734 pl.append((path, filenode(pc, path), fl))
1737
1735
1738 return [self._parentfilectx(p, fileid=n, filelog=l)
1736 return [self._parentfilectx(p, fileid=n, filelog=l)
1739 for p, n, l in pl if n != nullid]
1737 for p, n, l in pl if n != nullid]
1740
1738
1741 def children(self):
1739 def children(self):
1742 return []
1740 return []
1743
1741
1744 class workingfilectx(committablefilectx):
1742 class workingfilectx(committablefilectx):
1745 """A workingfilectx object makes access to data related to a particular
1743 """A workingfilectx object makes access to data related to a particular
1746 file in the working directory convenient."""
1744 file in the working directory convenient."""
1747 def __init__(self, repo, path, filelog=None, workingctx=None):
1745 def __init__(self, repo, path, filelog=None, workingctx=None):
1748 super(workingfilectx, self).__init__(repo, path, filelog, workingctx)
1746 super(workingfilectx, self).__init__(repo, path, filelog, workingctx)
1749
1747
1750 @propertycache
1748 @propertycache
1751 def _changectx(self):
1749 def _changectx(self):
1752 return workingctx(self._repo)
1750 return workingctx(self._repo)
1753
1751
1754 def data(self):
1752 def data(self):
1755 return self._repo.wread(self._path)
1753 return self._repo.wread(self._path)
1756 def renamed(self):
1754 def renamed(self):
1757 rp = self._repo.dirstate.copied(self._path)
1755 rp = self._repo.dirstate.copied(self._path)
1758 if not rp:
1756 if not rp:
1759 return None
1757 return None
1760 return rp, self._changectx._parents[0]._manifest.get(rp, nullid)
1758 return rp, self._changectx._parents[0]._manifest.get(rp, nullid)
1761
1759
1762 def size(self):
1760 def size(self):
1763 return self._repo.wvfs.lstat(self._path).st_size
1761 return self._repo.wvfs.lstat(self._path).st_size
1764 def date(self):
1762 def date(self):
1765 t, tz = self._changectx.date()
1763 t, tz = self._changectx.date()
1766 try:
1764 try:
1767 return (self._repo.wvfs.lstat(self._path).st_mtime, tz)
1765 return (self._repo.wvfs.lstat(self._path).st_mtime, tz)
1768 except OSError as err:
1766 except OSError as err:
1769 if err.errno != errno.ENOENT:
1767 if err.errno != errno.ENOENT:
1770 raise
1768 raise
1771 return (t, tz)
1769 return (t, tz)
1772
1770
1773 def cmp(self, fctx):
1771 def cmp(self, fctx):
1774 """compare with other file context
1772 """compare with other file context
1775
1773
1776 returns True if different than fctx.
1774 returns True if different than fctx.
1777 """
1775 """
1778 # fctx should be a filectx (not a workingfilectx)
1776 # fctx should be a filectx (not a workingfilectx)
1779 # invert comparison to reuse the same code path
1777 # invert comparison to reuse the same code path
1780 return fctx.cmp(self)
1778 return fctx.cmp(self)
1781
1779
1782 def remove(self, ignoremissing=False):
1780 def remove(self, ignoremissing=False):
1783 """wraps unlink for a repo's working directory"""
1781 """wraps unlink for a repo's working directory"""
1784 self._repo.wvfs.unlinkpath(self._path, ignoremissing=ignoremissing)
1782 self._repo.wvfs.unlinkpath(self._path, ignoremissing=ignoremissing)
1785
1783
1786 def write(self, data, flags):
1784 def write(self, data, flags):
1787 """wraps repo.wwrite"""
1785 """wraps repo.wwrite"""
1788 self._repo.wwrite(self._path, data, flags)
1786 self._repo.wwrite(self._path, data, flags)

class workingcommitctx(workingctx):
    """A workingcommitctx object makes access to data related to
    the revision being committed convenient.

    This hides changes in the working directory, if they aren't
    committed in this context.
    """
    def __init__(self, repo, changes,
                 text="", user=None, date=None, extra=None):
        super(workingctx, self).__init__(repo, text, user, date, extra,
                                         changes)

    def _dirstatestatus(self, match=None, ignored=False, clean=False,
                        unknown=False):
        """Return matched files only in ``self._status``

        Uncommitted files appear "clean" via this context, even if
        they aren't actually so in the working directory.
        """
        match = match or matchmod.always(self._repo.root, self._repo.getcwd())
        if clean:
            clean = [f for f in self._manifest if f not in self._changedset]
        else:
            clean = []
        return scmutil.status([f for f in self._status.modified if match(f)],
                              [f for f in self._status.added if match(f)],
                              [f for f in self._status.removed if match(f)],
                              [], [], [], clean)

    @propertycache
    def _changedset(self):
        """Return the set of files changed in this context
        """
        changed = set(self._status.modified)
        changed.update(self._status.added)
        changed.update(self._status.removed)
        return changed

def makecachingfilectxfn(func):
    """Create a filectxfn that caches based on the path.

    We can't use util.cachefunc because it uses all arguments as the cache
    key and this creates a cycle since the arguments include the repo and
    memctx.
    """
    cache = {}

    def getfilectx(repo, memctx, path):
        if path not in cache:
            cache[path] = func(repo, memctx, path)
        return cache[path]

    return getfilectx
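
# Illustrative sketch (not part of context.py): how a caller might wrap a
# plain filectxfn with makecachingfilectxfn so that repeated requests for the
# same path during a commit reuse the first result. The helper names
# '_examplefilectxfn' and '_examplecommit' are hypothetical, not Mercurial API.
def _examplefilectxfn(repo, memctx, path):
    # pretend every file's content is simply its own path; a real callback
    # would fetch data from whatever source is being converted or imported
    return memfilectx(repo, path, data=path + '\n')

def _examplecommit(repo, parents, files, text):
    # wrap the callback once; it may then be called any number of times per
    # path without redoing the underlying work
    cachedfn = makecachingfilectxfn(_examplefilectxfn)
    ctx = memctx(repo, parents, text, files, cachedfn)
    return ctx.commit()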

class memctx(committablectx):
    """Use memctx to perform in-memory commits via localrepo.commitctx().

    Revision information is supplied at initialization time, while the
    related file data is made available through a callback
    mechanism.  'repo' is the current localrepo, 'parents' is a
    sequence of two parent revisions identifiers (pass None for every
    missing parent), 'text' is the commit message and 'files' lists
    names of files touched by the revision (normalized and relative to
    repository root).

    filectxfn(repo, memctx, path) is a callable receiving the
    repository, the current memctx object and the normalized path of
    the requested file, relative to repository root. It is fired by the
    commit function for every file in 'files', but the call order is
    undefined. If the file is available in the revision being
    committed (updated or added), filectxfn returns a memfilectx
    object. If the file was removed, filectxfn raises an
    IOError. Moved files are represented by marking the source file
    removed and the new file added with copy information (see
    memfilectx).

    user receives the committer name and defaults to the current
    repository username, date is the commit date in any format
    supported by util.parsedate() and defaults to the current date, extra
    is a dictionary of metadata or is left empty.
    """

    # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
    # Extensions that need to retain compatibility across Mercurial 3.1 can use
    # this field to determine what to do in filectxfn.
    _returnnoneformissingfiles = True

    def __init__(self, repo, parents, text, files, filectxfn, user=None,
                 date=None, extra=None, editor=False):
        super(memctx, self).__init__(repo, text, user, date, extra)
        self._rev = None
        self._node = None
        parents = [(p or nullid) for p in parents]
        p1, p2 = parents
        self._parents = [changectx(self._repo, p) for p in (p1, p2)]
        files = sorted(set(files))
        self._files = files
        self.substate = {}

        # if store is not callable, wrap it in a function
        if not callable(filectxfn):
            def getfilectx(repo, memctx, path):
                fctx = filectxfn[path]
                # this is weird but apparently we only keep track of one parent
                # (why not only store that instead of a tuple?)
                copied = fctx.renamed()
                if copied:
                    copied = copied[0]
                return memfilectx(repo, path, fctx.data(),
                                  islink=fctx.islink(), isexec=fctx.isexec(),
                                  copied=copied, memctx=memctx)
            self._filectxfn = getfilectx
        else:
            # memoizing increases performance for e.g. vcs convert scenarios.
            self._filectxfn = makecachingfilectxfn(filectxfn)

        if extra:
            self._extra = extra.copy()
        else:
            self._extra = {}

        if self._extra.get('branch', '') == '':
            self._extra['branch'] = 'default'

        if editor:
            self._text = editor(self._repo, self, [])
            self._repo.savecommitmessage(self._text)

    def filectx(self, path, filelog=None):
        """get a file context from the working directory

        Returns None if file doesn't exist and should be removed."""
        return self._filectxfn(self._repo, self, path)

    def commit(self):
        """commit context to the repo"""
        return self._repo.commitctx(self)

    @propertycache
    def _manifest(self):
        """generate a manifest based on the return values of filectxfn"""

        # keep this simple for now; just worry about p1
        pctx = self._parents[0]
        man = pctx.manifest().copy()

        for f in self._status.modified:
            p1node = nullid
            p2node = nullid
            p = pctx[f].parents()  # if file isn't in pctx, check p2?
            if len(p) > 0:
                p1node = p[0].filenode()
                if len(p) > 1:
                    p2node = p[1].filenode()
            man[f] = revlog.hash(self[f].data(), p1node, p2node)

        for f in self._status.added:
            man[f] = revlog.hash(self[f].data(), nullid, nullid)

        for f in self._status.removed:
            if f in man:
                del man[f]

        return man

    @propertycache
    def _status(self):
        """Calculate exact status from ``files`` specified at construction
        """
        man1 = self.p1().manifest()
        p2 = self._parents[1]
        # "1 < len(self._parents)" can't be used for checking
        # existence of the 2nd parent, because "memctx._parents" is
        # explicitly initialized as a list of length 2.
        if p2.node() != nullid:
            man2 = p2.manifest()
            managing = lambda f: f in man1 or f in man2
        else:
            managing = lambda f: f in man1

        modified, added, removed = [], [], []
        for f in self._files:
            if not managing(f):
                added.append(f)
            elif self[f]:
                modified.append(f)
            else:
                removed.append(f)

        return scmutil.status(modified, added, removed, [], [], [], [])
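
# Illustrative sketch (not part of context.py): committing two files entirely
# in memory by passing a mapping instead of a callable as filectxfn; memctx
# wraps a non-callable store in a lookup function itself (see __init__ above).
# '_examplememcommit' and the file contents are hypothetical.
def _examplememcommit(repo):
    files = {
        'README': memfilectx(repo, 'README', 'hello\n'),
        'bin/run': memfilectx(repo, 'bin/run', '#!/bin/sh\n', isexec=True),
    }
    ctx = memctx(repo, (repo['.'].node(), None), 'example in-memory commit',
                 sorted(files), files, user='example <example@example.com>')
    return ctx.commit()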

class memfilectx(committablefilectx):
    """memfilectx represents an in-memory file to commit.

    See memctx and committablefilectx for more details.
    """
    def __init__(self, repo, path, data, islink=False,
                 isexec=False, copied=None, memctx=None):
        """
        path is the normalized file path relative to repository root.
        data is the file content as a string.
        islink is True if the file is a symbolic link.
        isexec is True if the file is executable.
        copied is the source file path if current file was copied in the
        revision being committed, or None."""
        super(memfilectx, self).__init__(repo, path, None, memctx)
        self._data = data
        self._flags = (islink and 'l' or '') + (isexec and 'x' or '')
        self._copied = None
        if copied:
            self._copied = (copied, nullid)

    def data(self):
        return self._data
    def size(self):
        return len(self.data())
    def flags(self):
        return self._flags
    def renamed(self):
        return self._copied

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        # need to figure out what to do here
        del self._changectx[self._path]

    def write(self, data, flags):
        """wraps repo.wwrite"""
        self._data = data
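
# Illustrative sketch (not part of context.py): representing a rename in an
# in-memory commit, as the memctx docstring describes, by returning None
# (removal) for the old path and a memfilectx carrying copy metadata for the
# new path. '_examplerenamefilectxfn' and the paths are hypothetical.
def _examplerenamefilectxfn(repo, memctx, path):
    if path == 'old-name.txt':
        # None marks the file as removed (Mercurial > 3.1 convention,
        # cf. _returnnoneformissingfiles above)
        return None
    if path == 'new-name.txt':
        return memfilectx(repo, path, 'same contents\n',
                          copied='old-name.txt', memctx=memctx)
    raise KeyError(path)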

class metadataonlyctx(committablectx):
    """Like memctx but it's reusing the manifest of a different commit.
    Intended to be used by lightweight operations that are creating
    metadata-only changes.

    Revision information is supplied at initialization time. 'repo' is the
    current localrepo, 'ctx' is the original revision whose manifest we're
    reusing, 'parents' is a sequence of two parent revisions identifiers (pass
    None for every missing parent), 'text' is the commit message.

    user receives the committer name and defaults to the current repository
    username, date is the commit date in any format supported by
    util.parsedate() and defaults to the current date, extra is a dictionary
    of metadata or is left empty.
    """
    def __new__(cls, repo, originalctx, *args, **kwargs):
        return super(metadataonlyctx, cls).__new__(cls, repo)

    def __init__(self, repo, originalctx, parents, text, user=None, date=None,
                 extra=None, editor=False):
        super(metadataonlyctx, self).__init__(repo, text, user, date, extra)
        self._rev = None
        self._node = None
        self._originalctx = originalctx
        self._manifestnode = originalctx.manifestnode()
        parents = [(p or nullid) for p in parents]
        p1, p2 = self._parents = [changectx(self._repo, p) for p in parents]

        # sanity check to ensure that the reused manifest parents are
        # manifests of our commit parents
        mp1, mp2 = self.manifestctx().parents
        if p1 != nullid and p1.manifestctx().node() != mp1:
            raise RuntimeError('can\'t reuse the manifest: '
                               'its p1 doesn\'t match the new ctx p1')
        if p2 != nullid and p2.manifestctx().node() != mp2:
            raise RuntimeError('can\'t reuse the manifest: '
                               'its p2 doesn\'t match the new ctx p2')

        self._files = originalctx.files()
        self.substate = {}

        if extra:
            self._extra = extra.copy()
        else:
            self._extra = {}

        if self._extra.get('branch', '') == '':
            self._extra['branch'] = 'default'

        if editor:
            self._text = editor(self._repo, self, [])
            self._repo.savecommitmessage(self._text)

    def manifestnode(self):
        return self._manifestnode

    @propertycache
    def _manifestctx(self):
        return self._repo.manifestlog[self._manifestnode]

    def filectx(self, path, filelog=None):
        return self._originalctx.filectx(path, filelog=filelog)

    def commit(self):
        """commit context to the repo"""
        return self._repo.commitctx(self)

    @property
    def _manifest(self):
        return self._originalctx.manifest()

    @propertycache
    def _status(self):
        """Calculate exact status from ``files`` specified in the ``origctx``
        and parents manifests.
        """
        man1 = self.p1().manifest()
        p2 = self._parents[1]
        # "1 < len(self._parents)" can't be used for checking
        # existence of the 2nd parent, because "metadataonlyctx._parents" is
        # explicitly initialized as a list of length 2.
        if p2.node() != nullid:
            man2 = p2.manifest()
            managing = lambda f: f in man1 or f in man2
        else:
            managing = lambda f: f in man1

        modified, added, removed = [], [], []
        for f in self._files:
            if not managing(f):
                added.append(f)
            elif self[f]:
                modified.append(f)
            else:
                removed.append(f)

        return scmutil.status(modified, added, removed, [], [], [], [])
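
# Illustrative sketch (not part of context.py): using metadataonlyctx to
# recreate an existing changeset with a new commit message while reusing its
# manifest, the kind of lightweight metadata-only rewrite the class is meant
# for. '_examplereword' is a hypothetical helper.
def _examplereword(repo, ctx, newtext):
    new = metadataonlyctx(repo, ctx,
                          parents=(ctx.p1().node(), ctx.p2().node()),
                          text=newtext,
                          user=ctx.user(),
                          date=ctx.date(),
                          extra=ctx.extra())
    return repo.commitctx(new)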