filectx: make renamed a property cache...
Jun Wu
r32235:c38c15d4 default
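The excerpt below only shows the new basefilectx.renamed() accessor; the hunk that actually introduces the cached attribute falls outside this truncated listing. For background, util.propertycache (aliased as propertycache in this module) computes a value on first access and stores it on the instance, so later reads are plain attribute lookups. A minimal sketch of the pattern the commit title describes, assuming _copied is defined roughly like this on the concrete file contexts (illustrative, not the verbatim hunk):

    @propertycache
    def _copied(self):
        # computed once per filectx and cached on the instance;
        # filelog.renamed() returns the (source path, file node) pair for a
        # copy/rename, or a false value when this file revision is not a copy
        return self._filelog.renamed(self._filenode)

    def renamed(self):
        return self._copied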
@@ -1,2175 +1,2176 @@
# context.py - changeset and file context objects for mercurial
#
# Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno
import os
import re
import stat

from .i18n import _
from .node import (
    addednodeid,
    bin,
    hex,
    modifiednodeid,
    nullid,
    nullrev,
    short,
    wdirid,
    wdirnodes,
)
from . import (
    encoding,
    error,
    fileset,
    match as matchmod,
    mdiff,
    obsolete as obsmod,
    patch,
    phases,
    pycompat,
    repoview,
    revlog,
    scmutil,
    subrepo,
    util,
)

propertycache = util.propertycache

nonascii = re.compile(r'[^\x21-\x7f]').search

class basectx(object):
    """A basectx object represents the common logic for its children:
    changectx: read-only context that is already present in the repo,
    workingctx: a context that represents the working directory and can
                be committed,
    memctx: a context that represents changes in-memory and can also
            be committed."""
    def __new__(cls, repo, changeid='', *args, **kwargs):
        if isinstance(changeid, basectx):
            return changeid

        o = super(basectx, cls).__new__(cls)

        o._repo = repo
        o._rev = nullrev
        o._node = nullid

        return o

    def __str__(self):
        r = short(self.node())
        if pycompat.ispy3:
            return r.decode('ascii')
        return r

    def __bytes__(self):
        return short(self.node())

    def __int__(self):
        return self.rev()

    def __repr__(self):
        return "<%s %s>" % (type(self).__name__, str(self))

    def __eq__(self, other):
        try:
            return type(self) == type(other) and self._rev == other._rev
        except AttributeError:
            return False

    def __ne__(self, other):
        return not (self == other)

    def __contains__(self, key):
        return key in self._manifest

    def __getitem__(self, key):
        return self.filectx(key)

    def __iter__(self):
        return iter(self._manifest)

    def _buildstatusmanifest(self, status):
        """Builds a manifest that includes the given status results, if this is
        a working copy context. For non-working copy contexts, it just returns
        the normal manifest."""
        return self.manifest()

    def _matchstatus(self, other, match):
        """return match.always if match is None

        This internal method provides a way for child objects to override the
        match operator.
        """
        return match or matchmod.always(self._repo.root, self._repo.getcwd())

    def _buildstatus(self, other, s, match, listignored, listclean,
                     listunknown):
        """build a status with respect to another context"""
        # Load earliest manifest first for caching reasons. More specifically,
        # if you have revisions 1000 and 1001, 1001 is probably stored as a
        # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
        # 1000 and cache it so that when you read 1001, we just need to apply a
        # delta to what's in the cache. So that's one full reconstruction + one
        # delta application.
        mf2 = None
        if self.rev() is not None and self.rev() < other.rev():
            mf2 = self._buildstatusmanifest(s)
        mf1 = other._buildstatusmanifest(s)
        if mf2 is None:
            mf2 = self._buildstatusmanifest(s)

        modified, added = [], []
        removed = []
        clean = []
        deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
        deletedset = set(deleted)
        d = mf1.diff(mf2, match=match, clean=listclean)
        for fn, value in d.iteritems():
            if fn in deletedset:
                continue
            if value is None:
                clean.append(fn)
                continue
            (node1, flag1), (node2, flag2) = value
            if node1 is None:
                added.append(fn)
            elif node2 is None:
                removed.append(fn)
            elif flag1 != flag2:
                modified.append(fn)
            elif node2 not in wdirnodes:
                # When comparing files between two commits, we save time by
                # not comparing the file contents when the nodeids differ.
                # Note that this means we incorrectly report a reverted change
                # to a file as a modification.
                modified.append(fn)
            elif self[fn].cmp(other[fn]):
                modified.append(fn)
            else:
                clean.append(fn)

        if removed:
            # need to filter files if they are already reported as removed
            unknown = [fn for fn in unknown if fn not in mf1 and
                       (not match or match(fn))]
            ignored = [fn for fn in ignored if fn not in mf1 and
                       (not match or match(fn))]
            # if they're deleted, don't report them as removed
            removed = [fn for fn in removed if fn not in deletedset]

        return scmutil.status(modified, added, removed, deleted, unknown,
                              ignored, clean)

    @propertycache
    def substate(self):
        return subrepo.state(self, self._repo.ui)

    def subrev(self, subpath):
        return self.substate[subpath][1]

    def rev(self):
        return self._rev
    def node(self):
        return self._node
    def hex(self):
        return hex(self.node())
    def manifest(self):
        return self._manifest
    def manifestctx(self):
        return self._manifestctx
    def repo(self):
        return self._repo
    def phasestr(self):
        return phases.phasenames[self.phase()]
    def mutable(self):
        return self.phase() > phases.public

    def getfileset(self, expr):
        return fileset.getfileset(self, expr)

    def obsolete(self):
        """True if the changeset is obsolete"""
        return self.rev() in obsmod.getrevs(self._repo, 'obsolete')

    def extinct(self):
        """True if the changeset is extinct"""
        return self.rev() in obsmod.getrevs(self._repo, 'extinct')

    def unstable(self):
        """True if the changeset is not obsolete but its ancestors are"""
        return self.rev() in obsmod.getrevs(self._repo, 'unstable')

    def bumped(self):
        """True if the changeset tries to be a successor of a public changeset

        Only non-public and non-obsolete changesets may be bumped.
        """
        return self.rev() in obsmod.getrevs(self._repo, 'bumped')

    def divergent(self):
        """Is a successor of a changeset with multiple possible successor sets

        Only non-public and non-obsolete changesets may be divergent.
        """
        return self.rev() in obsmod.getrevs(self._repo, 'divergent')

    def troubled(self):
        """True if the changeset is either unstable, bumped or divergent"""
        return self.unstable() or self.bumped() or self.divergent()

    def troubles(self):
        """return the list of troubles affecting this changeset.

        Troubles are returned as strings. Possible values are:
        - unstable,
        - bumped,
        - divergent.
        """
        troubles = []
        if self.unstable():
            troubles.append('unstable')
        if self.bumped():
            troubles.append('bumped')
        if self.divergent():
            troubles.append('divergent')
        return troubles

    def parents(self):
        """return contexts for each parent changeset"""
        return self._parents

    def p1(self):
        return self._parents[0]

    def p2(self):
        parents = self._parents
        if len(parents) == 2:
            return parents[1]
        return changectx(self._repo, nullrev)

    def _fileinfo(self, path):
        if r'_manifest' in self.__dict__:
            try:
                return self._manifest[path], self._manifest.flags(path)
            except KeyError:
                raise error.ManifestLookupError(self._node, path,
                                                _('not found in manifest'))
        if r'_manifestdelta' in self.__dict__ or path in self.files():
            if path in self._manifestdelta:
                return (self._manifestdelta[path],
                        self._manifestdelta.flags(path))
        mfl = self._repo.manifestlog
        try:
            node, flag = mfl[self._changeset.manifest].find(path)
        except KeyError:
            raise error.ManifestLookupError(self._node, path,
                                            _('not found in manifest'))

        return node, flag

    def filenode(self, path):
        return self._fileinfo(path)[0]

    def flags(self, path):
        try:
            return self._fileinfo(path)[1]
        except error.LookupError:
            return ''

    def sub(self, path, allowcreate=True):
        '''return a subrepo for the stored revision of path, never wdir()'''
        return subrepo.subrepo(self, path, allowcreate=allowcreate)

    def nullsub(self, path, pctx):
        return subrepo.nullsubrepo(self, path, pctx)

    def workingsub(self, path):
        '''return a subrepo for the stored revision, or wdir if this is a wdir
        context.
        '''
        return subrepo.subrepo(self, path, allowwdir=True)

    def match(self, pats=None, include=None, exclude=None, default='glob',
              listsubrepos=False, badfn=None):
        if pats is None:
            pats = []
        r = self._repo
        return matchmod.match(r.root, r.getcwd(), pats,
                              include, exclude, default,
                              auditor=r.nofsauditor, ctx=self,
                              listsubrepos=listsubrepos, badfn=badfn)

    def diff(self, ctx2=None, match=None, **opts):
        """Returns a diff generator for the given contexts and matcher"""
        if ctx2 is None:
            ctx2 = self.p1()
        if ctx2 is not None:
            ctx2 = self._repo[ctx2]
        diffopts = patch.diffopts(self._repo.ui, opts)
        return patch.diff(self._repo, ctx2, self, match=match, opts=diffopts)

    def dirs(self):
        return self._manifest.dirs()

    def hasdir(self, dir):
        return self._manifest.hasdir(dir)

    def dirty(self, missing=False, merge=True, branch=True):
        return False

    def status(self, other=None, match=None, listignored=False,
               listclean=False, listunknown=False, listsubrepos=False):
        """return status of files between two nodes or node and working
        directory.

        If other is None, compare this node with working directory.

        returns (modified, added, removed, deleted, unknown, ignored, clean)
        """

        ctx1 = self
        ctx2 = self._repo[other]

        # This next code block is, admittedly, fragile logic that tests for
        # reversing the contexts and wouldn't need to exist if it weren't for
        # the fast (and common) code path of comparing the working directory
        # with its first parent.
        #
        # What we're aiming for here is the ability to call:
        #
        # workingctx.status(parentctx)
        #
        # If we always built the manifest for each context and compared those,
        # then we'd be done. But the special case of the above call means we
        # just copy the manifest of the parent.
        reversed = False
        if (not isinstance(ctx1, changectx)
            and isinstance(ctx2, changectx)):
            reversed = True
            ctx1, ctx2 = ctx2, ctx1

        match = ctx2._matchstatus(ctx1, match)
        r = scmutil.status([], [], [], [], [], [], [])
        r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
                              listunknown)

        if reversed:
            # Reverse added and removed. Clear deleted, unknown and ignored as
            # these make no sense to reverse.
            r = scmutil.status(r.modified, r.removed, r.added, [], [], [],
                               r.clean)

        if listsubrepos:
            for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
                try:
                    rev2 = ctx2.subrev(subpath)
                except KeyError:
                    # A subrepo that existed in node1 was deleted between
                    # node1 and node2 (inclusive). Thus, ctx2's substate
                    # won't contain that subpath. The best we can do is
                    # ignore it.
                    rev2 = None
                submatch = matchmod.subdirmatcher(subpath, match)
                s = sub.status(rev2, match=submatch, ignored=listignored,
                               clean=listclean, unknown=listunknown,
                               listsubrepos=True)
                for rfiles, sfiles in zip(r, s):
                    rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)

        for l in r:
            l.sort()

        return r


def makememctx(repo, parents, text, user, date, branch, files, store,
               editor=None, extra=None):
    def getfilectx(repo, memctx, path):
        data, mode, copied = store.getfile(path)
        if data is None:
            return None
        islink, isexec = mode
        return memfilectx(repo, path, data, islink=islink, isexec=isexec,
                          copied=copied, memctx=memctx)
    if extra is None:
        extra = {}
    if branch:
        extra['branch'] = encoding.fromlocal(branch)
    ctx = memctx(repo, parents, text, files, getfilectx, user,
                 date, extra, editor)
    return ctx

def _filterederror(repo, changeid):
    """build an exception to be raised about a filtered changeid

    This is extracted in a function to help extensions (eg: evolve) to
    experiment with various message variants."""
    if repo.filtername.startswith('visible'):
        msg = _("hidden revision '%s'") % changeid
        hint = _('use --hidden to access hidden revisions')
        return error.FilteredRepoLookupError(msg, hint=hint)
    msg = _("filtered revision '%s' (not in '%s' subset)")
    msg %= (changeid, repo.filtername)
    return error.FilteredRepoLookupError(msg)

class changectx(basectx):
    """A changecontext object makes access to data related to a particular
    changeset convenient. It represents a read-only context already present in
    the repo."""
    def __init__(self, repo, changeid=''):
        """changeid is a revision number, node, or tag"""

        # since basectx.__new__ already took care of copying the object, we
        # don't need to do anything in __init__, so we just exit here
        if isinstance(changeid, basectx):
            return

        if changeid == '':
            changeid = '.'
        self._repo = repo

        try:
            if isinstance(changeid, int):
                self._node = repo.changelog.node(changeid)
                self._rev = changeid
                return
            if not pycompat.ispy3 and isinstance(changeid, long):
                changeid = str(changeid)
            if changeid == 'null':
                self._node = nullid
                self._rev = nullrev
                return
            if changeid == 'tip':
                self._node = repo.changelog.tip()
                self._rev = repo.changelog.rev(self._node)
                return
            if changeid == '.' or changeid == repo.dirstate.p1():
                # this is a hack to delay/avoid loading obsmarkers
                # when we know that '.' won't be hidden
                self._node = repo.dirstate.p1()
                self._rev = repo.unfiltered().changelog.rev(self._node)
                return
            if len(changeid) == 20:
                try:
                    self._node = changeid
                    self._rev = repo.changelog.rev(changeid)
                    return
                except error.FilteredRepoLookupError:
                    raise
                except LookupError:
                    pass

            try:
                r = int(changeid)
                if '%d' % r != changeid:
                    raise ValueError
                l = len(repo.changelog)
                if r < 0:
                    r += l
                if r < 0 or r >= l:
                    raise ValueError
                self._rev = r
                self._node = repo.changelog.node(r)
                return
            except error.FilteredIndexError:
                raise
            except (ValueError, OverflowError, IndexError):
                pass

            if len(changeid) == 40:
                try:
                    self._node = bin(changeid)
                    self._rev = repo.changelog.rev(self._node)
                    return
                except error.FilteredLookupError:
                    raise
                except (TypeError, LookupError):
                    pass

            # lookup bookmarks through the name interface
            try:
                self._node = repo.names.singlenode(repo, changeid)
                self._rev = repo.changelog.rev(self._node)
                return
            except KeyError:
                pass
            except error.FilteredRepoLookupError:
                raise
            except error.RepoLookupError:
                pass

            self._node = repo.unfiltered().changelog._partialmatch(changeid)
            if self._node is not None:
                self._rev = repo.changelog.rev(self._node)
                return

            # lookup failed
            # check if it might have come from damaged dirstate
            #
            # XXX we could avoid the unfiltered if we had a recognizable
            # exception for filtered changeset access
            if changeid in repo.unfiltered().dirstate.parents():
                msg = _("working directory has unknown parent '%s'!")
                raise error.Abort(msg % short(changeid))
            try:
                if len(changeid) == 20 and nonascii(changeid):
                    changeid = hex(changeid)
            except TypeError:
                pass
        except (error.FilteredIndexError, error.FilteredLookupError,
                error.FilteredRepoLookupError):
            raise _filterederror(repo, changeid)
        except IndexError:
            pass
        raise error.RepoLookupError(
            _("unknown revision '%s'") % changeid)

    def __hash__(self):
        try:
            return hash(self._rev)
        except AttributeError:
            return id(self)

    def __nonzero__(self):
        return self._rev != nullrev

    __bool__ = __nonzero__

    @propertycache
    def _changeset(self):
        return self._repo.changelog.changelogrevision(self.rev())

    @propertycache
    def _manifest(self):
        return self._manifestctx.read()

    @propertycache
    def _manifestctx(self):
        return self._repo.manifestlog[self._changeset.manifest]

    @propertycache
    def _manifestdelta(self):
        return self._manifestctx.readdelta()

    @propertycache
    def _parents(self):
        repo = self._repo
        p1, p2 = repo.changelog.parentrevs(self._rev)
        if p2 == nullrev:
            return [changectx(repo, p1)]
        return [changectx(repo, p1), changectx(repo, p2)]

    def changeset(self):
        c = self._changeset
        return (
            c.manifest,
            c.user,
            c.date,
            c.files,
            c.description,
            c.extra,
        )
    def manifestnode(self):
        return self._changeset.manifest

    def user(self):
        return self._changeset.user
    def date(self):
        return self._changeset.date
    def files(self):
        return self._changeset.files
    def description(self):
        return self._changeset.description
    def branch(self):
        return encoding.tolocal(self._changeset.extra.get("branch"))
    def closesbranch(self):
        return 'close' in self._changeset.extra
    def extra(self):
        return self._changeset.extra
    def tags(self):
        return self._repo.nodetags(self._node)
    def bookmarks(self):
        return self._repo.nodebookmarks(self._node)
    def phase(self):
        return self._repo._phasecache.phase(self._repo, self._rev)
    def hidden(self):
        return self._rev in repoview.filterrevs(self._repo, 'visible')

    def children(self):
        """return contexts for each child changeset"""
        c = self._repo.changelog.children(self._node)
        return [changectx(self._repo, x) for x in c]

    def ancestors(self):
        for a in self._repo.changelog.ancestors([self._rev]):
            yield changectx(self._repo, a)

    def descendants(self):
        for d in self._repo.changelog.descendants([self._rev]):
            yield changectx(self._repo, d)

    def filectx(self, path, fileid=None, filelog=None):
        """get a file context from this changeset"""
        if fileid is None:
            fileid = self.filenode(path)
        return filectx(self._repo, path, fileid=fileid,
                       changectx=self, filelog=filelog)

    def ancestor(self, c2, warn=False):
        """return the "best" ancestor context of self and c2

        If there are multiple candidates, it will show a message and check
        merge.preferancestor configuration before falling back to the
        revlog ancestor."""
        # deal with workingctxs
        n2 = c2._node
        if n2 is None:
            n2 = c2._parents[0]._node
        cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
        if not cahs:
            anc = nullid
        elif len(cahs) == 1:
            anc = cahs[0]
        else:
            # experimental config: merge.preferancestor
            for r in self._repo.ui.configlist('merge', 'preferancestor', ['*']):
                try:
                    ctx = changectx(self._repo, r)
                except error.RepoLookupError:
                    continue
                anc = ctx.node()
                if anc in cahs:
                    break
            else:
                anc = self._repo.changelog.ancestor(self._node, n2)
            if warn:
                self._repo.ui.status(
                    (_("note: using %s as ancestor of %s and %s\n") %
                     (short(anc), short(self._node), short(n2))) +
                    ''.join(_(" alternatively, use --config "
                              "merge.preferancestor=%s\n") %
                            short(n) for n in sorted(cahs) if n != anc))
        return changectx(self._repo, anc)

    def descendant(self, other):
        """True if other is a descendant of this changeset"""
        return self._repo.changelog.descendant(self._rev, other._rev)

    def walk(self, match):
        '''Generates matching file names.'''

        # Wrap match.bad method to have message with nodeid
        def bad(fn, msg):
            # The manifest doesn't know about subrepos, so don't complain about
            # paths into valid subrepos.
            if any(fn == s or fn.startswith(s + '/')
                   for s in self.substate):
                return
            match.bad(fn, _('no such file in rev %s') % self)

        m = matchmod.badmatch(match, bad)
        return self._manifest.walk(m)

    def matches(self, match):
        return self.walk(match)

class basefilectx(object):
    """A filecontext object represents the common logic for its children:
    filectx: read-only access to a filerevision that is already present
             in the repo,
    workingfilectx: a filecontext that represents files from the working
                    directory,
    memfilectx: a filecontext that represents files in-memory."""
    def __new__(cls, repo, path, *args, **kwargs):
        return super(basefilectx, cls).__new__(cls)

    @propertycache
    def _filelog(self):
        return self._repo.file(self._path)

    @propertycache
    def _changeid(self):
        if r'_changeid' in self.__dict__:
            return self._changeid
        elif r'_changectx' in self.__dict__:
            return self._changectx.rev()
        elif r'_descendantrev' in self.__dict__:
            # this file context was created from a revision with a known
            # descendant, we can (lazily) correct for linkrev aliases
            return self._adjustlinkrev(self._descendantrev)
        else:
            return self._filelog.linkrev(self._filerev)

    @propertycache
    def _filenode(self):
        if r'_fileid' in self.__dict__:
            return self._filelog.lookup(self._fileid)
        else:
            return self._changectx.filenode(self._path)

    @propertycache
    def _filerev(self):
        return self._filelog.rev(self._filenode)

    @propertycache
    def _repopath(self):
        return self._path

    def __nonzero__(self):
        try:
            self._filenode
            return True
        except error.LookupError:
            # file is missing
            return False

    __bool__ = __nonzero__

    def __str__(self):
        try:
            return "%s@%s" % (self.path(), self._changectx)
        except error.LookupError:
            return "%s@???" % self.path()

    def __repr__(self):
        return "<%s %s>" % (type(self).__name__, str(self))

    def __hash__(self):
        try:
            return hash((self._path, self._filenode))
        except AttributeError:
            return id(self)

    def __eq__(self, other):
        try:
            return (type(self) == type(other) and self._path == other._path
                    and self._filenode == other._filenode)
        except AttributeError:
            return False

    def __ne__(self, other):
        return not (self == other)

    def filerev(self):
        return self._filerev
    def filenode(self):
        return self._filenode
    @propertycache
    def _flags(self):
        return self._changectx.flags(self._path)
    def flags(self):
        return self._flags
    def filelog(self):
        return self._filelog
    def rev(self):
        return self._changeid
    def linkrev(self):
        return self._filelog.linkrev(self._filerev)
    def node(self):
        return self._changectx.node()
    def hex(self):
        return self._changectx.hex()
    def user(self):
        return self._changectx.user()
    def date(self):
        return self._changectx.date()
    def files(self):
        return self._changectx.files()
    def description(self):
        return self._changectx.description()
    def branch(self):
        return self._changectx.branch()
    def extra(self):
        return self._changectx.extra()
    def phase(self):
        return self._changectx.phase()
    def phasestr(self):
        return self._changectx.phasestr()
    def manifest(self):
        return self._changectx.manifest()
    def changectx(self):
        return self._changectx
+    def renamed(self):
+        return self._copied
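    # renamed() reports the copy/rename source of this file revision;
    # _copied is presumably provided by the concrete contexts (e.g. as a
    # cached property), so repeated calls avoid re-reading rename metadata.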
    def repo(self):
        return self._repo

    def path(self):
        return self._path

    def isbinary(self):
        try:
            return util.binary(self.data())
        except IOError:
            return False
    def isexec(self):
        return 'x' in self.flags()
    def islink(self):
        return 'l' in self.flags()

    def isabsent(self):
        """whether this filectx represents a file not in self._changectx

        This is mainly for merge code to detect change/delete conflicts. This is
        expected to be True for all subclasses of basectx."""
        return False

    _customcmp = False
    def cmp(self, fctx):
        """compare with other file context

        returns True if different than fctx.
        """
        if fctx._customcmp:
            return fctx.cmp(self)

        if (fctx._filenode is None
            and (self._repo._encodefilterpats
                 # if file data starts with '\1\n', empty metadata block is
                 # prepended, which adds 4 bytes to filelog.size().
                 or self.size() - 4 == fctx.size())
            or self.size() == fctx.size()):
            return self._filelog.cmp(self._filenode, fctx.data())

        return True

842 def _adjustlinkrev(self, srcrev, inclusive=False):
844 def _adjustlinkrev(self, srcrev, inclusive=False):
843 """return the first ancestor of <srcrev> introducing <fnode>
845 """return the first ancestor of <srcrev> introducing <fnode>
844
846
845 If the linkrev of the file revision does not point to an ancestor of
847 If the linkrev of the file revision does not point to an ancestor of
846 srcrev, we'll walk down the ancestors until we find one introducing
848 srcrev, we'll walk down the ancestors until we find one introducing
847 this file revision.
849 this file revision.
848
850
849 :srcrev: the changeset revision we search ancestors from
851 :srcrev: the changeset revision we search ancestors from
850 :inclusive: if true, the src revision will also be checked
852 :inclusive: if true, the src revision will also be checked
851 """
853 """
852 repo = self._repo
854 repo = self._repo
853 cl = repo.unfiltered().changelog
855 cl = repo.unfiltered().changelog
854 mfl = repo.manifestlog
856 mfl = repo.manifestlog
855 # fetch the linkrev
857 # fetch the linkrev
856 lkr = self.linkrev()
858 lkr = self.linkrev()
857 # hack to reuse ancestor computation when searching for renames
859 # hack to reuse ancestor computation when searching for renames
858 memberanc = getattr(self, '_ancestrycontext', None)
860 memberanc = getattr(self, '_ancestrycontext', None)
859 iteranc = None
861 iteranc = None
860 if srcrev is None:
862 if srcrev is None:
861 # wctx case, used by workingfilectx during mergecopy
863 # wctx case, used by workingfilectx during mergecopy
862 revs = [p.rev() for p in self._repo[None].parents()]
864 revs = [p.rev() for p in self._repo[None].parents()]
863 inclusive = True # we skipped the real (revless) source
865 inclusive = True # we skipped the real (revless) source
864 else:
866 else:
865 revs = [srcrev]
867 revs = [srcrev]
866 if memberanc is None:
868 if memberanc is None:
867 memberanc = iteranc = cl.ancestors(revs, lkr,
869 memberanc = iteranc = cl.ancestors(revs, lkr,
868 inclusive=inclusive)
870 inclusive=inclusive)
869 # check if this linkrev is an ancestor of srcrev
871 # check if this linkrev is an ancestor of srcrev
870 if lkr not in memberanc:
872 if lkr not in memberanc:
871 if iteranc is None:
873 if iteranc is None:
872 iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
874 iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
873 fnode = self._filenode
875 fnode = self._filenode
874 path = self._path
876 path = self._path
875 for a in iteranc:
877 for a in iteranc:
876 ac = cl.read(a) # get changeset data (we avoid object creation)
878 ac = cl.read(a) # get changeset data (we avoid object creation)
877 if path in ac[3]: # checking the 'files' field.
879 if path in ac[3]: # checking the 'files' field.
878 # The file has been touched, check if the content is
880 # The file has been touched, check if the content is
879 # similar to the one we search for.
881 # similar to the one we search for.
880 if fnode == mfl[ac[0]].readfast().get(path):
882 if fnode == mfl[ac[0]].readfast().get(path):
881 return a
883 return a
882 # In theory, we should never get out of that loop without a result.
884 # In theory, we should never get out of that loop without a result.
883 # But if manifest uses a buggy file revision (not children of the
885 # But if manifest uses a buggy file revision (not children of the
884 # one it replaces) we could. Such a buggy situation will likely
886 # one it replaces) we could. Such a buggy situation will likely
885 # result is crash somewhere else at to some point.
887 # result is crash somewhere else at to some point.
886 return lkr
888 return lkr
887
889
888 def introrev(self):
890 def introrev(self):
889 """return the rev of the changeset which introduced this file revision
891 """return the rev of the changeset which introduced this file revision
890
892
891 This method is different from linkrev because it take into account the
893 This method is different from linkrev because it take into account the
892 changeset the filectx was created from. It ensures the returned
894 changeset the filectx was created from. It ensures the returned
893 revision is one of its ancestors. This prevents bugs from
895 revision is one of its ancestors. This prevents bugs from
894 'linkrev-shadowing' when a file revision is used by multiple
896 'linkrev-shadowing' when a file revision is used by multiple
895 changesets.
897 changesets.
896 """
898 """
897 lkr = self.linkrev()
899 lkr = self.linkrev()
898 attrs = vars(self)
900 attrs = vars(self)
899 noctx = not ('_changeid' in attrs or '_changectx' in attrs)
901 noctx = not ('_changeid' in attrs or '_changectx' in attrs)
900 if noctx or self.rev() == lkr:
902 if noctx or self.rev() == lkr:
901 return self.linkrev()
903 return self.linkrev()
902 return self._adjustlinkrev(self.rev(), inclusive=True)
904 return self._adjustlinkrev(self.rev(), inclusive=True)
903
905
904 def _parentfilectx(self, path, fileid, filelog):
906 def _parentfilectx(self, path, fileid, filelog):
905 """create parent filectx keeping ancestry info for _adjustlinkrev()"""
907 """create parent filectx keeping ancestry info for _adjustlinkrev()"""
906 fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
908 fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
907 if '_changeid' in vars(self) or '_changectx' in vars(self):
909 if '_changeid' in vars(self) or '_changectx' in vars(self):
908 # If self is associated with a changeset (probably explicitly
910 # If self is associated with a changeset (probably explicitly
909 # fed), ensure the created filectx is associated with a
911 # fed), ensure the created filectx is associated with a
910 # changeset that is an ancestor of self.changectx.
912 # changeset that is an ancestor of self.changectx.
911 # This lets us later use _adjustlinkrev to get a correct link.
913 # This lets us later use _adjustlinkrev to get a correct link.
912 fctx._descendantrev = self.rev()
914 fctx._descendantrev = self.rev()
913 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
915 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
914 elif '_descendantrev' in vars(self):
916 elif '_descendantrev' in vars(self):
915 # Otherwise propagate _descendantrev if we have one associated.
917 # Otherwise propagate _descendantrev if we have one associated.
916 fctx._descendantrev = self._descendantrev
918 fctx._descendantrev = self._descendantrev
917 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
919 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
918 return fctx
920 return fctx
919
921
920 def parents(self):
922 def parents(self):
921 _path = self._path
923 _path = self._path
922 fl = self._filelog
924 fl = self._filelog
923 parents = self._filelog.parents(self._filenode)
925 parents = self._filelog.parents(self._filenode)
924 pl = [(_path, node, fl) for node in parents if node != nullid]
926 pl = [(_path, node, fl) for node in parents if node != nullid]
925
927
926 r = fl.renamed(self._filenode)
928 r = fl.renamed(self._filenode)
927 if r:
929 if r:
928 # - In the simple rename case, both parents are nullid, pl is empty.
930 # - In the simple rename case, both parents are nullid, pl is empty.
929 # - In case of merge, only one of the parents is nullid and should
931 # - In case of merge, only one of the parents is nullid and should
930 # be replaced with the rename information. This parent is -always-
932 # be replaced with the rename information. This parent is -always-
931 # the first one.
933 # the first one.
932 #
934 #
933 # As nullid has always been filtered out in the previous list
935 # As nullid has always been filtered out in the previous list
934 # comprehension, inserting at index 0 will always result in replacing
936 # comprehension, inserting at index 0 will always result in replacing
935 # the first nullid parent with the rename information.
937 # the first nullid parent with the rename information.
936 pl.insert(0, (r[0], r[1], self._repo.file(r[0])))
938 pl.insert(0, (r[0], r[1], self._repo.file(r[0])))
937
939
938 return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]
940 return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]
939
941
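# Illustrative sketch only (not part of context.py): after a rename, the
# first parent returned above is a filectx for the old path, which is what
# lets copy tracing keep following the file. `fctx` is a hypothetical
# basefilectx instance.
def _demo_parent_paths(fctx):
    """Return the paths of a filectx's parents (sketch)."""
    return [p.path() for p in fctx.parents()]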
940 def p1(self):
942 def p1(self):
941 return self.parents()[0]
943 return self.parents()[0]
942
944
943 def p2(self):
945 def p2(self):
944 p = self.parents()
946 p = self.parents()
945 if len(p) == 2:
947 if len(p) == 2:
946 return p[1]
948 return p[1]
947 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
949 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
948
950
949 def annotate(self, follow=False, linenumber=False, diffopts=None):
951 def annotate(self, follow=False, linenumber=False, diffopts=None):
950 '''returns a list of tuples of ((ctx, number), line) for each line
952 '''returns a list of tuples of ((ctx, number), line) for each line
951 in the file, where ctx is the filectx of the node where
953 in the file, where ctx is the filectx of the node where
952 that line was last changed; if the linenumber parameter is true, number is
954 that line was last changed; if the linenumber parameter is true, number is
953 the line number at its first appearance in the managed file; otherwise,
955 the line number at its first appearance in the managed file; otherwise,
954 number has a fixed value of False.
956 number has a fixed value of False.
955 '''
957 '''
956
958
957 def lines(text):
959 def lines(text):
958 if text.endswith("\n"):
960 if text.endswith("\n"):
959 return text.count("\n")
961 return text.count("\n")
960 return text.count("\n") + int(bool(text))
962 return text.count("\n") + int(bool(text))
961
963
962 if linenumber:
964 if linenumber:
963 def decorate(text, rev):
965 def decorate(text, rev):
964 return ([(rev, i) for i in xrange(1, lines(text) + 1)], text)
966 return ([(rev, i) for i in xrange(1, lines(text) + 1)], text)
965 else:
967 else:
966 def decorate(text, rev):
968 def decorate(text, rev):
967 return ([(rev, False)] * lines(text), text)
969 return ([(rev, False)] * lines(text), text)
968
970
969 def pair(parent, child):
971 def pair(parent, child):
970 blocks = mdiff.allblocks(parent[1], child[1], opts=diffopts)
972 blocks = mdiff.allblocks(parent[1], child[1], opts=diffopts)
971 for (a1, a2, b1, b2), t in blocks:
973 for (a1, a2, b1, b2), t in blocks:
972 # Changed blocks ('!') or blocks made only of blank lines ('~')
974 # Changed blocks ('!') or blocks made only of blank lines ('~')
973 # belong to the child.
975 # belong to the child.
974 if t == '=':
976 if t == '=':
975 child[0][b1:b2] = parent[0][a1:a2]
977 child[0][b1:b2] = parent[0][a1:a2]
976 return child
978 return child
977
979
978 getlog = util.lrucachefunc(lambda x: self._repo.file(x))
980 getlog = util.lrucachefunc(lambda x: self._repo.file(x))
979
981
980 def parents(f):
982 def parents(f):
981 # Cut _descendantrev here to mitigate the penalty of lazy linkrev
983 # Cut _descendantrev here to mitigate the penalty of lazy linkrev
982 # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
984 # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
983 # from the topmost introrev (= srcrev) down to p.linkrev() if it
985 # from the topmost introrev (= srcrev) down to p.linkrev() if it
984 # isn't an ancestor of the srcrev.
986 # isn't an ancestor of the srcrev.
985 f._changeid
987 f._changeid
986 pl = f.parents()
988 pl = f.parents()
987
989
988 # Don't return renamed parents if we aren't following.
990 # Don't return renamed parents if we aren't following.
989 if not follow:
991 if not follow:
990 pl = [p for p in pl if p.path() == f.path()]
992 pl = [p for p in pl if p.path() == f.path()]
991
993
992 # renamed filectx won't have a filelog yet, so set it
994 # renamed filectx won't have a filelog yet, so set it
993 # from the cache to save time
995 # from the cache to save time
994 for p in pl:
996 for p in pl:
995 if not '_filelog' in p.__dict__:
997 if not '_filelog' in p.__dict__:
996 p._filelog = getlog(p.path())
998 p._filelog = getlog(p.path())
997
999
998 return pl
1000 return pl
999
1001
1000 # use linkrev to find the first changeset where self appeared
1002 # use linkrev to find the first changeset where self appeared
1001 base = self
1003 base = self
1002 introrev = self.introrev()
1004 introrev = self.introrev()
1003 if self.rev() != introrev:
1005 if self.rev() != introrev:
1004 base = self.filectx(self.filenode(), changeid=introrev)
1006 base = self.filectx(self.filenode(), changeid=introrev)
1005 if getattr(base, '_ancestrycontext', None) is None:
1007 if getattr(base, '_ancestrycontext', None) is None:
1006 cl = self._repo.changelog
1008 cl = self._repo.changelog
1007 if introrev is None:
1009 if introrev is None:
1008 # wctx is not inclusive, but works because _ancestrycontext
1010 # wctx is not inclusive, but works because _ancestrycontext
1009 # is used to test filelog revisions
1011 # is used to test filelog revisions
1010 ac = cl.ancestors([p.rev() for p in base.parents()],
1012 ac = cl.ancestors([p.rev() for p in base.parents()],
1011 inclusive=True)
1013 inclusive=True)
1012 else:
1014 else:
1013 ac = cl.ancestors([introrev], inclusive=True)
1015 ac = cl.ancestors([introrev], inclusive=True)
1014 base._ancestrycontext = ac
1016 base._ancestrycontext = ac
1015
1017
1016 # This algorithm would prefer to be recursive, but Python is a
1018 # This algorithm would prefer to be recursive, but Python is a
1017 # bit recursion-hostile. Instead we do an iterative
1019 # bit recursion-hostile. Instead we do an iterative
1018 # depth-first search.
1020 # depth-first search.
1019
1021
1020 # 1st DFS pre-calculates pcache and needed
1022 # 1st DFS pre-calculates pcache and needed
1021 visit = [base]
1023 visit = [base]
1022 pcache = {}
1024 pcache = {}
1023 needed = {base: 1}
1025 needed = {base: 1}
1024 while visit:
1026 while visit:
1025 f = visit.pop()
1027 f = visit.pop()
1026 if f in pcache:
1028 if f in pcache:
1027 continue
1029 continue
1028 pl = parents(f)
1030 pl = parents(f)
1029 pcache[f] = pl
1031 pcache[f] = pl
1030 for p in pl:
1032 for p in pl:
1031 needed[p] = needed.get(p, 0) + 1
1033 needed[p] = needed.get(p, 0) + 1
1032 if p not in pcache:
1034 if p not in pcache:
1033 visit.append(p)
1035 visit.append(p)
1034
1036
1035 # 2nd DFS does the actual annotate
1037 # 2nd DFS does the actual annotate
1036 visit[:] = [base]
1038 visit[:] = [base]
1037 hist = {}
1039 hist = {}
1038 while visit:
1040 while visit:
1039 f = visit[-1]
1041 f = visit[-1]
1040 if f in hist:
1042 if f in hist:
1041 visit.pop()
1043 visit.pop()
1042 continue
1044 continue
1043
1045
1044 ready = True
1046 ready = True
1045 pl = pcache[f]
1047 pl = pcache[f]
1046 for p in pl:
1048 for p in pl:
1047 if p not in hist:
1049 if p not in hist:
1048 ready = False
1050 ready = False
1049 visit.append(p)
1051 visit.append(p)
1050 if ready:
1052 if ready:
1051 visit.pop()
1053 visit.pop()
1052 curr = decorate(f.data(), f)
1054 curr = decorate(f.data(), f)
1053 for p in pl:
1055 for p in pl:
1054 curr = pair(hist[p], curr)
1056 curr = pair(hist[p], curr)
1055 if needed[p] == 1:
1057 if needed[p] == 1:
1056 del hist[p]
1058 del hist[p]
1057 del needed[p]
1059 del needed[p]
1058 else:
1060 else:
1059 needed[p] -= 1
1061 needed[p] -= 1
1060
1062
1061 hist[f] = curr
1063 hist[f] = curr
1062 del pcache[f]
1064 del pcache[f]
1063
1065
1064 return zip(hist[base][0], hist[base][1].splitlines(True))
1066 return zip(hist[base][0], hist[base][1].splitlines(True))
1065
1067
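# Illustrative sketch only (not part of context.py): consuming the
# ((ctx, number), line) pairs documented above. `fctx` is a hypothetical
# basefilectx instance.
def _demo_annotate(fctx):
    """Print 'rev:lineno: text' for every line of the file (sketch)."""
    for (actx, lineno), line in fctx.annotate(follow=True, linenumber=True):
        # actx is the filectx that last changed the line; lineno is the
        # line number at its first appearance (False when linenumber=False).
        print('%d:%s: %s' % (actx.rev(), lineno, line.rstrip('\n')))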
1066 def ancestors(self, followfirst=False):
1068 def ancestors(self, followfirst=False):
1067 visit = {}
1069 visit = {}
1068 c = self
1070 c = self
1069 if followfirst:
1071 if followfirst:
1070 cut = 1
1072 cut = 1
1071 else:
1073 else:
1072 cut = None
1074 cut = None
1073
1075
1074 while True:
1076 while True:
1075 for parent in c.parents()[:cut]:
1077 for parent in c.parents()[:cut]:
1076 visit[(parent.linkrev(), parent.filenode())] = parent
1078 visit[(parent.linkrev(), parent.filenode())] = parent
1077 if not visit:
1079 if not visit:
1078 break
1080 break
1079 c = visit.pop(max(visit))
1081 c = visit.pop(max(visit))
1080 yield c
1082 yield c
1081
1083
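# Illustrative sketch only (not part of context.py): walking file history
# through the generator above. `fctx` is a hypothetical basefilectx.
def _demo_file_history(fctx, limit=5):
    """Return [(rev, path), ...] for the first few ancestors (sketch)."""
    out = []
    for actx in fctx.ancestors(followfirst=False):
        out.append((actx.rev(), actx.path()))
        if len(out) >= limit:
            break
    return out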
1082 class filectx(basefilectx):
1084 class filectx(basefilectx):
1083 """A filecontext object makes access to data related to a particular
1085 """A filecontext object makes access to data related to a particular
1084 filerevision convenient."""
1086 filerevision convenient."""
1085 def __init__(self, repo, path, changeid=None, fileid=None,
1087 def __init__(self, repo, path, changeid=None, fileid=None,
1086 filelog=None, changectx=None):
1088 filelog=None, changectx=None):
1087 """changeid can be a changeset revision, node, or tag.
1089 """changeid can be a changeset revision, node, or tag.
1088 fileid can be a file revision or node."""
1090 fileid can be a file revision or node."""
1089 self._repo = repo
1091 self._repo = repo
1090 self._path = path
1092 self._path = path
1091
1093
1092 assert (changeid is not None
1094 assert (changeid is not None
1093 or fileid is not None
1095 or fileid is not None
1094 or changectx is not None), \
1096 or changectx is not None), \
1095 ("bad args: changeid=%r, fileid=%r, changectx=%r"
1097 ("bad args: changeid=%r, fileid=%r, changectx=%r"
1096 % (changeid, fileid, changectx))
1098 % (changeid, fileid, changectx))
1097
1099
1098 if filelog is not None:
1100 if filelog is not None:
1099 self._filelog = filelog
1101 self._filelog = filelog
1100
1102
1101 if changeid is not None:
1103 if changeid is not None:
1102 self._changeid = changeid
1104 self._changeid = changeid
1103 if changectx is not None:
1105 if changectx is not None:
1104 self._changectx = changectx
1106 self._changectx = changectx
1105 if fileid is not None:
1107 if fileid is not None:
1106 self._fileid = fileid
1108 self._fileid = fileid
1107
1109
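# Illustrative sketch only (not part of context.py): the anchoring options
# the assertion above accepts. `repo` is a hypothetical open
# localrepository and `path` a tracked file.
def _demo_make_filectx(repo, path='README'):
    a = filectx(repo, path, changeid='tip')       # anchored by changeset
    b = repo['tip'].filectx(path)                 # via an existing changectx
    c = filectx(repo, path, fileid=a.filenode())  # anchored by file revision
    return a, b, c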
1108 @propertycache
1110 @propertycache
1109 def _changectx(self):
1111 def _changectx(self):
1110 try:
1112 try:
1111 return changectx(self._repo, self._changeid)
1113 return changectx(self._repo, self._changeid)
1112 except error.FilteredRepoLookupError:
1114 except error.FilteredRepoLookupError:
1113 # Linkrev may point to any revision in the repository. When the
1115 # Linkrev may point to any revision in the repository. When the
1114 # repository is filtered, this may lead to `filectx` trying to build a
1116 # repository is filtered, this may lead to `filectx` trying to build a
1115 # `changectx` for a filtered revision. In such a case we fall back to
1117 # `changectx` for a filtered revision. In such a case we fall back to
1116 # creating a `changectx` on the unfiltered version of the repository.
1118 # creating a `changectx` on the unfiltered version of the repository.
1117 # This fallback should not be an issue because `changectx` from
1119 # This fallback should not be an issue because `changectx` from
1118 # `filectx` are not used in complex operations that care about
1120 # `filectx` are not used in complex operations that care about
1119 # filtering.
1121 # filtering.
1120 #
1122 #
1121 # This fallback is a cheap and dirty fix that prevents several
1123 # This fallback is a cheap and dirty fix that prevents several
1122 # crashes. It does not ensure the behavior is correct. However, the
1124 # crashes. It does not ensure the behavior is correct. However, the
1123 # behavior was not correct before filtering either, and "incorrect
1125 # behavior was not correct before filtering either, and "incorrect
1124 # behavior" is seen as better than a "crash".
1126 # behavior" is seen as better than a "crash".
1125 #
1127 #
1126 # Linkrevs have several serious troubles with filtering that are
1128 # Linkrevs have several serious troubles with filtering that are
1127 # complicated to solve. Proper handling of the issue here should be
1129 # complicated to solve. Proper handling of the issue here should be
1128 # considered when solving the linkrev issues is on the table.
1130 # considered when solving the linkrev issues is on the table.
1129 return changectx(self._repo.unfiltered(), self._changeid)
1131 return changectx(self._repo.unfiltered(), self._changeid)
1130
1132
1131 def filectx(self, fileid, changeid=None):
1133 def filectx(self, fileid, changeid=None):
1132 '''opens an arbitrary revision of the file without
1134 '''opens an arbitrary revision of the file without
1133 opening a new filelog'''
1135 opening a new filelog'''
1134 return filectx(self._repo, self._path, fileid=fileid,
1136 return filectx(self._repo, self._path, fileid=fileid,
1135 filelog=self._filelog, changeid=changeid)
1137 filelog=self._filelog, changeid=changeid)
1136
1138
1137 def rawdata(self):
1139 def rawdata(self):
1138 return self._filelog.revision(self._filenode, raw=True)
1140 return self._filelog.revision(self._filenode, raw=True)
1139
1141
1140 def data(self):
1142 def data(self):
1141 try:
1143 try:
1142 return self._filelog.read(self._filenode)
1144 return self._filelog.read(self._filenode)
1143 except error.CensoredNodeError:
1145 except error.CensoredNodeError:
1144 if self._repo.ui.config("censor", "policy", "abort") == "ignore":
1146 if self._repo.ui.config("censor", "policy", "abort") == "ignore":
1145 return ""
1147 return ""
1146 raise error.Abort(_("censored node: %s") % short(self._filenode),
1148 raise error.Abort(_("censored node: %s") % short(self._filenode),
1147 hint=_("set censor.policy to ignore errors"))
1149 hint=_("set censor.policy to ignore errors"))
1148
1150
1149 def size(self):
1151 def size(self):
1150 return self._filelog.size(self._filerev)
1152 return self._filelog.size(self._filerev)
1151
1153
1152 def renamed(self):
1154 @propertycache
1155 def _copied(self):
1153 """check if file was actually renamed in this changeset revision
1156 """check if file was actually renamed in this changeset revision
1154
1157
1155 If a rename is logged in the file revision, we report the copy for the
1158 If a rename is logged in the file revision, we report the copy for the
1156 changeset only if the file revision's linkrev points back to the changeset
1159 changeset only if the file revision's linkrev points back to the changeset
1157 in question or both changeset parents contain different file revisions.
1160 in question or both changeset parents contain different file revisions.
1158 """
1161 """
1159
1162
1160 renamed = self._filelog.renamed(self._filenode)
1163 renamed = self._filelog.renamed(self._filenode)
1161 if not renamed:
1164 if not renamed:
1162 return renamed
1165 return renamed
1163
1166
1164 if self.rev() == self.linkrev():
1167 if self.rev() == self.linkrev():
1165 return renamed
1168 return renamed
1166
1169
1167 name = self.path()
1170 name = self.path()
1168 fnode = self._filenode
1171 fnode = self._filenode
1169 for p in self._changectx.parents():
1172 for p in self._changectx.parents():
1170 try:
1173 try:
1171 if fnode == p.filenode(name):
1174 if fnode == p.filenode(name):
1172 return None
1175 return None
1173 except error.LookupError:
1176 except error.LookupError:
1174 pass
1177 pass
1175 return renamed
1178 return renamed
1176
1179
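# Illustrative sketch only (not part of context.py): a stripped-down
# version of the propertycache pattern applied to _copied above. The real
# util.propertycache differs in details; this only shows the idea that the
# computed value is stored on the instance, so the rename check runs at
# most once per filectx.
class _demopropertycache(object):
    def __init__(self, func):
        self.func = func
        self.name = func.__name__
    def __get__(self, obj, objtype=None):
        if obj is None:
            return self
        value = self.func(obj)
        obj.__dict__[self.name] = value  # later reads skip the descriptor
        return value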
1177 def children(self):
1180 def children(self):
1178 # hard for renames
1181 # hard for renames
1179 c = self._filelog.children(self._filenode)
1182 c = self._filelog.children(self._filenode)
1180 return [filectx(self._repo, self._path, fileid=x,
1183 return [filectx(self._repo, self._path, fileid=x,
1181 filelog=self._filelog) for x in c]
1184 filelog=self._filelog) for x in c]
1182
1185
1183 def _changesrange(fctx1, fctx2, linerange2, diffopts):
1186 def _changesrange(fctx1, fctx2, linerange2, diffopts):
1184 """Return `(diffinrange, linerange1)` where `diffinrange` is True
1187 """Return `(diffinrange, linerange1)` where `diffinrange` is True
1185 if diff from fctx2 to fctx1 has changes in linerange2 and
1188 if diff from fctx2 to fctx1 has changes in linerange2 and
1186 `linerange1` is the new line range for fctx1.
1189 `linerange1` is the new line range for fctx1.
1187 """
1190 """
1188 blocks = mdiff.allblocks(fctx1.data(), fctx2.data(), diffopts)
1191 blocks = mdiff.allblocks(fctx1.data(), fctx2.data(), diffopts)
1189 filteredblocks, linerange1 = mdiff.blocksinrange(blocks, linerange2)
1192 filteredblocks, linerange1 = mdiff.blocksinrange(blocks, linerange2)
1190 diffinrange = any(stype == '!' for _, stype in filteredblocks)
1193 diffinrange = any(stype == '!' for _, stype in filteredblocks)
1191 return diffinrange, linerange1
1194 return diffinrange, linerange1
1192
1195
1193 def blockancestors(fctx, fromline, toline, followfirst=False):
1196 def blockancestors(fctx, fromline, toline, followfirst=False):
1194 """Yield ancestors of `fctx` with respect to the block of lines within
1197 """Yield ancestors of `fctx` with respect to the block of lines within
1195 `fromline`-`toline` range.
1198 `fromline`-`toline` range.
1196 """
1199 """
1197 diffopts = patch.diffopts(fctx._repo.ui)
1200 diffopts = patch.diffopts(fctx._repo.ui)
1198 introrev = fctx.introrev()
1201 introrev = fctx.introrev()
1199 if fctx.rev() != introrev:
1202 if fctx.rev() != introrev:
1200 fctx = fctx.filectx(fctx.filenode(), changeid=introrev)
1203 fctx = fctx.filectx(fctx.filenode(), changeid=introrev)
1201 visit = {(fctx.linkrev(), fctx.filenode()): (fctx, (fromline, toline))}
1204 visit = {(fctx.linkrev(), fctx.filenode()): (fctx, (fromline, toline))}
1202 while visit:
1205 while visit:
1203 c, linerange2 = visit.pop(max(visit))
1206 c, linerange2 = visit.pop(max(visit))
1204 pl = c.parents()
1207 pl = c.parents()
1205 if followfirst:
1208 if followfirst:
1206 pl = pl[:1]
1209 pl = pl[:1]
1207 if not pl:
1210 if not pl:
1208 # The block originates from the initial revision.
1211 # The block originates from the initial revision.
1209 yield c, linerange2
1212 yield c, linerange2
1210 continue
1213 continue
1211 inrange = False
1214 inrange = False
1212 for p in pl:
1215 for p in pl:
1213 inrangep, linerange1 = _changesrange(p, c, linerange2, diffopts)
1216 inrangep, linerange1 = _changesrange(p, c, linerange2, diffopts)
1214 inrange = inrange or inrangep
1217 inrange = inrange or inrangep
1215 if linerange1[0] == linerange1[1]:
1218 if linerange1[0] == linerange1[1]:
1216 # Parent's linerange is empty, meaning that the block got
1219 # Parent's linerange is empty, meaning that the block got
1217 # introduced in this revision; no need to go further in this
1220 # introduced in this revision; no need to go further in this
1218 # branch.
1221 # branch.
1219 continue
1222 continue
1220 # Set _descendantrev with 'c' (a known descendant) so that, when
1223 # Set _descendantrev with 'c' (a known descendant) so that, when
1221 # _adjustlinkrev is called for 'p', it receives this descendant
1224 # _adjustlinkrev is called for 'p', it receives this descendant
1222 # (as srcrev) instead of the possibly topmost introrev.
1225 # (as srcrev) instead of the possibly topmost introrev.
1223 p._descendantrev = c.rev()
1226 p._descendantrev = c.rev()
1224 visit[p.linkrev(), p.filenode()] = p, linerange1
1227 visit[p.linkrev(), p.filenode()] = p, linerange1
1225 if inrange:
1228 if inrange:
1226 yield c, linerange2
1229 yield c, linerange2
1227
1230
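# Illustrative sketch only (not part of context.py): a line-range history
# query built on the generator above. `fctx`, `fromline` and `toline` are
# hypothetical caller-supplied values.
def _demo_blockhistory(fctx, fromline, toline):
    """Return [(rev, (fromline, toline)), ...] for ancestors touching the
    block (sketch)."""
    return [(c.rev(), linerange)
            for c, linerange in blockancestors(fctx, fromline, toline)]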
1228 def blockdescendants(fctx, fromline, toline):
1231 def blockdescendants(fctx, fromline, toline):
1229 """Yield descendants of `fctx` with respect to the block of lines within
1232 """Yield descendants of `fctx` with respect to the block of lines within
1230 `fromline`-`toline` range.
1233 `fromline`-`toline` range.
1231 """
1234 """
1232 # First possibly yield 'fctx' if it has changes in range with respect to
1235 # First possibly yield 'fctx' if it has changes in range with respect to
1233 # its parents.
1236 # its parents.
1234 try:
1237 try:
1235 c, linerange1 = next(blockancestors(fctx, fromline, toline))
1238 c, linerange1 = next(blockancestors(fctx, fromline, toline))
1236 except StopIteration:
1239 except StopIteration:
1237 pass
1240 pass
1238 else:
1241 else:
1239 if c == fctx:
1242 if c == fctx:
1240 yield c, linerange1
1243 yield c, linerange1
1241
1244
1242 diffopts = patch.diffopts(fctx._repo.ui)
1245 diffopts = patch.diffopts(fctx._repo.ui)
1243 fl = fctx.filelog()
1246 fl = fctx.filelog()
1244 seen = {fctx.filerev(): (fctx, (fromline, toline))}
1247 seen = {fctx.filerev(): (fctx, (fromline, toline))}
1245 for i in fl.descendants([fctx.filerev()]):
1248 for i in fl.descendants([fctx.filerev()]):
1246 c = fctx.filectx(i)
1249 c = fctx.filectx(i)
1247 inrange = False
1250 inrange = False
1248 for x in fl.parentrevs(i):
1251 for x in fl.parentrevs(i):
1249 try:
1252 try:
1250 p, linerange2 = seen[x]
1253 p, linerange2 = seen[x]
1251 except KeyError:
1254 except KeyError:
1252 # nullrev or other branch
1255 # nullrev or other branch
1253 continue
1256 continue
1254 inrangep, linerange1 = _changesrange(c, p, linerange2, diffopts)
1257 inrangep, linerange1 = _changesrange(c, p, linerange2, diffopts)
1255 inrange = inrange or inrangep
1258 inrange = inrange or inrangep
1256 # If revision 'i' has been seen (it's a merge), we assume that its
1259 # If revision 'i' has been seen (it's a merge), we assume that its
1257 # line range is the same independently of which parent was used
1260 # line range is the same independently of which parent was used
1258 # to compute it.
1261 # to compute it.
1259 assert i not in seen or seen[i][1] == linerange1, (
1262 assert i not in seen or seen[i][1] == linerange1, (
1260 'computed line range for %s is not consistent between '
1263 'computed line range for %s is not consistent between '
1261 'ancestor branches' % c)
1264 'ancestor branches' % c)
1262 seen[i] = c, linerange1
1265 seen[i] = c, linerange1
1263 if inrange:
1266 if inrange:
1264 yield c, linerange1
1267 yield c, linerange1
1265
1268
1266 class committablectx(basectx):
1269 class committablectx(basectx):
1267 """A committablectx object provides common functionality for a context that
1270 """A committablectx object provides common functionality for a context that
1268 wants the ability to commit, e.g. workingctx or memctx."""
1271 wants the ability to commit, e.g. workingctx or memctx."""
1269 def __init__(self, repo, text="", user=None, date=None, extra=None,
1272 def __init__(self, repo, text="", user=None, date=None, extra=None,
1270 changes=None):
1273 changes=None):
1271 self._repo = repo
1274 self._repo = repo
1272 self._rev = None
1275 self._rev = None
1273 self._node = None
1276 self._node = None
1274 self._text = text
1277 self._text = text
1275 if date:
1278 if date:
1276 self._date = util.parsedate(date)
1279 self._date = util.parsedate(date)
1277 if user:
1280 if user:
1278 self._user = user
1281 self._user = user
1279 if changes:
1282 if changes:
1280 self._status = changes
1283 self._status = changes
1281
1284
1282 self._extra = {}
1285 self._extra = {}
1283 if extra:
1286 if extra:
1284 self._extra = extra.copy()
1287 self._extra = extra.copy()
1285 if 'branch' not in self._extra:
1288 if 'branch' not in self._extra:
1286 try:
1289 try:
1287 branch = encoding.fromlocal(self._repo.dirstate.branch())
1290 branch = encoding.fromlocal(self._repo.dirstate.branch())
1288 except UnicodeDecodeError:
1291 except UnicodeDecodeError:
1289 raise error.Abort(_('branch name not in UTF-8!'))
1292 raise error.Abort(_('branch name not in UTF-8!'))
1290 self._extra['branch'] = branch
1293 self._extra['branch'] = branch
1291 if self._extra['branch'] == '':
1294 if self._extra['branch'] == '':
1292 self._extra['branch'] = 'default'
1295 self._extra['branch'] = 'default'
1293
1296
1294 def __str__(self):
1297 def __str__(self):
1295 return str(self._parents[0]) + "+"
1298 return str(self._parents[0]) + "+"
1296
1299
1297 def __nonzero__(self):
1300 def __nonzero__(self):
1298 return True
1301 return True
1299
1302
1300 __bool__ = __nonzero__
1303 __bool__ = __nonzero__
1301
1304
1302 def _buildflagfunc(self):
1305 def _buildflagfunc(self):
1303 # Create a fallback function for getting file flags when the
1306 # Create a fallback function for getting file flags when the
1304 # filesystem doesn't support them
1307 # filesystem doesn't support them
1305
1308
1306 copiesget = self._repo.dirstate.copies().get
1309 copiesget = self._repo.dirstate.copies().get
1307 parents = self.parents()
1310 parents = self.parents()
1308 if len(parents) < 2:
1311 if len(parents) < 2:
1309 # when we have one parent, it's easy: copy from parent
1312 # when we have one parent, it's easy: copy from parent
1310 man = parents[0].manifest()
1313 man = parents[0].manifest()
1311 def func(f):
1314 def func(f):
1312 f = copiesget(f, f)
1315 f = copiesget(f, f)
1313 return man.flags(f)
1316 return man.flags(f)
1314 else:
1317 else:
1315 # merges are tricky: we try to reconstruct the unstored
1318 # merges are tricky: we try to reconstruct the unstored
1316 # result from the merge (issue1802)
1319 # result from the merge (issue1802)
1317 p1, p2 = parents
1320 p1, p2 = parents
1318 pa = p1.ancestor(p2)
1321 pa = p1.ancestor(p2)
1319 m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()
1322 m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()
1320
1323
1321 def func(f):
1324 def func(f):
1322 f = copiesget(f, f) # may be wrong for merges with copies
1325 f = copiesget(f, f) # may be wrong for merges with copies
1323 fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
1326 fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
1324 if fl1 == fl2:
1327 if fl1 == fl2:
1325 return fl1
1328 return fl1
1326 if fl1 == fla:
1329 if fl1 == fla:
1327 return fl2
1330 return fl2
1328 if fl2 == fla:
1331 if fl2 == fla:
1329 return fl1
1332 return fl1
1330 return '' # punt for conflicts
1333 return '' # punt for conflicts
1331
1334
1332 return func
1335 return func
1333
1336
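# Illustrative sketch only (not part of context.py): the three-way flag
# resolution used for merges above, shown in isolation with made-up inputs.
def _demo_mergeflags(fl1, fl2, fla):
    """Pick a flag ('', 'x' or 'l') from two parents and their ancestor."""
    if fl1 == fl2:
        return fl1
    if fl1 == fla:   # only p2 changed the flag, so keep p2's value
        return fl2
    if fl2 == fla:   # only p1 changed the flag, so keep p1's value
        return fl1
    return ''        # both sides changed it differently: punt

# For example, _demo_mergeflags('x', '', '') == 'x' (p1 added the exec bit).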
1334 @propertycache
1337 @propertycache
1335 def _flagfunc(self):
1338 def _flagfunc(self):
1336 return self._repo.dirstate.flagfunc(self._buildflagfunc)
1339 return self._repo.dirstate.flagfunc(self._buildflagfunc)
1337
1340
1338 @propertycache
1341 @propertycache
1339 def _status(self):
1342 def _status(self):
1340 return self._repo.status()
1343 return self._repo.status()
1341
1344
1342 @propertycache
1345 @propertycache
1343 def _user(self):
1346 def _user(self):
1344 return self._repo.ui.username()
1347 return self._repo.ui.username()
1345
1348
1346 @propertycache
1349 @propertycache
1347 def _date(self):
1350 def _date(self):
1348 return util.makedate()
1351 return util.makedate()
1349
1352
1350 def subrev(self, subpath):
1353 def subrev(self, subpath):
1351 return None
1354 return None
1352
1355
1353 def manifestnode(self):
1356 def manifestnode(self):
1354 return None
1357 return None
1355 def user(self):
1358 def user(self):
1356 return self._user or self._repo.ui.username()
1359 return self._user or self._repo.ui.username()
1357 def date(self):
1360 def date(self):
1358 return self._date
1361 return self._date
1359 def description(self):
1362 def description(self):
1360 return self._text
1363 return self._text
1361 def files(self):
1364 def files(self):
1362 return sorted(self._status.modified + self._status.added +
1365 return sorted(self._status.modified + self._status.added +
1363 self._status.removed)
1366 self._status.removed)
1364
1367
1365 def modified(self):
1368 def modified(self):
1366 return self._status.modified
1369 return self._status.modified
1367 def added(self):
1370 def added(self):
1368 return self._status.added
1371 return self._status.added
1369 def removed(self):
1372 def removed(self):
1370 return self._status.removed
1373 return self._status.removed
1371 def deleted(self):
1374 def deleted(self):
1372 return self._status.deleted
1375 return self._status.deleted
1373 def branch(self):
1376 def branch(self):
1374 return encoding.tolocal(self._extra['branch'])
1377 return encoding.tolocal(self._extra['branch'])
1375 def closesbranch(self):
1378 def closesbranch(self):
1376 return 'close' in self._extra
1379 return 'close' in self._extra
1377 def extra(self):
1380 def extra(self):
1378 return self._extra
1381 return self._extra
1379
1382
1380 def tags(self):
1383 def tags(self):
1381 return []
1384 return []
1382
1385
1383 def bookmarks(self):
1386 def bookmarks(self):
1384 b = []
1387 b = []
1385 for p in self.parents():
1388 for p in self.parents():
1386 b.extend(p.bookmarks())
1389 b.extend(p.bookmarks())
1387 return b
1390 return b
1388
1391
1389 def phase(self):
1392 def phase(self):
1390 phase = phases.draft # default phase to draft
1393 phase = phases.draft # default phase to draft
1391 for p in self.parents():
1394 for p in self.parents():
1392 phase = max(phase, p.phase())
1395 phase = max(phase, p.phase())
1393 return phase
1396 return phase
1394
1397
1395 def hidden(self):
1398 def hidden(self):
1396 return False
1399 return False
1397
1400
1398 def children(self):
1401 def children(self):
1399 return []
1402 return []
1400
1403
1401 def flags(self, path):
1404 def flags(self, path):
1402 if r'_manifest' in self.__dict__:
1405 if r'_manifest' in self.__dict__:
1403 try:
1406 try:
1404 return self._manifest.flags(path)
1407 return self._manifest.flags(path)
1405 except KeyError:
1408 except KeyError:
1406 return ''
1409 return ''
1407
1410
1408 try:
1411 try:
1409 return self._flagfunc(path)
1412 return self._flagfunc(path)
1410 except OSError:
1413 except OSError:
1411 return ''
1414 return ''
1412
1415
1413 def ancestor(self, c2):
1416 def ancestor(self, c2):
1414 """return the "best" ancestor context of self and c2"""
1417 """return the "best" ancestor context of self and c2"""
1415 return self._parents[0].ancestor(c2) # punt on two parents for now
1418 return self._parents[0].ancestor(c2) # punt on two parents for now
1416
1419
1417 def walk(self, match):
1420 def walk(self, match):
1418 '''Generates matching file names.'''
1421 '''Generates matching file names.'''
1419 return sorted(self._repo.dirstate.walk(match, sorted(self.substate),
1422 return sorted(self._repo.dirstate.walk(match, sorted(self.substate),
1420 True, False))
1423 True, False))
1421
1424
1422 def matches(self, match):
1425 def matches(self, match):
1423 return sorted(self._repo.dirstate.matches(match))
1426 return sorted(self._repo.dirstate.matches(match))
1424
1427
1425 def ancestors(self):
1428 def ancestors(self):
1426 for p in self._parents:
1429 for p in self._parents:
1427 yield p
1430 yield p
1428 for a in self._repo.changelog.ancestors(
1431 for a in self._repo.changelog.ancestors(
1429 [p.rev() for p in self._parents]):
1432 [p.rev() for p in self._parents]):
1430 yield changectx(self._repo, a)
1433 yield changectx(self._repo, a)
1431
1434
1432 def markcommitted(self, node):
1435 def markcommitted(self, node):
1433 """Perform post-commit cleanup necessary after committing this ctx
1436 """Perform post-commit cleanup necessary after committing this ctx
1434
1437
1435 Specifically, this updates the backing stores this working context
1438 Specifically, this updates the backing stores this working context
1436 wraps to reflect the fact that the changes represented by this
1439 wraps to reflect the fact that the changes represented by this
1437 workingctx have been committed. For example, it marks
1440 workingctx have been committed. For example, it marks
1438 modified and added files as normal in the dirstate.
1441 modified and added files as normal in the dirstate.
1439
1442
1440 """
1443 """
1441
1444
1442 self._repo.dirstate.beginparentchange()
1445 self._repo.dirstate.beginparentchange()
1443 for f in self.modified() + self.added():
1446 for f in self.modified() + self.added():
1444 self._repo.dirstate.normal(f)
1447 self._repo.dirstate.normal(f)
1445 for f in self.removed():
1448 for f in self.removed():
1446 self._repo.dirstate.drop(f)
1449 self._repo.dirstate.drop(f)
1447 self._repo.dirstate.setparents(node)
1450 self._repo.dirstate.setparents(node)
1448 self._repo.dirstate.endparentchange()
1451 self._repo.dirstate.endparentchange()
1449
1452
1450 # write changes out explicitly, because nesting wlock at
1453 # write changes out explicitly, because nesting wlock at
1451 # runtime may prevent 'wlock.release()' in 'repo.commit()'
1454 # runtime may prevent 'wlock.release()' in 'repo.commit()'
1452 # from immediately doing so for subsequent changing files
1455 # from immediately doing so for subsequent changing files
1453 self._repo.dirstate.write(self._repo.currenttransaction())
1456 self._repo.dirstate.write(self._repo.currenttransaction())
1454
1457
1455 class workingctx(committablectx):
1458 class workingctx(committablectx):
1456 """A workingctx object makes access to data related to
1459 """A workingctx object makes access to data related to
1457 the current working directory convenient.
1460 the current working directory convenient.
1458 date - any valid date string or (unixtime, offset), or None.
1461 date - any valid date string or (unixtime, offset), or None.
1459 user - username string, or None.
1462 user - username string, or None.
1460 extra - a dictionary of extra values, or None.
1463 extra - a dictionary of extra values, or None.
1461 changes - a list of file lists as returned by localrepo.status()
1464 changes - a list of file lists as returned by localrepo.status()
1462 or None to use the repository status.
1465 or None to use the repository status.
1463 """
1466 """
1464 def __init__(self, repo, text="", user=None, date=None, extra=None,
1467 def __init__(self, repo, text="", user=None, date=None, extra=None,
1465 changes=None):
1468 changes=None):
1466 super(workingctx, self).__init__(repo, text, user, date, extra, changes)
1469 super(workingctx, self).__init__(repo, text, user, date, extra, changes)
1467
1470
1468 def __iter__(self):
1471 def __iter__(self):
1469 d = self._repo.dirstate
1472 d = self._repo.dirstate
1470 for f in d:
1473 for f in d:
1471 if d[f] != 'r':
1474 if d[f] != 'r':
1472 yield f
1475 yield f
1473
1476
1474 def __contains__(self, key):
1477 def __contains__(self, key):
1475 return self._repo.dirstate[key] not in "?r"
1478 return self._repo.dirstate[key] not in "?r"
1476
1479
1477 def hex(self):
1480 def hex(self):
1478 return hex(wdirid)
1481 return hex(wdirid)
1479
1482
1480 @propertycache
1483 @propertycache
1481 def _parents(self):
1484 def _parents(self):
1482 p = self._repo.dirstate.parents()
1485 p = self._repo.dirstate.parents()
1483 if p[1] == nullid:
1486 if p[1] == nullid:
1484 p = p[:-1]
1487 p = p[:-1]
1485 return [changectx(self._repo, x) for x in p]
1488 return [changectx(self._repo, x) for x in p]
1486
1489
1487 def filectx(self, path, filelog=None):
1490 def filectx(self, path, filelog=None):
1488 """get a file context from the working directory"""
1491 """get a file context from the working directory"""
1489 return workingfilectx(self._repo, path, workingctx=self,
1492 return workingfilectx(self._repo, path, workingctx=self,
1490 filelog=filelog)
1493 filelog=filelog)
1491
1494
1492 def dirty(self, missing=False, merge=True, branch=True):
1495 def dirty(self, missing=False, merge=True, branch=True):
1493 "check whether a working directory is modified"
1496 "check whether a working directory is modified"
1494 # check subrepos first
1497 # check subrepos first
1495 for s in sorted(self.substate):
1498 for s in sorted(self.substate):
1496 if self.sub(s).dirty():
1499 if self.sub(s).dirty():
1497 return True
1500 return True
1498 # check current working dir
1501 # check current working dir
1499 return ((merge and self.p2()) or
1502 return ((merge and self.p2()) or
1500 (branch and self.branch() != self.p1().branch()) or
1503 (branch and self.branch() != self.p1().branch()) or
1501 self.modified() or self.added() or self.removed() or
1504 self.modified() or self.added() or self.removed() or
1502 (missing and self.deleted()))
1505 (missing and self.deleted()))
1503
1506
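# Illustrative sketch only (not part of context.py): a typical guard built
# on dirty(), refusing to proceed with an unclean working directory.
# `repo` is a hypothetical open localrepository.
def _demo_require_clean(repo):
    wctx = repo[None]  # the working directory context
    if wctx.dirty(missing=True):
        raise error.Abort(_('uncommitted changes'))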
1504 def add(self, list, prefix=""):
1507 def add(self, list, prefix=""):
1505 join = lambda f: os.path.join(prefix, f)
1508 join = lambda f: os.path.join(prefix, f)
1506 with self._repo.wlock():
1509 with self._repo.wlock():
1507 ui, ds = self._repo.ui, self._repo.dirstate
1510 ui, ds = self._repo.ui, self._repo.dirstate
1508 rejected = []
1511 rejected = []
1509 lstat = self._repo.wvfs.lstat
1512 lstat = self._repo.wvfs.lstat
1510 for f in list:
1513 for f in list:
1511 scmutil.checkportable(ui, join(f))
1514 scmutil.checkportable(ui, join(f))
1512 try:
1515 try:
1513 st = lstat(f)
1516 st = lstat(f)
1514 except OSError:
1517 except OSError:
1515 ui.warn(_("%s does not exist!\n") % join(f))
1518 ui.warn(_("%s does not exist!\n") % join(f))
1516 rejected.append(f)
1519 rejected.append(f)
1517 continue
1520 continue
1518 if st.st_size > 10000000:
1521 if st.st_size > 10000000:
1519 ui.warn(_("%s: up to %d MB of RAM may be required "
1522 ui.warn(_("%s: up to %d MB of RAM may be required "
1520 "to manage this file\n"
1523 "to manage this file\n"
1521 "(use 'hg revert %s' to cancel the "
1524 "(use 'hg revert %s' to cancel the "
1522 "pending addition)\n")
1525 "pending addition)\n")
1523 % (f, 3 * st.st_size // 1000000, join(f)))
1526 % (f, 3 * st.st_size // 1000000, join(f)))
1524 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1527 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1525 ui.warn(_("%s not added: only files and symlinks "
1528 ui.warn(_("%s not added: only files and symlinks "
1526 "supported currently\n") % join(f))
1529 "supported currently\n") % join(f))
1527 rejected.append(f)
1530 rejected.append(f)
1528 elif ds[f] in 'amn':
1531 elif ds[f] in 'amn':
1529 ui.warn(_("%s already tracked!\n") % join(f))
1532 ui.warn(_("%s already tracked!\n") % join(f))
1530 elif ds[f] == 'r':
1533 elif ds[f] == 'r':
1531 ds.normallookup(f)
1534 ds.normallookup(f)
1532 else:
1535 else:
1533 ds.add(f)
1536 ds.add(f)
1534 return rejected
1537 return rejected
1535
1538
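# Illustrative sketch only (not part of context.py): the arithmetic behind
# the large-file warning above. Files larger than 10,000,000 bytes get a
# warning quoting roughly three times their size in megabytes.
def _demo_ram_estimate(st_size):
    """Return the 'up to N MB of RAM' figure used in the warning."""
    return 3 * st_size // 1000000

# For example, a 50,000,000 byte file is reported as 'up to 150 MB of RAM'.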
1536 def forget(self, files, prefix=""):
1539 def forget(self, files, prefix=""):
1537 join = lambda f: os.path.join(prefix, f)
1540 join = lambda f: os.path.join(prefix, f)
1538 with self._repo.wlock():
1541 with self._repo.wlock():
1539 rejected = []
1542 rejected = []
1540 for f in files:
1543 for f in files:
1541 if f not in self._repo.dirstate:
1544 if f not in self._repo.dirstate:
1542 self._repo.ui.warn(_("%s not tracked!\n") % join(f))
1545 self._repo.ui.warn(_("%s not tracked!\n") % join(f))
1543 rejected.append(f)
1546 rejected.append(f)
1544 elif self._repo.dirstate[f] != 'a':
1547 elif self._repo.dirstate[f] != 'a':
1545 self._repo.dirstate.remove(f)
1548 self._repo.dirstate.remove(f)
1546 else:
1549 else:
1547 self._repo.dirstate.drop(f)
1550 self._repo.dirstate.drop(f)
1548 return rejected
1551 return rejected
1549
1552
1550 def undelete(self, list):
1553 def undelete(self, list):
1551 pctxs = self.parents()
1554 pctxs = self.parents()
1552 with self._repo.wlock():
1555 with self._repo.wlock():
1553 for f in list:
1556 for f in list:
1554 if self._repo.dirstate[f] != 'r':
1557 if self._repo.dirstate[f] != 'r':
1555 self._repo.ui.warn(_("%s not removed!\n") % f)
1558 self._repo.ui.warn(_("%s not removed!\n") % f)
1556 else:
1559 else:
1557 fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f]
1560 fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f]
1558 t = fctx.data()
1561 t = fctx.data()
1559 self._repo.wwrite(f, t, fctx.flags())
1562 self._repo.wwrite(f, t, fctx.flags())
1560 self._repo.dirstate.normal(f)
1563 self._repo.dirstate.normal(f)
1561
1564
1562 def copy(self, source, dest):
1565 def copy(self, source, dest):
1563 try:
1566 try:
1564 st = self._repo.wvfs.lstat(dest)
1567 st = self._repo.wvfs.lstat(dest)
1565 except OSError as err:
1568 except OSError as err:
1566 if err.errno != errno.ENOENT:
1569 if err.errno != errno.ENOENT:
1567 raise
1570 raise
1568 self._repo.ui.warn(_("%s does not exist!\n") % dest)
1571 self._repo.ui.warn(_("%s does not exist!\n") % dest)
1569 return
1572 return
1570 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1573 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1571 self._repo.ui.warn(_("copy failed: %s is not a file or a "
1574 self._repo.ui.warn(_("copy failed: %s is not a file or a "
1572 "symbolic link\n") % dest)
1575 "symbolic link\n") % dest)
1573 else:
1576 else:
1574 with self._repo.wlock():
1577 with self._repo.wlock():
1575 if self._repo.dirstate[dest] in '?':
1578 if self._repo.dirstate[dest] in '?':
1576 self._repo.dirstate.add(dest)
1579 self._repo.dirstate.add(dest)
1577 elif self._repo.dirstate[dest] in 'r':
1580 elif self._repo.dirstate[dest] in 'r':
1578 self._repo.dirstate.normallookup(dest)
1581 self._repo.dirstate.normallookup(dest)
1579 self._repo.dirstate.copy(source, dest)
1582 self._repo.dirstate.copy(source, dest)
1580
1583
1581 def match(self, pats=None, include=None, exclude=None, default='glob',
1584 def match(self, pats=None, include=None, exclude=None, default='glob',
1582 listsubrepos=False, badfn=None):
1585 listsubrepos=False, badfn=None):
1583 if pats is None:
1586 if pats is None:
1584 pats = []
1587 pats = []
1585 r = self._repo
1588 r = self._repo
1586
1589
1587 # Only a case insensitive filesystem needs magic to translate user input
1590 # Only a case insensitive filesystem needs magic to translate user input
1588 # to actual case in the filesystem.
1591 # to actual case in the filesystem.
1589 matcherfunc = matchmod.match
1592 matcherfunc = matchmod.match
1590 if not util.fscasesensitive(r.root):
1593 if not util.fscasesensitive(r.root):
1591 matcherfunc = matchmod.icasefsmatcher
1594 matcherfunc = matchmod.icasefsmatcher
1592 return matcherfunc(r.root, r.getcwd(), pats,
1595 return matcherfunc(r.root, r.getcwd(), pats,
1593 include, exclude, default,
1596 include, exclude, default,
1594 auditor=r.auditor, ctx=self,
1597 auditor=r.auditor, ctx=self,
1595 listsubrepos=listsubrepos, badfn=badfn)
1598 listsubrepos=listsubrepos, badfn=badfn)
1596
1599
1597 def _filtersuspectsymlink(self, files):
1600 def _filtersuspectsymlink(self, files):
1598 if not files or self._repo.dirstate._checklink:
1601 if not files or self._repo.dirstate._checklink:
1599 return files
1602 return files
1600
1603
1601 # Symlink placeholders may get non-symlink-like contents
1604 # Symlink placeholders may get non-symlink-like contents
1602 # via user error or dereferencing by NFS or Samba servers,
1605 # via user error or dereferencing by NFS or Samba servers,
1603 # so we filter out any placeholders that don't look like a
1606 # so we filter out any placeholders that don't look like a
1604 # symlink
1607 # symlink
1605 sane = []
1608 sane = []
1606 for f in files:
1609 for f in files:
1607 if self.flags(f) == 'l':
1610 if self.flags(f) == 'l':
1608 d = self[f].data()
1611 d = self[f].data()
1609 if d == '' or len(d) >= 1024 or '\n' in d or util.binary(d):
1612 if d == '' or len(d) >= 1024 or '\n' in d or util.binary(d):
1610 self._repo.ui.debug('ignoring suspect symlink placeholder'
1613 self._repo.ui.debug('ignoring suspect symlink placeholder'
1611 ' "%s"\n' % f)
1614 ' "%s"\n' % f)
1612 continue
1615 continue
1613 sane.append(f)
1616 sane.append(f)
1614 return sane
1617 return sane
1615
1618
1616 def _checklookup(self, files):
1619 def _checklookup(self, files):
1617 # check for any possibly clean files
1620 # check for any possibly clean files
1618 if not files:
1621 if not files:
1619 return [], []
1622 return [], []
1620
1623
1621 modified = []
1624 modified = []
1622 fixup = []
1625 fixup = []
1623 pctx = self._parents[0]
1626 pctx = self._parents[0]
1624 # do a full compare of any files that might have changed
1627 # do a full compare of any files that might have changed
1625 for f in sorted(files):
1628 for f in sorted(files):
1626 if (f not in pctx or self.flags(f) != pctx.flags(f)
1629 if (f not in pctx or self.flags(f) != pctx.flags(f)
1627 or pctx[f].cmp(self[f])):
1630 or pctx[f].cmp(self[f])):
1628 modified.append(f)
1631 modified.append(f)
1629 else:
1632 else:
1630 fixup.append(f)
1633 fixup.append(f)
1631
1634
1632 # update dirstate for files that are actually clean
1635 # update dirstate for files that are actually clean
1633 if fixup:
1636 if fixup:
1634 try:
1637 try:
1635 # updating the dirstate is optional
1638 # updating the dirstate is optional
1636 # so we don't wait on the lock
1639 # so we don't wait on the lock
1637 # wlock can invalidate the dirstate, so cache normal _after_
1640 # wlock can invalidate the dirstate, so cache normal _after_
1638 # taking the lock
1641 # taking the lock
1639 with self._repo.wlock(False):
1642 with self._repo.wlock(False):
1640 normal = self._repo.dirstate.normal
1643 normal = self._repo.dirstate.normal
1641 for f in fixup:
1644 for f in fixup:
1642 normal(f)
1645 normal(f)
1643 # write changes out explicitly, because nesting
1646 # write changes out explicitly, because nesting
1644 # wlock at runtime may prevent 'wlock.release()'
1647 # wlock at runtime may prevent 'wlock.release()'
1645 # after this block from doing so for subsequent
1648 # after this block from doing so for subsequent
1646 # changing files
1649 # changing files
1647 self._repo.dirstate.write(self._repo.currenttransaction())
1650 self._repo.dirstate.write(self._repo.currenttransaction())
1648 except error.LockError:
1651 except error.LockError:
1649 pass
1652 pass
1650 return modified, fixup
1653 return modified, fixup
1651
1654
1652 def _dirstatestatus(self, match=None, ignored=False, clean=False,
1655 def _dirstatestatus(self, match=None, ignored=False, clean=False,
1653 unknown=False):
1656 unknown=False):
1654 '''Gets the status from the dirstate -- internal use only.'''
1657 '''Gets the status from the dirstate -- internal use only.'''
1655 listignored, listclean, listunknown = ignored, clean, unknown
1658 listignored, listclean, listunknown = ignored, clean, unknown
1656 match = match or matchmod.always(self._repo.root, self._repo.getcwd())
1659 match = match or matchmod.always(self._repo.root, self._repo.getcwd())
1657 subrepos = []
1660 subrepos = []
1658 if '.hgsub' in self:
1661 if '.hgsub' in self:
1659 subrepos = sorted(self.substate)
1662 subrepos = sorted(self.substate)
1660 cmp, s = self._repo.dirstate.status(match, subrepos, listignored,
1663 cmp, s = self._repo.dirstate.status(match, subrepos, listignored,
1661 listclean, listunknown)
1664 listclean, listunknown)
1662
1665
1663 # check for any possibly clean files
1666 # check for any possibly clean files
1664 if cmp:
1667 if cmp:
1665 modified2, fixup = self._checklookup(cmp)
1668 modified2, fixup = self._checklookup(cmp)
1666 s.modified.extend(modified2)
1669 s.modified.extend(modified2)
1667
1670
1668 # update dirstate for files that are actually clean
1671 # update dirstate for files that are actually clean
1669 if fixup and listclean:
1672 if fixup and listclean:
1670 s.clean.extend(fixup)
1673 s.clean.extend(fixup)
1671
1674
1672 if match.always():
1675 if match.always():
1673 # cache for performance
1676 # cache for performance
1674 if s.unknown or s.ignored or s.clean:
1677 if s.unknown or s.ignored or s.clean:
1675 # "_status" is cached with list*=False in the normal route
1678 # "_status" is cached with list*=False in the normal route
1676 self._status = scmutil.status(s.modified, s.added, s.removed,
1679 self._status = scmutil.status(s.modified, s.added, s.removed,
1677 s.deleted, [], [], [])
1680 s.deleted, [], [], [])
1678 else:
1681 else:
1679 self._status = s
1682 self._status = s
1680
1683
1681 return s
1684 return s
1682
1685
1683 @propertycache
1686 @propertycache
1684 def _manifest(self):
1687 def _manifest(self):
1685 """generate a manifest corresponding to the values in self._status
1688 """generate a manifest corresponding to the values in self._status
1686
1689
1687 This reuses the file nodeid from the parent, but we use special node
1690 This reuses the file nodeid from the parent, but we use special node
1688 identifiers for added and modified files. This is used by the manifest
1691 identifiers for added and modified files. This is used by the manifest
1689 merge to see that files are different and by the update logic to avoid
1692 merge to see that files are different and by the update logic to avoid
1690 deleting newly added files.
1693 deleting newly added files.
1691 """
1694 """
1692 return self._buildstatusmanifest(self._status)
1695 return self._buildstatusmanifest(self._status)
1693
1696
1694 def _buildstatusmanifest(self, status):
1697 def _buildstatusmanifest(self, status):
1695 """Builds a manifest that includes the given status results."""
1698 """Builds a manifest that includes the given status results."""
1696 parents = self.parents()
1699 parents = self.parents()
1697
1700
1698 man = parents[0].manifest().copy()
1701 man = parents[0].manifest().copy()
1699
1702
1700 ff = self._flagfunc
1703 ff = self._flagfunc
1701 for i, l in ((addednodeid, status.added),
1704 for i, l in ((addednodeid, status.added),
1702 (modifiednodeid, status.modified)):
1705 (modifiednodeid, status.modified)):
1703 for f in l:
1706 for f in l:
1704 man[f] = i
1707 man[f] = i
1705 try:
1708 try:
1706 man.setflag(f, ff(f))
1709 man.setflag(f, ff(f))
1707 except OSError:
1710 except OSError:
1708 pass
1711 pass
1709
1712
1710 for f in status.deleted + status.removed:
1713 for f in status.deleted + status.removed:
1711 if f in man:
1714 if f in man:
1712 del man[f]
1715 del man[f]
1713
1716
1714 return man
1717 return man
1715
1718
1716 def _buildstatus(self, other, s, match, listignored, listclean,
1719 def _buildstatus(self, other, s, match, listignored, listclean,
1717 listunknown):
1720 listunknown):
1718 """build a status with respect to another context
1721 """build a status with respect to another context
1719
1722
1720 This includes logic for maintaining the fast path of status when
1723 This includes logic for maintaining the fast path of status when
1721 comparing the working directory against its parent, which is to skip
1724 comparing the working directory against its parent, which is to skip
1722 building a new manifest if self (working directory) is not comparing
1725 building a new manifest if self (working directory) is not comparing
1723 against its parent (repo['.']).
1726 against its parent (repo['.']).
1724 """
1727 """
1725 s = self._dirstatestatus(match, listignored, listclean, listunknown)
1728 s = self._dirstatestatus(match, listignored, listclean, listunknown)
1726 # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
1729 # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
1727 # might have accidentally ended up with the entire contents of the file
1730 # might have accidentally ended up with the entire contents of the file
1728 # they are supposed to be linking to.
1731 # they are supposed to be linking to.
1729 s.modified[:] = self._filtersuspectsymlink(s.modified)
1732 s.modified[:] = self._filtersuspectsymlink(s.modified)
1730 if other != self._repo['.']:
1733 if other != self._repo['.']:
1731 s = super(workingctx, self)._buildstatus(other, s, match,
1734 s = super(workingctx, self)._buildstatus(other, s, match,
1732 listignored, listclean,
1735 listignored, listclean,
1733 listunknown)
1736 listunknown)
1734 return s
1737 return s
1735
1738
1736 def _matchstatus(self, other, match):
1739 def _matchstatus(self, other, match):
1737 """override the match method with a filter for directory patterns
1740 """override the match method with a filter for directory patterns
1738
1741
1742 We use inheritance to customize the match.bad method only in the case of
1745 We use inheritance to customize the match.bad method only in the case of
1740 workingctx since it belongs only to the working directory when
1743 workingctx since it belongs only to the working directory when
1741 comparing against the parent changeset.
1744 comparing against the parent changeset.
1742
1745
1743 If we aren't comparing against the working directory's parent, then we
1746 If we aren't comparing against the working directory's parent, then we
1744 just use the default match object sent to us.
1747 just use the default match object sent to us.
1745 """
1748 """
1746 superself = super(workingctx, self)
1749 superself = super(workingctx, self)
1747 match = superself._matchstatus(other, match)
1750 match = superself._matchstatus(other, match)
1748 if other != self._repo['.']:
1751 if other != self._repo['.']:
1749 def bad(f, msg):
1752 def bad(f, msg):
1750 # 'f' may be a directory pattern from 'match.files()',
1753 # 'f' may be a directory pattern from 'match.files()',
1751 # so 'f not in ctx1' is not enough
1754 # so 'f not in ctx1' is not enough
1752 if f not in other and not other.hasdir(f):
1755 if f not in other and not other.hasdir(f):
1753 self._repo.ui.warn('%s: %s\n' %
1756 self._repo.ui.warn('%s: %s\n' %
1754 (self._repo.dirstate.pathto(f), msg))
1757 (self._repo.dirstate.pathto(f), msg))
1755 match.bad = bad
1758 match.bad = bad
1756 return match
1759 return match
1757
1760
1758 class committablefilectx(basefilectx):
1761 class committablefilectx(basefilectx):
1759 """A committablefilectx provides common functionality for a file context
1762 """A committablefilectx provides common functionality for a file context
1760 that wants the ability to commit, e.g. workingfilectx or memfilectx."""
1763 that wants the ability to commit, e.g. workingfilectx or memfilectx."""
1761 def __init__(self, repo, path, filelog=None, ctx=None):
1764 def __init__(self, repo, path, filelog=None, ctx=None):
1762 self._repo = repo
1765 self._repo = repo
1763 self._path = path
1766 self._path = path
1764 self._changeid = None
1767 self._changeid = None
1765 self._filerev = self._filenode = None
1768 self._filerev = self._filenode = None
1766
1769
1767 if filelog is not None:
1770 if filelog is not None:
1768 self._filelog = filelog
1771 self._filelog = filelog
1769 if ctx:
1772 if ctx:
1770 self._changectx = ctx
1773 self._changectx = ctx
1771
1774
1772 def __nonzero__(self):
1775 def __nonzero__(self):
1773 return True
1776 return True
1774
1777
1775 __bool__ = __nonzero__
1778 __bool__ = __nonzero__
1776
1779
1777 def linkrev(self):
1780 def linkrev(self):
1778 # linked to self._changectx no matter if file is modified or not
1781 # linked to self._changectx no matter if file is modified or not
1779 return self.rev()
1782 return self.rev()
1780
1783
1781 def parents(self):
1784 def parents(self):
1782 '''return parent filectxs, following copies if necessary'''
1785 '''return parent filectxs, following copies if necessary'''
1783 def filenode(ctx, path):
1786 def filenode(ctx, path):
1784 return ctx._manifest.get(path, nullid)
1787 return ctx._manifest.get(path, nullid)
1785
1788
1786 path = self._path
1789 path = self._path
1787 fl = self._filelog
1790 fl = self._filelog
1788 pcl = self._changectx._parents
1791 pcl = self._changectx._parents
1789 renamed = self.renamed()
1792 renamed = self.renamed()
1790
1793
1791 if renamed:
1794 if renamed:
1792 pl = [renamed + (None,)]
1795 pl = [renamed + (None,)]
1793 else:
1796 else:
1794 pl = [(path, filenode(pcl[0], path), fl)]
1797 pl = [(path, filenode(pcl[0], path), fl)]
1795
1798
1796 for pc in pcl[1:]:
1799 for pc in pcl[1:]:
1797 pl.append((path, filenode(pc, path), fl))
1800 pl.append((path, filenode(pc, path), fl))
1798
1801
1799 return [self._parentfilectx(p, fileid=n, filelog=l)
1802 return [self._parentfilectx(p, fileid=n, filelog=l)
1800 for p, n, l in pl if n != nullid]
1803 for p, n, l in pl if n != nullid]
1801
1804
1802 def children(self):
1805 def children(self):
1803 return []
1806 return []
1804
1807
class workingfilectx(committablefilectx):
    """A workingfilectx object makes access to data related to a particular
    file in the working directory convenient."""
    def __init__(self, repo, path, filelog=None, workingctx=None):
        super(workingfilectx, self).__init__(repo, path, filelog, workingctx)

    @propertycache
    def _changectx(self):
        return workingctx(self._repo)

    def data(self):
        return self._repo.wread(self._path)
    def renamed(self):
        rp = self._repo.dirstate.copied(self._path)
        if not rp:
            return None
        return rp, self._changectx._parents[0]._manifest.get(rp, nullid)

    def size(self):
        return self._repo.wvfs.lstat(self._path).st_size
    def date(self):
        t, tz = self._changectx.date()
        try:
            return (self._repo.wvfs.lstat(self._path).st_mtime, tz)
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
            return (t, tz)

    def cmp(self, fctx):
        """compare with other file context

        returns True if different than fctx.
        """
        # fctx should be a filectx (not a workingfilectx)
        # invert comparison to reuse the same code path
        return fctx.cmp(self)

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        self._repo.wvfs.unlinkpath(self._path, ignoremissing=ignoremissing)

    def write(self, data, flags):
        """wraps repo.wwrite"""
        self._repo.wwrite(self._path, data, flags)

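# Example (illustrative sketch, not part of upstream context.py): inspecting a
# tracked file on disk through workingfilectx. `repo` is assumed to exist and
# the path is hypothetical but must be tracked in the working directory parent.
def _example_working_file(repo, path='hello.txt'):
    wfctx = repo[None][path]         # workingfilectx for the on-disk file
    data = wfctx.data()              # raw contents via repo.wread()
    size = wfctx.size()              # lstat()-based, no file read needed
    # cmp() is inverted onto the parent filectx so both sides share one code path
    dirty = wfctx.cmp(repo['.'][path])
    return data, size, dirty
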
class workingcommitctx(workingctx):
    """A workingcommitctx object makes access to data related to
    the revision being committed convenient.

    This hides changes in the working directory, if they aren't
    committed in this context.
    """
    def __init__(self, repo, changes,
                 text="", user=None, date=None, extra=None):
        super(workingctx, self).__init__(repo, text, user, date, extra,
                                         changes)

    def _dirstatestatus(self, match=None, ignored=False, clean=False,
                        unknown=False):
        """Return matched files only in ``self._status``

        Uncommitted files appear "clean" via this context, even if
        they aren't actually so in the working directory.
        """
        match = match or matchmod.always(self._repo.root, self._repo.getcwd())
        if clean:
            clean = [f for f in self._manifest if f not in self._changedset]
        else:
            clean = []
        return scmutil.status([f for f in self._status.modified if match(f)],
                              [f for f in self._status.added if match(f)],
                              [f for f in self._status.removed if match(f)],
                              [], [], [], clean)

    @propertycache
    def _changedset(self):
        """Return the set of files changed in this context
        """
        changed = set(self._status.modified)
        changed.update(self._status.added)
        changed.update(self._status.removed)
        return changed

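# Example (illustrative sketch, not part of upstream context.py): a status
# object listing only the files taking part in a partial commit; other
# working-directory changes are hidden by workingcommitctx. `repo` is assumed
# to exist and the file names are hypothetical.
def _example_partial_commit_status(repo):
    changes = scmutil.status(['modified.txt'],   # modified
                             ['added.txt'],      # added
                             [],                 # removed
                             [], [], [], [])     # deleted, unknown, ignored, clean
    wcctx = workingcommitctx(repo, changes, text='partial commit')
    # files outside ``changes`` are reported as clean through this context
    return wcctx._dirstatestatus(clean=True)
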
def makecachingfilectxfn(func):
    """Create a filectxfn that caches based on the path.

    We can't use util.cachefunc because it uses all arguments as the cache
    key and this creates a cycle since the arguments include the repo and
    memctx.
    """
    cache = {}

    def getfilectx(repo, memctx, path):
        if path not in cache:
            cache[path] = func(repo, memctx, path)
        return cache[path]

    return getfilectx

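# Example (illustrative sketch, not part of upstream context.py): memctx wraps
# any callable filectxfn with makecachingfilectxfn (see __init__ below), so an
# expensive lookup runs at most once per path. The counting here only makes
# the caching visible; all names are hypothetical.
def _example_caching(repo, mctx):
    calls = []

    def expensive(repo, memctx, path):
        calls.append(path)                      # record every real lookup
        return memfilectx(repo, path, 'data\n', memctx=memctx)

    cached = makecachingfilectxfn(expensive)
    cached(repo, mctx, 'a.txt')
    cached(repo, mctx, 'a.txt')                 # served from the cache
    return len(calls)                           # expected: 1
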
class memctx(committablectx):
    """Use memctx to perform in-memory commits via localrepo.commitctx().

    Revision information is supplied at initialization time, while
    related file data is made available through a callback
    mechanism.  'repo' is the current localrepo, 'parents' is a
    sequence of two parent revision identifiers (pass None for every
    missing parent), 'text' is the commit message and 'files' lists
    names of files touched by the revision (normalized and relative to
    repository root).

    filectxfn(repo, memctx, path) is a callable receiving the
    repository, the current memctx object and the normalized path of
    the requested file, relative to repository root. It is called by
    the commit function for every file in 'files', but the call order
    is undefined. If the file is available in the revision being
    committed (updated or added), filectxfn returns a memfilectx
    object. If the file was removed, filectxfn returns None for recent
    Mercurial. Moved files are represented by marking the source file
    removed and the new file added with copy information (see
    memfilectx).

    user receives the committer name and defaults to the current
    repository username, date is the commit date in any format
    supported by util.parsedate() and defaults to the current date,
    extra is a dictionary of metadata or is left empty.
    """

    # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
    # Extensions that need to retain compatibility across Mercurial 3.1 can use
    # this field to determine what to do in filectxfn.
    _returnnoneformissingfiles = True

    def __init__(self, repo, parents, text, files, filectxfn, user=None,
                 date=None, extra=None, editor=False):
        super(memctx, self).__init__(repo, text, user, date, extra)
        self._rev = None
        self._node = None
        parents = [(p or nullid) for p in parents]
        p1, p2 = parents
        self._parents = [changectx(self._repo, p) for p in (p1, p2)]
        files = sorted(set(files))
        self._files = files
        self.substate = {}

        # if store is not callable, wrap it in a function
        if not callable(filectxfn):
            def getfilectx(repo, memctx, path):
                fctx = filectxfn[path]
                # this is weird but apparently we only keep track of one parent
                # (why not only store that instead of a tuple?)
                copied = fctx.renamed()
                if copied:
                    copied = copied[0]
                return memfilectx(repo, path, fctx.data(),
                                  islink=fctx.islink(), isexec=fctx.isexec(),
                                  copied=copied, memctx=memctx)
            self._filectxfn = getfilectx
        else:
            # memoizing increases performance for e.g. vcs convert scenarios.
            self._filectxfn = makecachingfilectxfn(filectxfn)

        if extra:
            self._extra = extra.copy()
        else:
            self._extra = {}

        if self._extra.get('branch', '') == '':
            self._extra['branch'] = 'default'

        if editor:
            self._text = editor(self._repo, self, [])
            self._repo.savecommitmessage(self._text)

    def filectx(self, path, filelog=None):
        """get a file context from the working directory

        Returns None if file doesn't exist and should be removed."""
        return self._filectxfn(self._repo, self, path)

    def commit(self):
        """commit context to the repo"""
        return self._repo.commitctx(self)

    @propertycache
    def _manifest(self):
        """generate a manifest based on the return values of filectxfn"""

        # keep this simple for now; just worry about p1
        pctx = self._parents[0]
        man = pctx.manifest().copy()

        for f in self._status.modified:
            p1node = nullid
            p2node = nullid
            p = pctx[f].parents() # if file isn't in pctx, check p2?
            if len(p) > 0:
                p1node = p[0].filenode()
                if len(p) > 1:
                    p2node = p[1].filenode()
            man[f] = revlog.hash(self[f].data(), p1node, p2node)

        for f in self._status.added:
            man[f] = revlog.hash(self[f].data(), nullid, nullid)

        for f in self._status.removed:
            if f in man:
                del man[f]

        return man

    @propertycache
    def _status(self):
        """Calculate exact status from ``files`` specified at construction
        """
        man1 = self.p1().manifest()
        p2 = self._parents[1]
        # "1 < len(self._parents)" can't be used for checking
        # existence of the 2nd parent, because "memctx._parents" is
        # explicitly initialized as a list whose length is 2.
        if p2.node() != nullid:
            man2 = p2.manifest()
            managing = lambda f: f in man1 or f in man2
        else:
            managing = lambda f: f in man1

        modified, added, removed = [], [], []
        for f in self._files:
            if not managing(f):
                added.append(f)
            elif self[f]:
                modified.append(f)
            else:
                removed.append(f)

        return scmutil.status(modified, added, removed, [], [], [], [])

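# Example (illustrative sketch, not part of upstream context.py): committing a
# new file entirely in memory, without touching the working directory. `repo`
# is assumed to exist; the file name, message and user are hypothetical.
def _example_memctx_commit(repo):
    def getfilectx(repo, mctx, path):
        return memfilectx(repo, path, 'content\n', memctx=mctx)

    mctx = memctx(repo,
                  parents=(repo['.'].node(), None),  # None stands in for nullid
                  text='in-memory commit',
                  files=['new.txt'],
                  filectxfn=getfilectx,
                  user='example user <user@example.com>')
    return mctx.commit()                             # node of the new changeset
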
class memfilectx(committablefilectx):
    """memfilectx represents an in-memory file to commit.

    See memctx and committablefilectx for more details.
    """
    def __init__(self, repo, path, data, islink=False,
                 isexec=False, copied=None, memctx=None):
        """
        path is the normalized file path relative to repository root.
        data is the file content as a string.
        islink is True if the file is a symbolic link.
        isexec is True if the file is executable.
        copied is the source file path if the current file was copied in the
        revision being committed, or None."""
        super(memfilectx, self).__init__(repo, path, None, memctx)
        self._data = data
        self._flags = (islink and 'l' or '') + (isexec and 'x' or '')
        self._copied = None
        if copied:
            self._copied = (copied, nullid)

    def data(self):
        return self._data
    def size(self):
        return len(self.data())
    def renamed(self):
        return self._copied

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        # need to figure out what to do here
        del self._changectx[self._path]

    def write(self, data, flags):
        """wraps repo.wwrite"""
        self._data = data

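# Example (illustrative sketch, not part of upstream context.py): a filectxfn
# expressing a rename for memctx, following the contract documented above:
# the source is reported as removed by returning None, and the destination is
# a memfilectx carrying `copied`. Paths and contents are hypothetical.
def _example_rename_filectxfn(repo, mctx, path):
    if path == 'old.txt':
        return None                     # source of the rename: removed
    return memfilectx(repo, path, 'moved content\n',
                      copied='old.txt', memctx=mctx)
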
class metadataonlyctx(committablectx):
    """Like memctx but reusing the manifest of a different commit.
    Intended to be used by lightweight operations that are creating
    metadata-only changes.

    Revision information is supplied at initialization time. 'repo' is the
    current localrepo, 'ctx' is the original revision whose manifest we are
    reusing, 'parents' is a sequence of two parent revision identifiers (pass
    None for every missing parent), 'text' is the commit message.

    user receives the committer name and defaults to the current repository
    username, date is the commit date in any format supported by
    util.parsedate() and defaults to the current date, extra is a dictionary
    of metadata or is left empty.
    """
    def __new__(cls, repo, originalctx, *args, **kwargs):
        return super(metadataonlyctx, cls).__new__(cls, repo)

    def __init__(self, repo, originalctx, parents, text, user=None, date=None,
                 extra=None, editor=False):
        super(metadataonlyctx, self).__init__(repo, text, user, date, extra)
        self._rev = None
        self._node = None
        self._originalctx = originalctx
        self._manifestnode = originalctx.manifestnode()
        parents = [(p or nullid) for p in parents]
        p1, p2 = self._parents = [changectx(self._repo, p) for p in parents]

        # sanity check to ensure that the reused manifest parents are
        # manifests of our commit parents
        mp1, mp2 = self.manifestctx().parents
        if p1 != nullid and p1.manifestnode() != mp1:
            raise RuntimeError('can\'t reuse the manifest: '
                               'its p1 doesn\'t match the new ctx p1')
        if p2 != nullid and p2.manifestnode() != mp2:
            raise RuntimeError('can\'t reuse the manifest: '
                               'its p2 doesn\'t match the new ctx p2')

        self._files = originalctx.files()
        self.substate = {}

        if extra:
            self._extra = extra.copy()
        else:
            self._extra = {}

        if self._extra.get('branch', '') == '':
            self._extra['branch'] = 'default'

        if editor:
            self._text = editor(self._repo, self, [])
            self._repo.savecommitmessage(self._text)

    def manifestnode(self):
        return self._manifestnode

    @propertycache
    def _manifestctx(self):
        return self._repo.manifestlog[self._manifestnode]

    def filectx(self, path, filelog=None):
        return self._originalctx.filectx(path, filelog=filelog)

    def commit(self):
        """commit context to the repo"""
        return self._repo.commitctx(self)

    @property
    def _manifest(self):
        return self._originalctx.manifest()

    @propertycache
    def _status(self):
        """Calculate exact status from ``files`` specified in the ``origctx``
        and the parents' manifests.
        """
        man1 = self.p1().manifest()
        p2 = self._parents[1]
        # "1 < len(self._parents)" can't be used for checking
        # existence of the 2nd parent, because "metadataonlyctx._parents" is
        # explicitly initialized as a list whose length is 2.
        if p2.node() != nullid:
            man2 = p2.manifest()
            managing = lambda f: f in man1 or f in man2
        else:
            managing = lambda f: f in man1

        modified, added, removed = [], [], []
        for f in self._files:
            if not managing(f):
                added.append(f)
            elif self[f]:
                modified.append(f)
            else:
                removed.append(f)

        return scmutil.status(modified, added, removed, [], [], [], [])
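
# Example (illustrative sketch, not part of upstream context.py): rewriting
# only the metadata of an existing changeset (here its user) while reusing its
# manifest, so no file content is re-hashed. `repo` is assumed to exist; the
# revision and user are hypothetical.
def _example_metadata_rewrite(repo, rev='tip', newuser='someone else'):
    old = repo[rev]
    new = metadataonlyctx(repo, old,
                          parents=(old.p1().node(), old.p2().node()),
                          text=old.description(),
                          user=newuser,
                          date=old.date(),
                          extra=old.extra())
    return repo.commitctx(new)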