context: fix troubled deprecation...
Boris Feld - r33794:4abf34f4 default
@@ -1,2371 +1,2371 b''
1 # context.py - changeset and file context objects for mercurial
1 # context.py - changeset and file context objects for mercurial
2 #
2 #
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import errno
10 import errno
11 import os
11 import os
12 import re
12 import re
13 import stat
13 import stat
14
14
15 from .i18n import _
15 from .i18n import _
16 from .node import (
16 from .node import (
17 addednodeid,
17 addednodeid,
18 bin,
18 bin,
19 hex,
19 hex,
20 modifiednodeid,
20 modifiednodeid,
21 nullid,
21 nullid,
22 nullrev,
22 nullrev,
23 short,
23 short,
24 wdirid,
24 wdirid,
25 wdirnodes,
25 wdirnodes,
26 wdirrev,
26 wdirrev,
27 )
27 )
28 from . import (
28 from . import (
29 encoding,
29 encoding,
30 error,
30 error,
31 fileset,
31 fileset,
32 match as matchmod,
32 match as matchmod,
33 mdiff,
33 mdiff,
34 obsolete as obsmod,
34 obsolete as obsmod,
35 patch,
35 patch,
36 pathutil,
36 pathutil,
37 phases,
37 phases,
38 pycompat,
38 pycompat,
39 repoview,
39 repoview,
40 revlog,
40 revlog,
41 scmutil,
41 scmutil,
42 sparse,
42 sparse,
43 subrepo,
43 subrepo,
44 util,
44 util,
45 )
45 )
46
46
47 propertycache = util.propertycache
47 propertycache = util.propertycache
48
48
49 nonascii = re.compile(r'[^\x21-\x7f]').search
49 nonascii = re.compile(r'[^\x21-\x7f]').search
50
50
51 class basectx(object):
51 class basectx(object):
52 """A basectx object represents the common logic for its children:
52 """A basectx object represents the common logic for its children:
53 changectx: read-only context that is already present in the repo,
53 changectx: read-only context that is already present in the repo,
54 workingctx: a context that represents the working directory and can
54 workingctx: a context that represents the working directory and can
55 be committed,
55 be committed,
56 memctx: a context that represents changes in-memory and can also
56 memctx: a context that represents changes in-memory and can also
57 be committed."""
57 be committed."""
58 def __new__(cls, repo, changeid='', *args, **kwargs):
58 def __new__(cls, repo, changeid='', *args, **kwargs):
59 if isinstance(changeid, basectx):
59 if isinstance(changeid, basectx):
60 return changeid
60 return changeid
61
61
62 o = super(basectx, cls).__new__(cls)
62 o = super(basectx, cls).__new__(cls)
63
63
64 o._repo = repo
64 o._repo = repo
65 o._rev = nullrev
65 o._rev = nullrev
66 o._node = nullid
66 o._node = nullid
67
67
68 return o
68 return o
69
69
70 def __bytes__(self):
70 def __bytes__(self):
71 return short(self.node())
71 return short(self.node())
72
72
73 __str__ = encoding.strmethod(__bytes__)
73 __str__ = encoding.strmethod(__bytes__)
74
74
75 def __int__(self):
75 def __int__(self):
76 return self.rev()
76 return self.rev()
77
77
78 def __repr__(self):
78 def __repr__(self):
79 return r"<%s %s>" % (type(self).__name__, str(self))
79 return r"<%s %s>" % (type(self).__name__, str(self))
80
80
81 def __eq__(self, other):
81 def __eq__(self, other):
82 try:
82 try:
83 return type(self) == type(other) and self._rev == other._rev
83 return type(self) == type(other) and self._rev == other._rev
84 except AttributeError:
84 except AttributeError:
85 return False
85 return False
86
86
87 def __ne__(self, other):
87 def __ne__(self, other):
88 return not (self == other)
88 return not (self == other)
89
89
90 def __contains__(self, key):
90 def __contains__(self, key):
91 return key in self._manifest
91 return key in self._manifest
92
92
93 def __getitem__(self, key):
93 def __getitem__(self, key):
94 return self.filectx(key)
94 return self.filectx(key)
95
95
96 def __iter__(self):
96 def __iter__(self):
97 return iter(self._manifest)
97 return iter(self._manifest)
98
98
99 def _buildstatusmanifest(self, status):
99 def _buildstatusmanifest(self, status):
100 """Builds a manifest that includes the given status results, if this is
100 """Builds a manifest that includes the given status results, if this is
101 a working copy context. For non-working copy contexts, it just returns
101 a working copy context. For non-working copy contexts, it just returns
102 the normal manifest."""
102 the normal manifest."""
103 return self.manifest()
103 return self.manifest()
104
104
105 def _matchstatus(self, other, match):
105 def _matchstatus(self, other, match):
106 """return match.always if match is none
106 """return match.always if match is none
107
107
108 This internal method provides a way for child objects to override the
108 This internal method provides a way for child objects to override the
109 match operator.
109 match operator.
110 """
110 """
111 return match or matchmod.always(self._repo.root, self._repo.getcwd())
111 return match or matchmod.always(self._repo.root, self._repo.getcwd())
112
112
113 def _buildstatus(self, other, s, match, listignored, listclean,
113 def _buildstatus(self, other, s, match, listignored, listclean,
114 listunknown):
114 listunknown):
115 """build a status with respect to another context"""
115 """build a status with respect to another context"""
116 # Load earliest manifest first for caching reasons. More specifically,
116 # Load earliest manifest first for caching reasons. More specifically,
117 # if you have revisions 1000 and 1001, 1001 is probably stored as a
117 # if you have revisions 1000 and 1001, 1001 is probably stored as a
118 # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
118 # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
119 # 1000 and cache it so that when you read 1001, we just need to apply a
119 # 1000 and cache it so that when you read 1001, we just need to apply a
120 # delta to what's in the cache. So that's one full reconstruction + one
120 # delta to what's in the cache. So that's one full reconstruction + one
121 # delta application.
121 # delta application.
122 mf2 = None
122 mf2 = None
123 if self.rev() is not None and self.rev() < other.rev():
123 if self.rev() is not None and self.rev() < other.rev():
124 mf2 = self._buildstatusmanifest(s)
124 mf2 = self._buildstatusmanifest(s)
125 mf1 = other._buildstatusmanifest(s)
125 mf1 = other._buildstatusmanifest(s)
126 if mf2 is None:
126 if mf2 is None:
127 mf2 = self._buildstatusmanifest(s)
127 mf2 = self._buildstatusmanifest(s)
128
128
129 modified, added = [], []
129 modified, added = [], []
130 removed = []
130 removed = []
131 clean = []
131 clean = []
132 deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
132 deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
133 deletedset = set(deleted)
133 deletedset = set(deleted)
134 d = mf1.diff(mf2, match=match, clean=listclean)
134 d = mf1.diff(mf2, match=match, clean=listclean)
135 for fn, value in d.iteritems():
135 for fn, value in d.iteritems():
136 if fn in deletedset:
136 if fn in deletedset:
137 continue
137 continue
138 if value is None:
138 if value is None:
139 clean.append(fn)
139 clean.append(fn)
140 continue
140 continue
141 (node1, flag1), (node2, flag2) = value
141 (node1, flag1), (node2, flag2) = value
142 if node1 is None:
142 if node1 is None:
143 added.append(fn)
143 added.append(fn)
144 elif node2 is None:
144 elif node2 is None:
145 removed.append(fn)
145 removed.append(fn)
146 elif flag1 != flag2:
146 elif flag1 != flag2:
147 modified.append(fn)
147 modified.append(fn)
148 elif node2 not in wdirnodes:
148 elif node2 not in wdirnodes:
149 # When comparing files between two commits, we save time by
149 # When comparing files between two commits, we save time by
150 # not comparing the file contents when the nodeids differ.
150 # not comparing the file contents when the nodeids differ.
151 # Note that this means we incorrectly report a reverted change
151 # Note that this means we incorrectly report a reverted change
152 # to a file as a modification.
152 # to a file as a modification.
153 modified.append(fn)
153 modified.append(fn)
154 elif self[fn].cmp(other[fn]):
154 elif self[fn].cmp(other[fn]):
155 modified.append(fn)
155 modified.append(fn)
156 else:
156 else:
157 clean.append(fn)
157 clean.append(fn)
158
158
159 if removed:
159 if removed:
160 # need to filter files if they are already reported as removed
160 # need to filter files if they are already reported as removed
161 unknown = [fn for fn in unknown if fn not in mf1 and
161 unknown = [fn for fn in unknown if fn not in mf1 and
162 (not match or match(fn))]
162 (not match or match(fn))]
163 ignored = [fn for fn in ignored if fn not in mf1 and
163 ignored = [fn for fn in ignored if fn not in mf1 and
164 (not match or match(fn))]
164 (not match or match(fn))]
165 # if they're deleted, don't report them as removed
165 # if they're deleted, don't report them as removed
166 removed = [fn for fn in removed if fn not in deletedset]
166 removed = [fn for fn in removed if fn not in deletedset]
167
167
168 return scmutil.status(modified, added, removed, deleted, unknown,
168 return scmutil.status(modified, added, removed, deleted, unknown,
169 ignored, clean)
169 ignored, clean)
170
170
171 @propertycache
171 @propertycache
172 def substate(self):
172 def substate(self):
173 return subrepo.state(self, self._repo.ui)
173 return subrepo.state(self, self._repo.ui)
174
174
175 def subrev(self, subpath):
175 def subrev(self, subpath):
176 return self.substate[subpath][1]
176 return self.substate[subpath][1]
177
177
178 def rev(self):
178 def rev(self):
179 return self._rev
179 return self._rev
180 def node(self):
180 def node(self):
181 return self._node
181 return self._node
182 def hex(self):
182 def hex(self):
183 return hex(self.node())
183 return hex(self.node())
184 def manifest(self):
184 def manifest(self):
185 return self._manifest
185 return self._manifest
186 def manifestctx(self):
186 def manifestctx(self):
187 return self._manifestctx
187 return self._manifestctx
188 def repo(self):
188 def repo(self):
189 return self._repo
189 return self._repo
190 def phasestr(self):
190 def phasestr(self):
191 return phases.phasenames[self.phase()]
191 return phases.phasenames[self.phase()]
192 def mutable(self):
192 def mutable(self):
193 return self.phase() > phases.public
193 return self.phase() > phases.public
194
194
195 def getfileset(self, expr):
195 def getfileset(self, expr):
196 return fileset.getfileset(self, expr)
196 return fileset.getfileset(self, expr)
197
197
198 def obsolete(self):
198 def obsolete(self):
199 """True if the changeset is obsolete"""
199 """True if the changeset is obsolete"""
200 return self.rev() in obsmod.getrevs(self._repo, 'obsolete')
200 return self.rev() in obsmod.getrevs(self._repo, 'obsolete')
201
201
202 def extinct(self):
202 def extinct(self):
203 """True if the changeset is extinct"""
203 """True if the changeset is extinct"""
204 return self.rev() in obsmod.getrevs(self._repo, 'extinct')
204 return self.rev() in obsmod.getrevs(self._repo, 'extinct')
205
205
206 def unstable(self):
206 def unstable(self):
207 msg = ("'context.unstable' is deprecated, "
207 msg = ("'context.unstable' is deprecated, "
208 "use 'context.orphan'")
208 "use 'context.orphan'")
209 self._repo.ui.deprecwarn(msg, '4.4')
209 self._repo.ui.deprecwarn(msg, '4.4')
210 return self.orphan()
210 return self.orphan()
211
211
212 def orphan(self):
212 def orphan(self):
213 """True if the changeset is not obsolete but it's ancestor are"""
213 """True if the changeset is not obsolete but it's ancestor are"""
214 return self.rev() in obsmod.getrevs(self._repo, 'orphan')
214 return self.rev() in obsmod.getrevs(self._repo, 'orphan')
215
215
216 def bumped(self):
216 def bumped(self):
217 msg = ("'context.bumped' is deprecated, "
217 msg = ("'context.bumped' is deprecated, "
218 "use 'context.phasedivergent'")
218 "use 'context.phasedivergent'")
219 self._repo.ui.deprecwarn(msg, '4.4')
219 self._repo.ui.deprecwarn(msg, '4.4')
220 return self.phasedivergent()
220 return self.phasedivergent()
221
221
222 def phasedivergent(self):
222 def phasedivergent(self):
223 """True if the changeset try to be a successor of a public changeset
223 """True if the changeset try to be a successor of a public changeset
224
224
225 Only non-public and non-obsolete changesets may be bumped.
225 Only non-public and non-obsolete changesets may be bumped.
226 """
226 """
227 return self.rev() in obsmod.getrevs(self._repo, 'phasedivergent')
227 return self.rev() in obsmod.getrevs(self._repo, 'phasedivergent')
228
228
229 def divergent(self):
229 def divergent(self):
230 msg = ("'context.divergent' is deprecated, "
230 msg = ("'context.divergent' is deprecated, "
231 "use 'context.contentdivergent'")
231 "use 'context.contentdivergent'")
232 self._repo.ui.deprecwarn(msg, '4.4')
232 self._repo.ui.deprecwarn(msg, '4.4')
233 return self.contentdivergent()
233 return self.contentdivergent()
234
234
235 def contentdivergent(self):
235 def contentdivergent(self):
236 """Is a successors of a changeset with multiple possible successors set
236 """Is a successors of a changeset with multiple possible successors set
237
237
238 Only non-public and non-obsolete changesets may be divergent.
238 Only non-public and non-obsolete changesets may be divergent.
239 """
239 """
240 return self.rev() in obsmod.getrevs(self._repo, 'contentdivergent')
240 return self.rev() in obsmod.getrevs(self._repo, 'contentdivergent')
241
241
242 def troubled(self):
242 def troubled(self):
243 msg = ("'context.troubled' is deprecated, "
243 msg = ("'context.troubled' is deprecated, "
244 "use 'context.isunstable'")
244 "use 'context.isunstable'")
245 self._repo.ui.deprecwarn(msg, '4.4')
245 self._repo.ui.deprecwarn(msg, '4.4')
246 return self.unstable()
246 return self.isunstable()
247
247
248 def isunstable(self):
248 def isunstable(self):
249 """True if the changeset is either unstable, bumped or divergent"""
249 """True if the changeset is either unstable, bumped or divergent"""
250 return self.orphan() or self.phasedivergent() or self.contentdivergent()
250 return self.orphan() or self.phasedivergent() or self.contentdivergent()
251
251
252 def troubles(self):
252 def troubles(self):
253 """Keep the old version around in order to avoid breaking extensions
253 """Keep the old version around in order to avoid breaking extensions
254 about different return values.
254 about different return values.
255 """
255 """
256 msg = ("'context.troubles' is deprecated, "
256 msg = ("'context.troubles' is deprecated, "
257 "use 'context.instabilities'")
257 "use 'context.instabilities'")
258 self._repo.ui.deprecwarn(msg, '4.4')
258 self._repo.ui.deprecwarn(msg, '4.4')
259
259
260 troubles = []
260 troubles = []
261 if self.orphan():
261 if self.orphan():
262 troubles.append('orphan')
262 troubles.append('orphan')
263 if self.phasedivergent():
263 if self.phasedivergent():
264 troubles.append('bumped')
264 troubles.append('bumped')
265 if self.contentdivergent():
265 if self.contentdivergent():
266 troubles.append('divergent')
266 troubles.append('divergent')
267 return troubles
267 return troubles
268
268
269 def instabilities(self):
269 def instabilities(self):
270 """return the list of instabilities affecting this changeset.
270 """return the list of instabilities affecting this changeset.
271
271
272 Instabilities are returned as strings. Possible values are:
272 Instabilities are returned as strings. Possible values are:
273 - orphan,
273 - orphan,
274 - phase-divergent,
274 - phase-divergent,
275 - content-divergent.
275 - content-divergent.
276 """
276 """
277 instabilities = []
277 instabilities = []
278 if self.orphan():
278 if self.orphan():
279 instabilities.append('orphan')
279 instabilities.append('orphan')
280 if self.phasedivergent():
280 if self.phasedivergent():
281 instabilities.append('phase-divergent')
281 instabilities.append('phase-divergent')
282 if self.contentdivergent():
282 if self.contentdivergent():
283 instabilities.append('content-divergent')
283 instabilities.append('content-divergent')
284 return instabilities
284 return instabilities
285
285
286 def parents(self):
286 def parents(self):
287 """return contexts for each parent changeset"""
287 """return contexts for each parent changeset"""
288 return self._parents
288 return self._parents
289
289
290 def p1(self):
290 def p1(self):
291 return self._parents[0]
291 return self._parents[0]
292
292
293 def p2(self):
293 def p2(self):
294 parents = self._parents
294 parents = self._parents
295 if len(parents) == 2:
295 if len(parents) == 2:
296 return parents[1]
296 return parents[1]
297 return changectx(self._repo, nullrev)
297 return changectx(self._repo, nullrev)
298
298
299 def _fileinfo(self, path):
299 def _fileinfo(self, path):
300 if r'_manifest' in self.__dict__:
300 if r'_manifest' in self.__dict__:
301 try:
301 try:
302 return self._manifest[path], self._manifest.flags(path)
302 return self._manifest[path], self._manifest.flags(path)
303 except KeyError:
303 except KeyError:
304 raise error.ManifestLookupError(self._node, path,
304 raise error.ManifestLookupError(self._node, path,
305 _('not found in manifest'))
305 _('not found in manifest'))
306 if r'_manifestdelta' in self.__dict__ or path in self.files():
306 if r'_manifestdelta' in self.__dict__ or path in self.files():
307 if path in self._manifestdelta:
307 if path in self._manifestdelta:
308 return (self._manifestdelta[path],
308 return (self._manifestdelta[path],
309 self._manifestdelta.flags(path))
309 self._manifestdelta.flags(path))
310 mfl = self._repo.manifestlog
310 mfl = self._repo.manifestlog
311 try:
311 try:
312 node, flag = mfl[self._changeset.manifest].find(path)
312 node, flag = mfl[self._changeset.manifest].find(path)
313 except KeyError:
313 except KeyError:
314 raise error.ManifestLookupError(self._node, path,
314 raise error.ManifestLookupError(self._node, path,
315 _('not found in manifest'))
315 _('not found in manifest'))
316
316
317 return node, flag
317 return node, flag
318
318
319 def filenode(self, path):
319 def filenode(self, path):
320 return self._fileinfo(path)[0]
320 return self._fileinfo(path)[0]
321
321
322 def flags(self, path):
322 def flags(self, path):
323 try:
323 try:
324 return self._fileinfo(path)[1]
324 return self._fileinfo(path)[1]
325 except error.LookupError:
325 except error.LookupError:
326 return ''
326 return ''
327
327
328 def sub(self, path, allowcreate=True):
328 def sub(self, path, allowcreate=True):
329 '''return a subrepo for the stored revision of path, never wdir()'''
329 '''return a subrepo for the stored revision of path, never wdir()'''
330 return subrepo.subrepo(self, path, allowcreate=allowcreate)
330 return subrepo.subrepo(self, path, allowcreate=allowcreate)
331
331
332 def nullsub(self, path, pctx):
332 def nullsub(self, path, pctx):
333 return subrepo.nullsubrepo(self, path, pctx)
333 return subrepo.nullsubrepo(self, path, pctx)
334
334
335 def workingsub(self, path):
335 def workingsub(self, path):
336 '''return a subrepo for the stored revision, or wdir if this is a wdir
336 '''return a subrepo for the stored revision, or wdir if this is a wdir
337 context.
337 context.
338 '''
338 '''
339 return subrepo.subrepo(self, path, allowwdir=True)
339 return subrepo.subrepo(self, path, allowwdir=True)
340
340
341 def match(self, pats=None, include=None, exclude=None, default='glob',
341 def match(self, pats=None, include=None, exclude=None, default='glob',
342 listsubrepos=False, badfn=None):
342 listsubrepos=False, badfn=None):
343 r = self._repo
343 r = self._repo
344 return matchmod.match(r.root, r.getcwd(), pats,
344 return matchmod.match(r.root, r.getcwd(), pats,
345 include, exclude, default,
345 include, exclude, default,
346 auditor=r.nofsauditor, ctx=self,
346 auditor=r.nofsauditor, ctx=self,
347 listsubrepos=listsubrepos, badfn=badfn)
347 listsubrepos=listsubrepos, badfn=badfn)
348
348
349 def diff(self, ctx2=None, match=None, **opts):
349 def diff(self, ctx2=None, match=None, **opts):
350 """Returns a diff generator for the given contexts and matcher"""
350 """Returns a diff generator for the given contexts and matcher"""
351 if ctx2 is None:
351 if ctx2 is None:
352 ctx2 = self.p1()
352 ctx2 = self.p1()
353 if ctx2 is not None:
353 if ctx2 is not None:
354 ctx2 = self._repo[ctx2]
354 ctx2 = self._repo[ctx2]
355 diffopts = patch.diffopts(self._repo.ui, opts)
355 diffopts = patch.diffopts(self._repo.ui, opts)
356 return patch.diff(self._repo, ctx2, self, match=match, opts=diffopts)
356 return patch.diff(self._repo, ctx2, self, match=match, opts=diffopts)
357
357
358 def dirs(self):
358 def dirs(self):
359 return self._manifest.dirs()
359 return self._manifest.dirs()
360
360
361 def hasdir(self, dir):
361 def hasdir(self, dir):
362 return self._manifest.hasdir(dir)
362 return self._manifest.hasdir(dir)
363
363
364 def status(self, other=None, match=None, listignored=False,
364 def status(self, other=None, match=None, listignored=False,
365 listclean=False, listunknown=False, listsubrepos=False):
365 listclean=False, listunknown=False, listsubrepos=False):
366 """return status of files between two nodes or node and working
366 """return status of files between two nodes or node and working
367 directory.
367 directory.
368
368
369 If other is None, compare this node with working directory.
369 If other is None, compare this node with working directory.
370
370
371 returns (modified, added, removed, deleted, unknown, ignored, clean)
371 returns (modified, added, removed, deleted, unknown, ignored, clean)
372 """
372 """
373
373
374 ctx1 = self
374 ctx1 = self
375 ctx2 = self._repo[other]
375 ctx2 = self._repo[other]
376
376
377 # This next code block is, admittedly, fragile logic that tests for
377 # This next code block is, admittedly, fragile logic that tests for
378 # reversing the contexts and wouldn't need to exist if it weren't for
378 # reversing the contexts and wouldn't need to exist if it weren't for
379 # the fast (and common) code path of comparing the working directory
379 # the fast (and common) code path of comparing the working directory
380 # with its first parent.
380 # with its first parent.
381 #
381 #
382 # What we're aiming for here is the ability to call:
382 # What we're aiming for here is the ability to call:
383 #
383 #
384 # workingctx.status(parentctx)
384 # workingctx.status(parentctx)
385 #
385 #
386 # If we always built the manifest for each context and compared those,
386 # If we always built the manifest for each context and compared those,
387 # then we'd be done. But the special case of the above call means we
387 # then we'd be done. But the special case of the above call means we
388 # just copy the manifest of the parent.
388 # just copy the manifest of the parent.
389 reversed = False
389 reversed = False
390 if (not isinstance(ctx1, changectx)
390 if (not isinstance(ctx1, changectx)
391 and isinstance(ctx2, changectx)):
391 and isinstance(ctx2, changectx)):
392 reversed = True
392 reversed = True
393 ctx1, ctx2 = ctx2, ctx1
393 ctx1, ctx2 = ctx2, ctx1
394
394
395 match = ctx2._matchstatus(ctx1, match)
395 match = ctx2._matchstatus(ctx1, match)
396 r = scmutil.status([], [], [], [], [], [], [])
396 r = scmutil.status([], [], [], [], [], [], [])
397 r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
397 r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
398 listunknown)
398 listunknown)
399
399
400 if reversed:
400 if reversed:
401 # Reverse added and removed. Clear deleted, unknown and ignored as
401 # Reverse added and removed. Clear deleted, unknown and ignored as
402 # these make no sense to reverse.
402 # these make no sense to reverse.
403 r = scmutil.status(r.modified, r.removed, r.added, [], [], [],
403 r = scmutil.status(r.modified, r.removed, r.added, [], [], [],
404 r.clean)
404 r.clean)
405
405
406 if listsubrepos:
406 if listsubrepos:
407 for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
407 for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
408 try:
408 try:
409 rev2 = ctx2.subrev(subpath)
409 rev2 = ctx2.subrev(subpath)
410 except KeyError:
410 except KeyError:
411 # A subrepo that existed in node1 was deleted between
411 # A subrepo that existed in node1 was deleted between
412 # node1 and node2 (inclusive). Thus, ctx2's substate
412 # node1 and node2 (inclusive). Thus, ctx2's substate
413 # won't contain that subpath. The best we can do is ignore it.
413 # won't contain that subpath. The best we can do is ignore it.
414 rev2 = None
414 rev2 = None
415 submatch = matchmod.subdirmatcher(subpath, match)
415 submatch = matchmod.subdirmatcher(subpath, match)
416 s = sub.status(rev2, match=submatch, ignored=listignored,
416 s = sub.status(rev2, match=submatch, ignored=listignored,
417 clean=listclean, unknown=listunknown,
417 clean=listclean, unknown=listunknown,
418 listsubrepos=True)
418 listsubrepos=True)
419 for rfiles, sfiles in zip(r, s):
419 for rfiles, sfiles in zip(r, s):
420 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
420 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
421
421
422 for l in r:
422 for l in r:
423 l.sort()
423 l.sort()
424
424
425 return r
425 return r
426
426
427 def _filterederror(repo, changeid):
427 def _filterederror(repo, changeid):
428 """build an exception to be raised about a filtered changeid
428 """build an exception to be raised about a filtered changeid
429
429
430 This is extracted in a function to help extensions (eg: evolve) to
430 This is extracted in a function to help extensions (eg: evolve) to
431 experiment with various message variants."""
431 experiment with various message variants."""
432 if repo.filtername.startswith('visible'):
432 if repo.filtername.startswith('visible'):
433 msg = _("hidden revision '%s'") % changeid
433 msg = _("hidden revision '%s'") % changeid
434 hint = _('use --hidden to access hidden revisions')
434 hint = _('use --hidden to access hidden revisions')
435 return error.FilteredRepoLookupError(msg, hint=hint)
435 return error.FilteredRepoLookupError(msg, hint=hint)
436 msg = _("filtered revision '%s' (not in '%s' subset)")
436 msg = _("filtered revision '%s' (not in '%s' subset)")
437 msg %= (changeid, repo.filtername)
437 msg %= (changeid, repo.filtername)
438 return error.FilteredRepoLookupError(msg)
438 return error.FilteredRepoLookupError(msg)
439
439
440 class changectx(basectx):
440 class changectx(basectx):
441 """A changecontext object makes access to data related to a particular
441 """A changecontext object makes access to data related to a particular
442 changeset convenient. It represents a read-only context already present in
442 changeset convenient. It represents a read-only context already present in
443 the repo."""
443 the repo."""
444 def __init__(self, repo, changeid=''):
444 def __init__(self, repo, changeid=''):
445 """changeid is a revision number, node, or tag"""
445 """changeid is a revision number, node, or tag"""
446
446
447 # since basectx.__new__ already took care of copying the object, we
447 # since basectx.__new__ already took care of copying the object, we
448 # don't need to do anything in __init__, so we just exit here
448 # don't need to do anything in __init__, so we just exit here
449 if isinstance(changeid, basectx):
449 if isinstance(changeid, basectx):
450 return
450 return
451
451
452 if changeid == '':
452 if changeid == '':
453 changeid = '.'
453 changeid = '.'
454 self._repo = repo
454 self._repo = repo
455
455
456 try:
456 try:
457 if isinstance(changeid, int):
457 if isinstance(changeid, int):
458 self._node = repo.changelog.node(changeid)
458 self._node = repo.changelog.node(changeid)
459 self._rev = changeid
459 self._rev = changeid
460 return
460 return
461 if not pycompat.ispy3 and isinstance(changeid, long):
461 if not pycompat.ispy3 and isinstance(changeid, long):
462 changeid = str(changeid)
462 changeid = str(changeid)
463 if changeid == 'null':
463 if changeid == 'null':
464 self._node = nullid
464 self._node = nullid
465 self._rev = nullrev
465 self._rev = nullrev
466 return
466 return
467 if changeid == 'tip':
467 if changeid == 'tip':
468 self._node = repo.changelog.tip()
468 self._node = repo.changelog.tip()
469 self._rev = repo.changelog.rev(self._node)
469 self._rev = repo.changelog.rev(self._node)
470 return
470 return
471 if changeid == '.' or changeid == repo.dirstate.p1():
471 if changeid == '.' or changeid == repo.dirstate.p1():
472 # this is a hack to delay/avoid loading obsmarkers
472 # this is a hack to delay/avoid loading obsmarkers
473 # when we know that '.' won't be hidden
473 # when we know that '.' won't be hidden
474 self._node = repo.dirstate.p1()
474 self._node = repo.dirstate.p1()
475 self._rev = repo.unfiltered().changelog.rev(self._node)
475 self._rev = repo.unfiltered().changelog.rev(self._node)
476 return
476 return
477 if len(changeid) == 20:
477 if len(changeid) == 20:
478 try:
478 try:
479 self._node = changeid
479 self._node = changeid
480 self._rev = repo.changelog.rev(changeid)
480 self._rev = repo.changelog.rev(changeid)
481 return
481 return
482 except error.FilteredRepoLookupError:
482 except error.FilteredRepoLookupError:
483 raise
483 raise
484 except LookupError:
484 except LookupError:
485 pass
485 pass
486
486
487 try:
487 try:
488 r = int(changeid)
488 r = int(changeid)
489 if '%d' % r != changeid:
489 if '%d' % r != changeid:
490 raise ValueError
490 raise ValueError
491 l = len(repo.changelog)
491 l = len(repo.changelog)
492 if r < 0:
492 if r < 0:
493 r += l
493 r += l
494 if r < 0 or r >= l and r != wdirrev:
494 if r < 0 or r >= l and r != wdirrev:
495 raise ValueError
495 raise ValueError
496 self._rev = r
496 self._rev = r
497 self._node = repo.changelog.node(r)
497 self._node = repo.changelog.node(r)
498 return
498 return
499 except error.FilteredIndexError:
499 except error.FilteredIndexError:
500 raise
500 raise
501 except (ValueError, OverflowError, IndexError):
501 except (ValueError, OverflowError, IndexError):
502 pass
502 pass
503
503
504 if len(changeid) == 40:
504 if len(changeid) == 40:
505 try:
505 try:
506 self._node = bin(changeid)
506 self._node = bin(changeid)
507 self._rev = repo.changelog.rev(self._node)
507 self._rev = repo.changelog.rev(self._node)
508 return
508 return
509 except error.FilteredLookupError:
509 except error.FilteredLookupError:
510 raise
510 raise
511 except (TypeError, LookupError):
511 except (TypeError, LookupError):
512 pass
512 pass
513
513
514 # lookup bookmarks through the name interface
514 # lookup bookmarks through the name interface
515 try:
515 try:
516 self._node = repo.names.singlenode(repo, changeid)
516 self._node = repo.names.singlenode(repo, changeid)
517 self._rev = repo.changelog.rev(self._node)
517 self._rev = repo.changelog.rev(self._node)
518 return
518 return
519 except KeyError:
519 except KeyError:
520 pass
520 pass
521 except error.FilteredRepoLookupError:
521 except error.FilteredRepoLookupError:
522 raise
522 raise
523 except error.RepoLookupError:
523 except error.RepoLookupError:
524 pass
524 pass
525
525
526 self._node = repo.unfiltered().changelog._partialmatch(changeid)
526 self._node = repo.unfiltered().changelog._partialmatch(changeid)
527 if self._node is not None:
527 if self._node is not None:
528 self._rev = repo.changelog.rev(self._node)
528 self._rev = repo.changelog.rev(self._node)
529 return
529 return
530
530
531 # lookup failed
531 # lookup failed
532 # check if it might have come from damaged dirstate
532 # check if it might have come from damaged dirstate
533 #
533 #
534 # XXX we could avoid the unfiltered if we had a recognizable
534 # XXX we could avoid the unfiltered if we had a recognizable
535 # exception for filtered changeset access
535 # exception for filtered changeset access
536 if changeid in repo.unfiltered().dirstate.parents():
536 if changeid in repo.unfiltered().dirstate.parents():
537 msg = _("working directory has unknown parent '%s'!")
537 msg = _("working directory has unknown parent '%s'!")
538 raise error.Abort(msg % short(changeid))
538 raise error.Abort(msg % short(changeid))
539 try:
539 try:
540 if len(changeid) == 20 and nonascii(changeid):
540 if len(changeid) == 20 and nonascii(changeid):
541 changeid = hex(changeid)
541 changeid = hex(changeid)
542 except TypeError:
542 except TypeError:
543 pass
543 pass
544 except (error.FilteredIndexError, error.FilteredLookupError,
544 except (error.FilteredIndexError, error.FilteredLookupError,
545 error.FilteredRepoLookupError):
545 error.FilteredRepoLookupError):
546 raise _filterederror(repo, changeid)
546 raise _filterederror(repo, changeid)
547 except IndexError:
547 except IndexError:
548 pass
548 pass
549 raise error.RepoLookupError(
549 raise error.RepoLookupError(
550 _("unknown revision '%s'") % changeid)
550 _("unknown revision '%s'") % changeid)
551
551
552 def __hash__(self):
552 def __hash__(self):
553 try:
553 try:
554 return hash(self._rev)
554 return hash(self._rev)
555 except AttributeError:
555 except AttributeError:
556 return id(self)
556 return id(self)
557
557
558 def __nonzero__(self):
558 def __nonzero__(self):
559 return self._rev != nullrev
559 return self._rev != nullrev
560
560
561 __bool__ = __nonzero__
561 __bool__ = __nonzero__
562
562
563 @propertycache
563 @propertycache
564 def _changeset(self):
564 def _changeset(self):
565 return self._repo.changelog.changelogrevision(self.rev())
565 return self._repo.changelog.changelogrevision(self.rev())
566
566
567 @propertycache
567 @propertycache
568 def _manifest(self):
568 def _manifest(self):
569 return self._manifestctx.read()
569 return self._manifestctx.read()
570
570
571 @property
571 @property
572 def _manifestctx(self):
572 def _manifestctx(self):
573 return self._repo.manifestlog[self._changeset.manifest]
573 return self._repo.manifestlog[self._changeset.manifest]
574
574
575 @propertycache
575 @propertycache
576 def _manifestdelta(self):
576 def _manifestdelta(self):
577 return self._manifestctx.readdelta()
577 return self._manifestctx.readdelta()
578
578
579 @propertycache
579 @propertycache
580 def _parents(self):
580 def _parents(self):
581 repo = self._repo
581 repo = self._repo
582 p1, p2 = repo.changelog.parentrevs(self._rev)
582 p1, p2 = repo.changelog.parentrevs(self._rev)
583 if p2 == nullrev:
583 if p2 == nullrev:
584 return [changectx(repo, p1)]
584 return [changectx(repo, p1)]
585 return [changectx(repo, p1), changectx(repo, p2)]
585 return [changectx(repo, p1), changectx(repo, p2)]
586
586
587 def changeset(self):
587 def changeset(self):
588 c = self._changeset
588 c = self._changeset
589 return (
589 return (
590 c.manifest,
590 c.manifest,
591 c.user,
591 c.user,
592 c.date,
592 c.date,
593 c.files,
593 c.files,
594 c.description,
594 c.description,
595 c.extra,
595 c.extra,
596 )
596 )
597 def manifestnode(self):
597 def manifestnode(self):
598 return self._changeset.manifest
598 return self._changeset.manifest
599
599
600 def user(self):
600 def user(self):
601 return self._changeset.user
601 return self._changeset.user
602 def date(self):
602 def date(self):
603 return self._changeset.date
603 return self._changeset.date
604 def files(self):
604 def files(self):
605 return self._changeset.files
605 return self._changeset.files
606 def description(self):
606 def description(self):
607 return self._changeset.description
607 return self._changeset.description
608 def branch(self):
608 def branch(self):
609 return encoding.tolocal(self._changeset.extra.get("branch"))
609 return encoding.tolocal(self._changeset.extra.get("branch"))
610 def closesbranch(self):
610 def closesbranch(self):
611 return 'close' in self._changeset.extra
611 return 'close' in self._changeset.extra
612 def extra(self):
612 def extra(self):
613 return self._changeset.extra
613 return self._changeset.extra
614 def tags(self):
614 def tags(self):
615 return self._repo.nodetags(self._node)
615 return self._repo.nodetags(self._node)
616 def bookmarks(self):
616 def bookmarks(self):
617 return self._repo.nodebookmarks(self._node)
617 return self._repo.nodebookmarks(self._node)
618 def phase(self):
618 def phase(self):
619 return self._repo._phasecache.phase(self._repo, self._rev)
619 return self._repo._phasecache.phase(self._repo, self._rev)
620 def hidden(self):
620 def hidden(self):
621 return self._rev in repoview.filterrevs(self._repo, 'visible')
621 return self._rev in repoview.filterrevs(self._repo, 'visible')
622
622
623 def children(self):
623 def children(self):
624 """return contexts for each child changeset"""
624 """return contexts for each child changeset"""
625 c = self._repo.changelog.children(self._node)
625 c = self._repo.changelog.children(self._node)
626 return [changectx(self._repo, x) for x in c]
626 return [changectx(self._repo, x) for x in c]
627
627
628 def ancestors(self):
628 def ancestors(self):
629 for a in self._repo.changelog.ancestors([self._rev]):
629 for a in self._repo.changelog.ancestors([self._rev]):
630 yield changectx(self._repo, a)
630 yield changectx(self._repo, a)
631
631
632 def descendants(self):
632 def descendants(self):
633 for d in self._repo.changelog.descendants([self._rev]):
633 for d in self._repo.changelog.descendants([self._rev]):
634 yield changectx(self._repo, d)
634 yield changectx(self._repo, d)
635
635
636 def filectx(self, path, fileid=None, filelog=None):
636 def filectx(self, path, fileid=None, filelog=None):
637 """get a file context from this changeset"""
637 """get a file context from this changeset"""
638 if fileid is None:
638 if fileid is None:
639 fileid = self.filenode(path)
639 fileid = self.filenode(path)
640 return filectx(self._repo, path, fileid=fileid,
640 return filectx(self._repo, path, fileid=fileid,
641 changectx=self, filelog=filelog)
641 changectx=self, filelog=filelog)
642
642
643 def ancestor(self, c2, warn=False):
643 def ancestor(self, c2, warn=False):
644 """return the "best" ancestor context of self and c2
644 """return the "best" ancestor context of self and c2
645
645
646 If there are multiple candidates, it will show a message and check
646 If there are multiple candidates, it will show a message and check
647 merge.preferancestor configuration before falling back to the
647 merge.preferancestor configuration before falling back to the
648 revlog ancestor."""
648 revlog ancestor."""
649 # deal with workingctxs
649 # deal with workingctxs
650 n2 = c2._node
650 n2 = c2._node
651 if n2 is None:
651 if n2 is None:
652 n2 = c2._parents[0]._node
652 n2 = c2._parents[0]._node
653 cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
653 cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
654 if not cahs:
654 if not cahs:
655 anc = nullid
655 anc = nullid
656 elif len(cahs) == 1:
656 elif len(cahs) == 1:
657 anc = cahs[0]
657 anc = cahs[0]
658 else:
658 else:
659 # experimental config: merge.preferancestor
659 # experimental config: merge.preferancestor
660 for r in self._repo.ui.configlist('merge', 'preferancestor', ['*']):
660 for r in self._repo.ui.configlist('merge', 'preferancestor', ['*']):
661 try:
661 try:
662 ctx = changectx(self._repo, r)
662 ctx = changectx(self._repo, r)
663 except error.RepoLookupError:
663 except error.RepoLookupError:
664 continue
664 continue
665 anc = ctx.node()
665 anc = ctx.node()
666 if anc in cahs:
666 if anc in cahs:
667 break
667 break
668 else:
668 else:
669 anc = self._repo.changelog.ancestor(self._node, n2)
669 anc = self._repo.changelog.ancestor(self._node, n2)
670 if warn:
670 if warn:
671 self._repo.ui.status(
671 self._repo.ui.status(
672 (_("note: using %s as ancestor of %s and %s\n") %
672 (_("note: using %s as ancestor of %s and %s\n") %
673 (short(anc), short(self._node), short(n2))) +
673 (short(anc), short(self._node), short(n2))) +
674 ''.join(_(" alternatively, use --config "
674 ''.join(_(" alternatively, use --config "
675 "merge.preferancestor=%s\n") %
675 "merge.preferancestor=%s\n") %
676 short(n) for n in sorted(cahs) if n != anc))
676 short(n) for n in sorted(cahs) if n != anc))
677 return changectx(self._repo, anc)
677 return changectx(self._repo, anc)
678
678
679 def descendant(self, other):
679 def descendant(self, other):
680 """True if other is descendant of this changeset"""
680 """True if other is descendant of this changeset"""
681 return self._repo.changelog.descendant(self._rev, other._rev)
681 return self._repo.changelog.descendant(self._rev, other._rev)
682
682
683 def walk(self, match):
683 def walk(self, match):
684 '''Generates matching file names.'''
684 '''Generates matching file names.'''
685
685
686 # Wrap match.bad method to have message with nodeid
686 # Wrap match.bad method to have message with nodeid
687 def bad(fn, msg):
687 def bad(fn, msg):
688 # The manifest doesn't know about subrepos, so don't complain about
688 # The manifest doesn't know about subrepos, so don't complain about
689 # paths into valid subrepos.
689 # paths into valid subrepos.
690 if any(fn == s or fn.startswith(s + '/')
690 if any(fn == s or fn.startswith(s + '/')
691 for s in self.substate):
691 for s in self.substate):
692 return
692 return
693 match.bad(fn, _('no such file in rev %s') % self)
693 match.bad(fn, _('no such file in rev %s') % self)
694
694
695 m = matchmod.badmatch(match, bad)
695 m = matchmod.badmatch(match, bad)
696 return self._manifest.walk(m)
696 return self._manifest.walk(m)
697
697
698 def matches(self, match):
698 def matches(self, match):
699 return self.walk(match)
699 return self.walk(match)
700
700
701 class basefilectx(object):
701 class basefilectx(object):
702 """A filecontext object represents the common logic for its children:
702 """A filecontext object represents the common logic for its children:
703 filectx: read-only access to a filerevision that is already present
703 filectx: read-only access to a filerevision that is already present
704 in the repo,
704 in the repo,
705 workingfilectx: a filecontext that represents files from the working
705 workingfilectx: a filecontext that represents files from the working
706 directory,
706 directory,
707 memfilectx: a filecontext that represents files in-memory,
707 memfilectx: a filecontext that represents files in-memory,
708 overlayfilectx: duplicate another filecontext with some fields overridden.
708 overlayfilectx: duplicate another filecontext with some fields overridden.
709 """
709 """
710 @propertycache
710 @propertycache
711 def _filelog(self):
711 def _filelog(self):
712 return self._repo.file(self._path)
712 return self._repo.file(self._path)
713
713
714 @propertycache
714 @propertycache
715 def _changeid(self):
715 def _changeid(self):
716 if r'_changeid' in self.__dict__:
716 if r'_changeid' in self.__dict__:
717 return self._changeid
717 return self._changeid
718 elif r'_changectx' in self.__dict__:
718 elif r'_changectx' in self.__dict__:
719 return self._changectx.rev()
719 return self._changectx.rev()
720 elif r'_descendantrev' in self.__dict__:
720 elif r'_descendantrev' in self.__dict__:
721 # this file context was created from a revision with a known
721 # this file context was created from a revision with a known
722 # descendant, we can (lazily) correct for linkrev aliases
722 # descendant, we can (lazily) correct for linkrev aliases
723 return self._adjustlinkrev(self._descendantrev)
723 return self._adjustlinkrev(self._descendantrev)
724 else:
724 else:
725 return self._filelog.linkrev(self._filerev)
725 return self._filelog.linkrev(self._filerev)
726
726
727 @propertycache
727 @propertycache
728 def _filenode(self):
728 def _filenode(self):
729 if r'_fileid' in self.__dict__:
729 if r'_fileid' in self.__dict__:
730 return self._filelog.lookup(self._fileid)
730 return self._filelog.lookup(self._fileid)
731 else:
731 else:
732 return self._changectx.filenode(self._path)
732 return self._changectx.filenode(self._path)
733
733
734 @propertycache
734 @propertycache
735 def _filerev(self):
735 def _filerev(self):
736 return self._filelog.rev(self._filenode)
736 return self._filelog.rev(self._filenode)
737
737
738 @propertycache
738 @propertycache
739 def _repopath(self):
739 def _repopath(self):
740 return self._path
740 return self._path
741
741
742 def __nonzero__(self):
742 def __nonzero__(self):
743 try:
743 try:
744 self._filenode
744 self._filenode
745 return True
745 return True
746 except error.LookupError:
746 except error.LookupError:
747 # file is missing
747 # file is missing
748 return False
748 return False
749
749
750 __bool__ = __nonzero__
750 __bool__ = __nonzero__
751
751
752 def __bytes__(self):
752 def __bytes__(self):
753 try:
753 try:
754 return "%s@%s" % (self.path(), self._changectx)
754 return "%s@%s" % (self.path(), self._changectx)
755 except error.LookupError:
755 except error.LookupError:
756 return "%s@???" % self.path()
756 return "%s@???" % self.path()
757
757
758 __str__ = encoding.strmethod(__bytes__)
758 __str__ = encoding.strmethod(__bytes__)
759
759
760 def __repr__(self):
760 def __repr__(self):
761 return "<%s %s>" % (type(self).__name__, str(self))
761 return "<%s %s>" % (type(self).__name__, str(self))
762
762
763 def __hash__(self):
763 def __hash__(self):
764 try:
764 try:
765 return hash((self._path, self._filenode))
765 return hash((self._path, self._filenode))
766 except AttributeError:
766 except AttributeError:
767 return id(self)
767 return id(self)
768
768
769 def __eq__(self, other):
769 def __eq__(self, other):
770 try:
770 try:
771 return (type(self) == type(other) and self._path == other._path
771 return (type(self) == type(other) and self._path == other._path
772 and self._filenode == other._filenode)
772 and self._filenode == other._filenode)
773 except AttributeError:
773 except AttributeError:
774 return False
774 return False
775
775
776 def __ne__(self, other):
776 def __ne__(self, other):
777 return not (self == other)
777 return not (self == other)
778
778
779 def filerev(self):
779 def filerev(self):
780 return self._filerev
780 return self._filerev
781 def filenode(self):
781 def filenode(self):
782 return self._filenode
782 return self._filenode
783 @propertycache
783 @propertycache
784 def _flags(self):
784 def _flags(self):
785 return self._changectx.flags(self._path)
785 return self._changectx.flags(self._path)
786 def flags(self):
786 def flags(self):
787 return self._flags
787 return self._flags
788 def filelog(self):
788 def filelog(self):
789 return self._filelog
789 return self._filelog
790 def rev(self):
790 def rev(self):
791 return self._changeid
791 return self._changeid
792 def linkrev(self):
792 def linkrev(self):
793 return self._filelog.linkrev(self._filerev)
793 return self._filelog.linkrev(self._filerev)
794 def node(self):
794 def node(self):
795 return self._changectx.node()
795 return self._changectx.node()
796 def hex(self):
796 def hex(self):
797 return self._changectx.hex()
797 return self._changectx.hex()
798 def user(self):
798 def user(self):
799 return self._changectx.user()
799 return self._changectx.user()
800 def date(self):
800 def date(self):
801 return self._changectx.date()
801 return self._changectx.date()
802 def files(self):
802 def files(self):
803 return self._changectx.files()
803 return self._changectx.files()
804 def description(self):
804 def description(self):
805 return self._changectx.description()
805 return self._changectx.description()
806 def branch(self):
806 def branch(self):
807 return self._changectx.branch()
807 return self._changectx.branch()
808 def extra(self):
808 def extra(self):
809 return self._changectx.extra()
809 return self._changectx.extra()
810 def phase(self):
810 def phase(self):
811 return self._changectx.phase()
811 return self._changectx.phase()
812 def phasestr(self):
812 def phasestr(self):
813 return self._changectx.phasestr()
813 return self._changectx.phasestr()
814 def manifest(self):
814 def manifest(self):
815 return self._changectx.manifest()
815 return self._changectx.manifest()
816 def changectx(self):
816 def changectx(self):
817 return self._changectx
817 return self._changectx
818 def renamed(self):
818 def renamed(self):
819 return self._copied
819 return self._copied
820 def repo(self):
820 def repo(self):
821 return self._repo
821 return self._repo
822 def size(self):
822 def size(self):
823 return len(self.data())
823 return len(self.data())
824
824
825 def path(self):
825 def path(self):
826 return self._path
826 return self._path
827
827
828 def isbinary(self):
828 def isbinary(self):
829 try:
829 try:
830 return util.binary(self.data())
830 return util.binary(self.data())
831 except IOError:
831 except IOError:
832 return False
832 return False
833 def isexec(self):
833 def isexec(self):
834 return 'x' in self.flags()
834 return 'x' in self.flags()
835 def islink(self):
835 def islink(self):
836 return 'l' in self.flags()
836 return 'l' in self.flags()
837
837
838 def isabsent(self):
838 def isabsent(self):
839 """whether this filectx represents a file not in self._changectx
839 """whether this filectx represents a file not in self._changectx
840
840
841 This is mainly for merge code to detect change/delete conflicts. This is
841 This is mainly for merge code to detect change/delete conflicts. This is
842 expected to be True for all subclasses of basectx."""
842 expected to be True for all subclasses of basectx."""
843 return False
843 return False
844
844
845 _customcmp = False
845 _customcmp = False
846 def cmp(self, fctx):
846 def cmp(self, fctx):
847 """compare with other file context
847 """compare with other file context
848
848
849 returns True if different than fctx.
849 returns True if different than fctx.
850 """
850 """
851 if fctx._customcmp:
851 if fctx._customcmp:
852 return fctx.cmp(self)
852 return fctx.cmp(self)
853
853
854 if (fctx._filenode is None
854 if (fctx._filenode is None
855 and (self._repo._encodefilterpats
855 and (self._repo._encodefilterpats
856 # if file data starts with '\1\n', empty metadata block is
856 # if file data starts with '\1\n', empty metadata block is
857 # prepended, which adds 4 bytes to filelog.size().
857 # prepended, which adds 4 bytes to filelog.size().
858 or self.size() - 4 == fctx.size())
858 or self.size() - 4 == fctx.size())
859 or self.size() == fctx.size()):
859 or self.size() == fctx.size()):
860 return self._filelog.cmp(self._filenode, fctx.data())
860 return self._filelog.cmp(self._filenode, fctx.data())
861
861
862 return True
862 return True
863
863
864 def _adjustlinkrev(self, srcrev, inclusive=False):
864 def _adjustlinkrev(self, srcrev, inclusive=False):
865 """return the first ancestor of <srcrev> introducing <fnode>
865 """return the first ancestor of <srcrev> introducing <fnode>
866
866
867 If the linkrev of the file revision does not point to an ancestor of
867 If the linkrev of the file revision does not point to an ancestor of
868 srcrev, we'll walk down the ancestors until we find one introducing
868 srcrev, we'll walk down the ancestors until we find one introducing
869 this file revision.
869 this file revision.
870
870
871 :srcrev: the changeset revision we search ancestors from
871 :srcrev: the changeset revision we search ancestors from
872 :inclusive: if true, the src revision will also be checked
872 :inclusive: if true, the src revision will also be checked
873 """
873 """
874 repo = self._repo
874 repo = self._repo
875 cl = repo.unfiltered().changelog
875 cl = repo.unfiltered().changelog
876 mfl = repo.manifestlog
876 mfl = repo.manifestlog
877 # fetch the linkrev
877 # fetch the linkrev
878 lkr = self.linkrev()
878 lkr = self.linkrev()
879 # hack to reuse ancestor computation when searching for renames
879 # hack to reuse ancestor computation when searching for renames
880 memberanc = getattr(self, '_ancestrycontext', None)
880 memberanc = getattr(self, '_ancestrycontext', None)
881 iteranc = None
881 iteranc = None
882 if srcrev is None:
882 if srcrev is None:
883 # wctx case, used by workingfilectx during mergecopy
883 # wctx case, used by workingfilectx during mergecopy
884 revs = [p.rev() for p in self._repo[None].parents()]
884 revs = [p.rev() for p in self._repo[None].parents()]
885 inclusive = True # we skipped the real (revless) source
885 inclusive = True # we skipped the real (revless) source
886 else:
886 else:
887 revs = [srcrev]
887 revs = [srcrev]
888 if memberanc is None:
888 if memberanc is None:
889 memberanc = iteranc = cl.ancestors(revs, lkr,
889 memberanc = iteranc = cl.ancestors(revs, lkr,
890 inclusive=inclusive)
890 inclusive=inclusive)
891 # check if this linkrev is an ancestor of srcrev
891 # check if this linkrev is an ancestor of srcrev
892 if lkr not in memberanc:
892 if lkr not in memberanc:
893 if iteranc is None:
893 if iteranc is None:
894 iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
894 iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
895 fnode = self._filenode
895 fnode = self._filenode
896 path = self._path
896 path = self._path
897 for a in iteranc:
897 for a in iteranc:
898 ac = cl.read(a) # get changeset data (we avoid object creation)
898 ac = cl.read(a) # get changeset data (we avoid object creation)
899 if path in ac[3]: # checking the 'files' field.
899 if path in ac[3]: # checking the 'files' field.
900 # The file has been touched, check if the content is
900 # The file has been touched, check if the content is
901 # similar to the one we search for.
901 # similar to the one we search for.
902 if fnode == mfl[ac[0]].readfast().get(path):
902 if fnode == mfl[ac[0]].readfast().get(path):
903 return a
903 return a
904 # In theory, we should never get out of that loop without a result.
904 # In theory, we should never get out of that loop without a result.
905 # But if the manifest uses a buggy file revision (not a child of the
905 # But if the manifest uses a buggy file revision (not a child of the
906 # one it replaces) we could. Such a buggy situation will likely
906 # one it replaces) we could. Such a buggy situation will likely
907 # result in a crash somewhere else at some point.
907 # result in a crash somewhere else at some point.
908 return lkr
908 return lkr
909
909
910 def introrev(self):
910 def introrev(self):
911 """return the rev of the changeset which introduced this file revision
911 """return the rev of the changeset which introduced this file revision
912
912
913 This method is different from linkrev because it takes into account the
913 This method is different from linkrev because it takes into account the
914 changeset the filectx was created from. It ensures the returned
914 changeset the filectx was created from. It ensures the returned
915 revision is one of its ancestors. This prevents bugs from
915 revision is one of its ancestors. This prevents bugs from
916 'linkrev-shadowing' when a file revision is used by multiple
916 'linkrev-shadowing' when a file revision is used by multiple
917 changesets.
917 changesets.
918 """
918 """
919 lkr = self.linkrev()
919 lkr = self.linkrev()
920 attrs = vars(self)
920 attrs = vars(self)
921 noctx = not ('_changeid' in attrs or '_changectx' in attrs)
921 noctx = not ('_changeid' in attrs or '_changectx' in attrs)
922 if noctx or self.rev() == lkr:
922 if noctx or self.rev() == lkr:
923 return self.linkrev()
923 return self.linkrev()
924 return self._adjustlinkrev(self.rev(), inclusive=True)
924 return self._adjustlinkrev(self.rev(), inclusive=True)
925
925
926 def _parentfilectx(self, path, fileid, filelog):
926 def _parentfilectx(self, path, fileid, filelog):
927 """create parent filectx keeping ancestry info for _adjustlinkrev()"""
927 """create parent filectx keeping ancestry info for _adjustlinkrev()"""
928 fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
928 fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
929 if '_changeid' in vars(self) or '_changectx' in vars(self):
929 if '_changeid' in vars(self) or '_changectx' in vars(self):
930 # If self is associated with a changeset (probably explicitly
930 # If self is associated with a changeset (probably explicitly
931 # fed), ensure the created filectx is associated with a
931 # fed), ensure the created filectx is associated with a
932 # changeset that is an ancestor of self.changectx.
932 # changeset that is an ancestor of self.changectx.
933 # This lets us later use _adjustlinkrev to get a correct link.
933 # This lets us later use _adjustlinkrev to get a correct link.
934 fctx._descendantrev = self.rev()
934 fctx._descendantrev = self.rev()
935 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
935 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
936 elif '_descendantrev' in vars(self):
936 elif '_descendantrev' in vars(self):
937 # Otherwise propagate _descendantrev if we have one associated.
937 # Otherwise propagate _descendantrev if we have one associated.
938 fctx._descendantrev = self._descendantrev
938 fctx._descendantrev = self._descendantrev
939 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
939 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
940 return fctx
940 return fctx
941
941
    def parents(self):
        _path = self._path
        fl = self._filelog
        parents = self._filelog.parents(self._filenode)
        pl = [(_path, node, fl) for node in parents if node != nullid]

        r = fl.renamed(self._filenode)
        if r:
            # - In the simple rename case, both parents are nullid and pl is
            #   empty.
            # - In case of a merge, only one of the parents is nullid and it
            #   should be replaced with the rename information. This parent is
            #   -always- the first one.
            #
            # As nullid parents have always been filtered out by the list
            # comprehension above, inserting at index 0 always amounts to
            # "replacing the first nullid parent with the rename information".
            pl.insert(0, (r[0], r[1], self._repo.file(r[0])))

        return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]
961
961
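    # Illustrative sketch (not part of the original module): what parents()
    # returns for a freshly renamed file. The names `repo`, 'new' and 'old'
    # are assumptions made for the example.
    #
    # >>> fctx = repo['tip']['new']            # 'new' was copied from 'old'
    # >>> [p.path() for p in fctx.parents()]
    # ['old']
    #
    # The rename source replaces the (filtered-out) nullid first parent, so
    # the renamed file reports a single parent living in the old filelog.
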
962 def p1(self):
962 def p1(self):
963 return self.parents()[0]
963 return self.parents()[0]
964
964
965 def p2(self):
965 def p2(self):
966 p = self.parents()
966 p = self.parents()
967 if len(p) == 2:
967 if len(p) == 2:
968 return p[1]
968 return p[1]
969 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
969 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
970
970
    def annotate(self, follow=False, linenumber=False, skiprevs=None,
                 diffopts=None):
        '''returns a list of tuples of ((ctx, number), line) for each line
        in the file, where ctx is the filectx of the node where
        that line was last changed; if the linenumber parameter is true,
        number is the line number of that line at its first appearance in
        the managed file, otherwise number has a fixed value of False.
        '''
979
979
980 def lines(text):
980 def lines(text):
981 if text.endswith("\n"):
981 if text.endswith("\n"):
982 return text.count("\n")
982 return text.count("\n")
983 return text.count("\n") + int(bool(text))
983 return text.count("\n") + int(bool(text))
984
984
985 if linenumber:
985 if linenumber:
986 def decorate(text, rev):
986 def decorate(text, rev):
987 return ([(rev, i) for i in xrange(1, lines(text) + 1)], text)
987 return ([(rev, i) for i in xrange(1, lines(text) + 1)], text)
988 else:
988 else:
989 def decorate(text, rev):
989 def decorate(text, rev):
990 return ([(rev, False)] * lines(text), text)
990 return ([(rev, False)] * lines(text), text)
991
991
992 getlog = util.lrucachefunc(lambda x: self._repo.file(x))
992 getlog = util.lrucachefunc(lambda x: self._repo.file(x))
993
993
994 def parents(f):
994 def parents(f):
995 # Cut _descendantrev here to mitigate the penalty of lazy linkrev
995 # Cut _descendantrev here to mitigate the penalty of lazy linkrev
996 # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
996 # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
997 # from the topmost introrev (= srcrev) down to p.linkrev() if it
997 # from the topmost introrev (= srcrev) down to p.linkrev() if it
998 # isn't an ancestor of the srcrev.
998 # isn't an ancestor of the srcrev.
999 f._changeid
999 f._changeid
1000 pl = f.parents()
1000 pl = f.parents()
1001
1001
1002 # Don't return renamed parents if we aren't following.
1002 # Don't return renamed parents if we aren't following.
1003 if not follow:
1003 if not follow:
1004 pl = [p for p in pl if p.path() == f.path()]
1004 pl = [p for p in pl if p.path() == f.path()]
1005
1005
1006 # renamed filectx won't have a filelog yet, so set it
1006 # renamed filectx won't have a filelog yet, so set it
1007 # from the cache to save time
1007 # from the cache to save time
1008 for p in pl:
1008 for p in pl:
1009 if not '_filelog' in p.__dict__:
1009 if not '_filelog' in p.__dict__:
1010 p._filelog = getlog(p.path())
1010 p._filelog = getlog(p.path())
1011
1011
1012 return pl
1012 return pl
1013
1013
1014 # use linkrev to find the first changeset where self appeared
1014 # use linkrev to find the first changeset where self appeared
1015 base = self
1015 base = self
1016 introrev = self.introrev()
1016 introrev = self.introrev()
1017 if self.rev() != introrev:
1017 if self.rev() != introrev:
1018 base = self.filectx(self.filenode(), changeid=introrev)
1018 base = self.filectx(self.filenode(), changeid=introrev)
1019 if getattr(base, '_ancestrycontext', None) is None:
1019 if getattr(base, '_ancestrycontext', None) is None:
1020 cl = self._repo.changelog
1020 cl = self._repo.changelog
1021 if introrev is None:
1021 if introrev is None:
1022 # wctx is not inclusive, but works because _ancestrycontext
1022 # wctx is not inclusive, but works because _ancestrycontext
1023 # is used to test filelog revisions
1023 # is used to test filelog revisions
1024 ac = cl.ancestors([p.rev() for p in base.parents()],
1024 ac = cl.ancestors([p.rev() for p in base.parents()],
1025 inclusive=True)
1025 inclusive=True)
1026 else:
1026 else:
1027 ac = cl.ancestors([introrev], inclusive=True)
1027 ac = cl.ancestors([introrev], inclusive=True)
1028 base._ancestrycontext = ac
1028 base._ancestrycontext = ac
1029
1029
1030 # This algorithm would prefer to be recursive, but Python is a
1030 # This algorithm would prefer to be recursive, but Python is a
1031 # bit recursion-hostile. Instead we do an iterative
1031 # bit recursion-hostile. Instead we do an iterative
1032 # depth-first search.
1032 # depth-first search.
1033
1033
1034 # 1st DFS pre-calculates pcache and needed
1034 # 1st DFS pre-calculates pcache and needed
1035 visit = [base]
1035 visit = [base]
1036 pcache = {}
1036 pcache = {}
1037 needed = {base: 1}
1037 needed = {base: 1}
1038 while visit:
1038 while visit:
1039 f = visit.pop()
1039 f = visit.pop()
1040 if f in pcache:
1040 if f in pcache:
1041 continue
1041 continue
1042 pl = parents(f)
1042 pl = parents(f)
1043 pcache[f] = pl
1043 pcache[f] = pl
1044 for p in pl:
1044 for p in pl:
1045 needed[p] = needed.get(p, 0) + 1
1045 needed[p] = needed.get(p, 0) + 1
1046 if p not in pcache:
1046 if p not in pcache:
1047 visit.append(p)
1047 visit.append(p)
1048
1048
1049 # 2nd DFS does the actual annotate
1049 # 2nd DFS does the actual annotate
1050 visit[:] = [base]
1050 visit[:] = [base]
1051 hist = {}
1051 hist = {}
1052 while visit:
1052 while visit:
1053 f = visit[-1]
1053 f = visit[-1]
1054 if f in hist:
1054 if f in hist:
1055 visit.pop()
1055 visit.pop()
1056 continue
1056 continue
1057
1057
1058 ready = True
1058 ready = True
1059 pl = pcache[f]
1059 pl = pcache[f]
1060 for p in pl:
1060 for p in pl:
1061 if p not in hist:
1061 if p not in hist:
1062 ready = False
1062 ready = False
1063 visit.append(p)
1063 visit.append(p)
1064 if ready:
1064 if ready:
1065 visit.pop()
1065 visit.pop()
1066 curr = decorate(f.data(), f)
1066 curr = decorate(f.data(), f)
1067 skipchild = False
1067 skipchild = False
1068 if skiprevs is not None:
1068 if skiprevs is not None:
1069 skipchild = f._changeid in skiprevs
1069 skipchild = f._changeid in skiprevs
1070 curr = _annotatepair([hist[p] for p in pl], f, curr, skipchild,
1070 curr = _annotatepair([hist[p] for p in pl], f, curr, skipchild,
1071 diffopts)
1071 diffopts)
1072 for p in pl:
1072 for p in pl:
1073 if needed[p] == 1:
1073 if needed[p] == 1:
1074 del hist[p]
1074 del hist[p]
1075 del needed[p]
1075 del needed[p]
1076 else:
1076 else:
1077 needed[p] -= 1
1077 needed[p] -= 1
1078
1078
1079 hist[f] = curr
1079 hist[f] = curr
1080 del pcache[f]
1080 del pcache[f]
1081
1081
1082 return zip(hist[base][0], hist[base][1].splitlines(True))
1082 return zip(hist[base][0], hist[base][1].splitlines(True))
1083
1083
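    # Illustrative sketch (not part of the original module): consuming the
    # value returned by annotate(). `repo` and the file name are assumptions
    # made for the example.
    #
    # >>> fctx = repo['.']['README']
    # >>> for (src, lineno), line in fctx.annotate(linenumber=True):
    # ...     print("%d:%s %s" % (src.rev(), lineno, line.rstrip()))
    #
    # Each entry pairs the filectx that last touched the line (and, when
    # linenumber=True, the line number at its first appearance) with the
    # line's current text.
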
1084 def ancestors(self, followfirst=False):
1084 def ancestors(self, followfirst=False):
1085 visit = {}
1085 visit = {}
1086 c = self
1086 c = self
1087 if followfirst:
1087 if followfirst:
1088 cut = 1
1088 cut = 1
1089 else:
1089 else:
1090 cut = None
1090 cut = None
1091
1091
1092 while True:
1092 while True:
1093 for parent in c.parents()[:cut]:
1093 for parent in c.parents()[:cut]:
1094 visit[(parent.linkrev(), parent.filenode())] = parent
1094 visit[(parent.linkrev(), parent.filenode())] = parent
1095 if not visit:
1095 if not visit:
1096 break
1096 break
1097 c = visit.pop(max(visit))
1097 c = visit.pop(max(visit))
1098 yield c
1098 yield c
1099
1099
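    # Illustrative sketch (not part of the original module): walking file
    # ancestry. `repo` and the path are assumptions made for the example.
    #
    # >>> fctx = repo['tip']['somefile']
    # >>> revs = [a.linkrev() for a in fctx.ancestors(followfirst=True)]
    #
    # ancestors() repeatedly pops the candidate with the highest
    # (linkrev, filenode) key, so parent file revisions are yielded in
    # roughly newest-to-oldest order, here following only first parents.
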
1100 def _annotatepair(parents, childfctx, child, skipchild, diffopts):
1100 def _annotatepair(parents, childfctx, child, skipchild, diffopts):
1101 r'''
1101 r'''
1102 Given parent and child fctxes and annotate data for parents, for all lines
1102 Given parent and child fctxes and annotate data for parents, for all lines
1103 in either parent that match the child, annotate the child with the parent's
1103 in either parent that match the child, annotate the child with the parent's
1104 data.
1104 data.
1105
1105
1106 Additionally, if `skipchild` is True, replace all other lines with parent
1106 Additionally, if `skipchild` is True, replace all other lines with parent
1107 annotate data as well such that child is never blamed for any lines.
1107 annotate data as well such that child is never blamed for any lines.
1108
1108
1109 >>> oldfctx = 'old'
1109 >>> oldfctx = 'old'
1110 >>> p1fctx, p2fctx, childfctx = 'p1', 'p2', 'c'
1110 >>> p1fctx, p2fctx, childfctx = 'p1', 'p2', 'c'
1111 >>> olddata = 'a\nb\n'
1111 >>> olddata = 'a\nb\n'
1112 >>> p1data = 'a\nb\nc\n'
1112 >>> p1data = 'a\nb\nc\n'
1113 >>> p2data = 'a\nc\nd\n'
1113 >>> p2data = 'a\nc\nd\n'
1114 >>> childdata = 'a\nb2\nc\nc2\nd\n'
1114 >>> childdata = 'a\nb2\nc\nc2\nd\n'
1115 >>> diffopts = mdiff.diffopts()
1115 >>> diffopts = mdiff.diffopts()
1116
1116
1117 >>> def decorate(text, rev):
1117 >>> def decorate(text, rev):
1118 ... return ([(rev, i) for i in xrange(1, text.count('\n') + 1)], text)
1118 ... return ([(rev, i) for i in xrange(1, text.count('\n') + 1)], text)
1119
1119
1120 Basic usage:
1120 Basic usage:
1121
1121
1122 >>> oldann = decorate(olddata, oldfctx)
1122 >>> oldann = decorate(olddata, oldfctx)
1123 >>> p1ann = decorate(p1data, p1fctx)
1123 >>> p1ann = decorate(p1data, p1fctx)
1124 >>> p1ann = _annotatepair([oldann], p1fctx, p1ann, False, diffopts)
1124 >>> p1ann = _annotatepair([oldann], p1fctx, p1ann, False, diffopts)
1125 >>> p1ann[0]
1125 >>> p1ann[0]
1126 [('old', 1), ('old', 2), ('p1', 3)]
1126 [('old', 1), ('old', 2), ('p1', 3)]
1127 >>> p2ann = decorate(p2data, p2fctx)
1127 >>> p2ann = decorate(p2data, p2fctx)
1128 >>> p2ann = _annotatepair([oldann], p2fctx, p2ann, False, diffopts)
1128 >>> p2ann = _annotatepair([oldann], p2fctx, p2ann, False, diffopts)
1129 >>> p2ann[0]
1129 >>> p2ann[0]
1130 [('old', 1), ('p2', 2), ('p2', 3)]
1130 [('old', 1), ('p2', 2), ('p2', 3)]
1131
1131
1132 Test with multiple parents (note the difference caused by ordering):
1132 Test with multiple parents (note the difference caused by ordering):
1133
1133
1134 >>> childann = decorate(childdata, childfctx)
1134 >>> childann = decorate(childdata, childfctx)
1135 >>> childann = _annotatepair([p1ann, p2ann], childfctx, childann, False,
1135 >>> childann = _annotatepair([p1ann, p2ann], childfctx, childann, False,
1136 ... diffopts)
1136 ... diffopts)
1137 >>> childann[0]
1137 >>> childann[0]
1138 [('old', 1), ('c', 2), ('p2', 2), ('c', 4), ('p2', 3)]
1138 [('old', 1), ('c', 2), ('p2', 2), ('c', 4), ('p2', 3)]
1139
1139
1140 >>> childann = decorate(childdata, childfctx)
1140 >>> childann = decorate(childdata, childfctx)
1141 >>> childann = _annotatepair([p2ann, p1ann], childfctx, childann, False,
1141 >>> childann = _annotatepair([p2ann, p1ann], childfctx, childann, False,
1142 ... diffopts)
1142 ... diffopts)
1143 >>> childann[0]
1143 >>> childann[0]
1144 [('old', 1), ('c', 2), ('p1', 3), ('c', 4), ('p2', 3)]
1144 [('old', 1), ('c', 2), ('p1', 3), ('c', 4), ('p2', 3)]
1145
1145
1146 Test with skipchild (note the difference caused by ordering):
1146 Test with skipchild (note the difference caused by ordering):
1147
1147
1148 >>> childann = decorate(childdata, childfctx)
1148 >>> childann = decorate(childdata, childfctx)
1149 >>> childann = _annotatepair([p1ann, p2ann], childfctx, childann, True,
1149 >>> childann = _annotatepair([p1ann, p2ann], childfctx, childann, True,
1150 ... diffopts)
1150 ... diffopts)
1151 >>> childann[0]
1151 >>> childann[0]
1152 [('old', 1), ('old', 2), ('p2', 2), ('p2', 2), ('p2', 3)]
1152 [('old', 1), ('old', 2), ('p2', 2), ('p2', 2), ('p2', 3)]
1153
1153
1154 >>> childann = decorate(childdata, childfctx)
1154 >>> childann = decorate(childdata, childfctx)
1155 >>> childann = _annotatepair([p2ann, p1ann], childfctx, childann, True,
1155 >>> childann = _annotatepair([p2ann, p1ann], childfctx, childann, True,
1156 ... diffopts)
1156 ... diffopts)
1157 >>> childann[0]
1157 >>> childann[0]
1158 [('old', 1), ('old', 2), ('p1', 3), ('p1', 3), ('p2', 3)]
1158 [('old', 1), ('old', 2), ('p1', 3), ('p1', 3), ('p2', 3)]
1159 '''
1159 '''
1160 pblocks = [(parent, mdiff.allblocks(parent[1], child[1], opts=diffopts))
1160 pblocks = [(parent, mdiff.allblocks(parent[1], child[1], opts=diffopts))
1161 for parent in parents]
1161 for parent in parents]
1162
1162
1163 if skipchild:
1163 if skipchild:
1164 # Need to iterate over the blocks twice -- make it a list
1164 # Need to iterate over the blocks twice -- make it a list
1165 pblocks = [(p, list(blocks)) for (p, blocks) in pblocks]
1165 pblocks = [(p, list(blocks)) for (p, blocks) in pblocks]
1166 # Mercurial currently prefers p2 over p1 for annotate.
1166 # Mercurial currently prefers p2 over p1 for annotate.
1167 # TODO: change this?
1167 # TODO: change this?
1168 for parent, blocks in pblocks:
1168 for parent, blocks in pblocks:
1169 for (a1, a2, b1, b2), t in blocks:
1169 for (a1, a2, b1, b2), t in blocks:
1170 # Changed blocks ('!') or blocks made only of blank lines ('~')
1170 # Changed blocks ('!') or blocks made only of blank lines ('~')
1171 # belong to the child.
1171 # belong to the child.
1172 if t == '=':
1172 if t == '=':
1173 child[0][b1:b2] = parent[0][a1:a2]
1173 child[0][b1:b2] = parent[0][a1:a2]
1174
1174
1175 if skipchild:
1175 if skipchild:
1176 # Now try and match up anything that couldn't be matched,
1176 # Now try and match up anything that couldn't be matched,
1177 # Reversing pblocks maintains bias towards p2, matching above
1177 # Reversing pblocks maintains bias towards p2, matching above
1178 # behavior.
1178 # behavior.
1179 pblocks.reverse()
1179 pblocks.reverse()
1180
1180
1181 # The heuristics are:
1181 # The heuristics are:
1182 # * Work on blocks of changed lines (effectively diff hunks with -U0).
1182 # * Work on blocks of changed lines (effectively diff hunks with -U0).
1183 # This could potentially be smarter but works well enough.
1183 # This could potentially be smarter but works well enough.
1184 # * For a non-matching section, do a best-effort fit. Match lines in
1184 # * For a non-matching section, do a best-effort fit. Match lines in
1185 # diff hunks 1:1, dropping lines as necessary.
1185 # diff hunks 1:1, dropping lines as necessary.
1186 # * Repeat the last line as a last resort.
1186 # * Repeat the last line as a last resort.
1187
1187
1188 # First, replace as much as possible without repeating the last line.
1188 # First, replace as much as possible without repeating the last line.
1189 remaining = [(parent, []) for parent, _blocks in pblocks]
1189 remaining = [(parent, []) for parent, _blocks in pblocks]
1190 for idx, (parent, blocks) in enumerate(pblocks):
1190 for idx, (parent, blocks) in enumerate(pblocks):
1191 for (a1, a2, b1, b2), _t in blocks:
1191 for (a1, a2, b1, b2), _t in blocks:
1192 if a2 - a1 >= b2 - b1:
1192 if a2 - a1 >= b2 - b1:
1193 for bk in xrange(b1, b2):
1193 for bk in xrange(b1, b2):
1194 if child[0][bk][0] == childfctx:
1194 if child[0][bk][0] == childfctx:
1195 ak = min(a1 + (bk - b1), a2 - 1)
1195 ak = min(a1 + (bk - b1), a2 - 1)
1196 child[0][bk] = parent[0][ak]
1196 child[0][bk] = parent[0][ak]
1197 else:
1197 else:
1198 remaining[idx][1].append((a1, a2, b1, b2))
1198 remaining[idx][1].append((a1, a2, b1, b2))
1199
1199
1200 # Then, look at anything left, which might involve repeating the last
1200 # Then, look at anything left, which might involve repeating the last
1201 # line.
1201 # line.
1202 for parent, blocks in remaining:
1202 for parent, blocks in remaining:
1203 for a1, a2, b1, b2 in blocks:
1203 for a1, a2, b1, b2 in blocks:
1204 for bk in xrange(b1, b2):
1204 for bk in xrange(b1, b2):
1205 if child[0][bk][0] == childfctx:
1205 if child[0][bk][0] == childfctx:
1206 ak = min(a1 + (bk - b1), a2 - 1)
1206 ak = min(a1 + (bk - b1), a2 - 1)
1207 child[0][bk] = parent[0][ak]
1207 child[0][bk] = parent[0][ak]
1208 return child
1208 return child
1209
1209
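# Small worked example (not part of the original module) of the fitting rule
# used above when skipchild is set: child line bk maps to parent line
# min(a1 + (bk - b1), a2 - 1).
#
# >>> a1, a2, b1, b2 = 0, 2, 0, 4
# >>> [min(a1 + (bk - b1), a2 - 1) for bk in range(b1, b2)]
# [0, 1, 1, 1]
#
# Lines are matched 1:1 while the parent block lasts, then the parent's last
# line is repeated as a last resort.
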
1210 class filectx(basefilectx):
1210 class filectx(basefilectx):
1211 """A filecontext object makes access to data related to a particular
1211 """A filecontext object makes access to data related to a particular
1212 filerevision convenient."""
1212 filerevision convenient."""
1213 def __init__(self, repo, path, changeid=None, fileid=None,
1213 def __init__(self, repo, path, changeid=None, fileid=None,
1214 filelog=None, changectx=None):
1214 filelog=None, changectx=None):
1215 """changeid can be a changeset revision, node, or tag.
1215 """changeid can be a changeset revision, node, or tag.
1216 fileid can be a file revision or node."""
1216 fileid can be a file revision or node."""
1217 self._repo = repo
1217 self._repo = repo
1218 self._path = path
1218 self._path = path
1219
1219
1220 assert (changeid is not None
1220 assert (changeid is not None
1221 or fileid is not None
1221 or fileid is not None
1222 or changectx is not None), \
1222 or changectx is not None), \
1223 ("bad args: changeid=%r, fileid=%r, changectx=%r"
1223 ("bad args: changeid=%r, fileid=%r, changectx=%r"
1224 % (changeid, fileid, changectx))
1224 % (changeid, fileid, changectx))
1225
1225
1226 if filelog is not None:
1226 if filelog is not None:
1227 self._filelog = filelog
1227 self._filelog = filelog
1228
1228
1229 if changeid is not None:
1229 if changeid is not None:
1230 self._changeid = changeid
1230 self._changeid = changeid
1231 if changectx is not None:
1231 if changectx is not None:
1232 self._changectx = changectx
1232 self._changectx = changectx
1233 if fileid is not None:
1233 if fileid is not None:
1234 self._fileid = fileid
1234 self._fileid = fileid
1235
1235
    @propertycache
    def _changectx(self):
        try:
            return changectx(self._repo, self._changeid)
        except error.FilteredRepoLookupError:
            # Linkrev may point to any revision in the repository. When the
            # repository is filtered this may lead to `filectx` trying to
            # build `changectx` for a filtered revision. In such a case we
            # fall back to creating `changectx` on the unfiltered version of
            # the repository. This fallback should not be an issue because
            # `changectx` from `filectx` are not used in complex operations
            # that care about filtering.
            #
            # This fallback is a cheap and dirty fix that prevents several
            # crashes. It does not ensure the behavior is correct. However the
            # behavior was not correct before filtering either, and "incorrect
            # behavior" is seen as better than "crash".
            #
            # Linkrevs have several serious troubles with filtering that are
            # complicated to solve. Proper handling of the issue here should
            # be considered when solving the linkrev issues is on the table.
            return changectx(self._repo.unfiltered(), self._changeid)
1258
1258
1259 def filectx(self, fileid, changeid=None):
1259 def filectx(self, fileid, changeid=None):
1260 '''opens an arbitrary revision of the file without
1260 '''opens an arbitrary revision of the file without
1261 opening a new filelog'''
1261 opening a new filelog'''
1262 return filectx(self._repo, self._path, fileid=fileid,
1262 return filectx(self._repo, self._path, fileid=fileid,
1263 filelog=self._filelog, changeid=changeid)
1263 filelog=self._filelog, changeid=changeid)
1264
1264
1265 def rawdata(self):
1265 def rawdata(self):
1266 return self._filelog.revision(self._filenode, raw=True)
1266 return self._filelog.revision(self._filenode, raw=True)
1267
1267
1268 def rawflags(self):
1268 def rawflags(self):
1269 """low-level revlog flags"""
1269 """low-level revlog flags"""
1270 return self._filelog.flags(self._filerev)
1270 return self._filelog.flags(self._filerev)
1271
1271
1272 def data(self):
1272 def data(self):
1273 try:
1273 try:
1274 return self._filelog.read(self._filenode)
1274 return self._filelog.read(self._filenode)
1275 except error.CensoredNodeError:
1275 except error.CensoredNodeError:
1276 if self._repo.ui.config("censor", "policy") == "ignore":
1276 if self._repo.ui.config("censor", "policy") == "ignore":
1277 return ""
1277 return ""
1278 raise error.Abort(_("censored node: %s") % short(self._filenode),
1278 raise error.Abort(_("censored node: %s") % short(self._filenode),
1279 hint=_("set censor.policy to ignore errors"))
1279 hint=_("set censor.policy to ignore errors"))
1280
1280
1281 def size(self):
1281 def size(self):
1282 return self._filelog.size(self._filerev)
1282 return self._filelog.size(self._filerev)
1283
1283
    @propertycache
    def _copied(self):
        """check if file was actually renamed in this changeset revision

        If a rename is logged in the file revision, we report the copy for
        the changeset only if the file revision's linkrev points back to the
        changeset in question or if both changeset parents contain different
        file revisions.
        """

        renamed = self._filelog.renamed(self._filenode)
        if not renamed:
            return renamed

        if self.rev() == self.linkrev():
            return renamed

        name = self.path()
        fnode = self._filenode
        for p in self._changectx.parents():
            try:
                if fnode == p.filenode(name):
                    return None
            except error.LookupError:
                pass
        return renamed
1309
1309
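    # Illustrative sketch (not part of the original module): how the cached
    # rename information is typically consumed. The filelog's renamed()
    # returns either False or an (oldpath, oldfilenode) pair; `fctx` is an
    # assumption made for the example.
    #
    # >>> r = fctx.renamed()
    # >>> if r:
    # ...     oldpath = r[0]          # copy/rename source path
    #
    # The extra linkrev/parent checks above only decide whether that rename
    # should be reported for this particular changeset.
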
1310 def children(self):
1310 def children(self):
1311 # hard for renames
1311 # hard for renames
1312 c = self._filelog.children(self._filenode)
1312 c = self._filelog.children(self._filenode)
1313 return [filectx(self._repo, self._path, fileid=x,
1313 return [filectx(self._repo, self._path, fileid=x,
1314 filelog=self._filelog) for x in c]
1314 filelog=self._filelog) for x in c]
1315
1315
1316 class committablectx(basectx):
1316 class committablectx(basectx):
1317 """A committablectx object provides common functionality for a context that
1317 """A committablectx object provides common functionality for a context that
1318 wants the ability to commit, e.g. workingctx or memctx."""
1318 wants the ability to commit, e.g. workingctx or memctx."""
1319 def __init__(self, repo, text="", user=None, date=None, extra=None,
1319 def __init__(self, repo, text="", user=None, date=None, extra=None,
1320 changes=None):
1320 changes=None):
1321 self._repo = repo
1321 self._repo = repo
1322 self._rev = None
1322 self._rev = None
1323 self._node = None
1323 self._node = None
1324 self._text = text
1324 self._text = text
1325 if date:
1325 if date:
1326 self._date = util.parsedate(date)
1326 self._date = util.parsedate(date)
1327 if user:
1327 if user:
1328 self._user = user
1328 self._user = user
1329 if changes:
1329 if changes:
1330 self._status = changes
1330 self._status = changes
1331
1331
1332 self._extra = {}
1332 self._extra = {}
1333 if extra:
1333 if extra:
1334 self._extra = extra.copy()
1334 self._extra = extra.copy()
1335 if 'branch' not in self._extra:
1335 if 'branch' not in self._extra:
1336 try:
1336 try:
1337 branch = encoding.fromlocal(self._repo.dirstate.branch())
1337 branch = encoding.fromlocal(self._repo.dirstate.branch())
1338 except UnicodeDecodeError:
1338 except UnicodeDecodeError:
1339 raise error.Abort(_('branch name not in UTF-8!'))
1339 raise error.Abort(_('branch name not in UTF-8!'))
1340 self._extra['branch'] = branch
1340 self._extra['branch'] = branch
1341 if self._extra['branch'] == '':
1341 if self._extra['branch'] == '':
1342 self._extra['branch'] = 'default'
1342 self._extra['branch'] = 'default'
1343
1343
1344 def __bytes__(self):
1344 def __bytes__(self):
1345 return bytes(self._parents[0]) + "+"
1345 return bytes(self._parents[0]) + "+"
1346
1346
1347 __str__ = encoding.strmethod(__bytes__)
1347 __str__ = encoding.strmethod(__bytes__)
1348
1348
1349 def __nonzero__(self):
1349 def __nonzero__(self):
1350 return True
1350 return True
1351
1351
1352 __bool__ = __nonzero__
1352 __bool__ = __nonzero__
1353
1353
1354 def _buildflagfunc(self):
1354 def _buildflagfunc(self):
1355 # Create a fallback function for getting file flags when the
1355 # Create a fallback function for getting file flags when the
1356 # filesystem doesn't support them
1356 # filesystem doesn't support them
1357
1357
1358 copiesget = self._repo.dirstate.copies().get
1358 copiesget = self._repo.dirstate.copies().get
1359 parents = self.parents()
1359 parents = self.parents()
1360 if len(parents) < 2:
1360 if len(parents) < 2:
1361 # when we have one parent, it's easy: copy from parent
1361 # when we have one parent, it's easy: copy from parent
1362 man = parents[0].manifest()
1362 man = parents[0].manifest()
1363 def func(f):
1363 def func(f):
1364 f = copiesget(f, f)
1364 f = copiesget(f, f)
1365 return man.flags(f)
1365 return man.flags(f)
1366 else:
1366 else:
1367 # merges are tricky: we try to reconstruct the unstored
1367 # merges are tricky: we try to reconstruct the unstored
1368 # result from the merge (issue1802)
1368 # result from the merge (issue1802)
1369 p1, p2 = parents
1369 p1, p2 = parents
1370 pa = p1.ancestor(p2)
1370 pa = p1.ancestor(p2)
1371 m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()
1371 m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()
1372
1372
1373 def func(f):
1373 def func(f):
1374 f = copiesget(f, f) # may be wrong for merges with copies
1374 f = copiesget(f, f) # may be wrong for merges with copies
1375 fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
1375 fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
1376 if fl1 == fl2:
1376 if fl1 == fl2:
1377 return fl1
1377 return fl1
1378 if fl1 == fla:
1378 if fl1 == fla:
1379 return fl2
1379 return fl2
1380 if fl2 == fla:
1380 if fl2 == fla:
1381 return fl1
1381 return fl1
1382 return '' # punt for conflicts
1382 return '' # punt for conflicts
1383
1383
1384 return func
1384 return func
1385
1385
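    # Small worked example (not part of the original module) of the three-way
    # flag resolution used in the merge branch above: keep whichever side
    # changed the flag relative to the ancestor, and punt on conflicts.
    #
    # >>> def mergeflags(fl1, fl2, fla):
    # ...     if fl1 == fl2:
    # ...         return fl1
    # ...     if fl1 == fla:
    # ...         return fl2
    # ...     if fl2 == fla:
    # ...         return fl1
    # ...     return ''
    # >>> mergeflags('x', '', '')    # only p1 made the file executable
    # 'x'
    # >>> mergeflags('x', 'l', '')   # both sides changed it differently
    # ''
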
1386 @propertycache
1386 @propertycache
1387 def _flagfunc(self):
1387 def _flagfunc(self):
1388 return self._repo.dirstate.flagfunc(self._buildflagfunc)
1388 return self._repo.dirstate.flagfunc(self._buildflagfunc)
1389
1389
1390 @propertycache
1390 @propertycache
1391 def _status(self):
1391 def _status(self):
1392 return self._repo.status()
1392 return self._repo.status()
1393
1393
1394 @propertycache
1394 @propertycache
1395 def _user(self):
1395 def _user(self):
1396 return self._repo.ui.username()
1396 return self._repo.ui.username()
1397
1397
1398 @propertycache
1398 @propertycache
1399 def _date(self):
1399 def _date(self):
1400 ui = self._repo.ui
1400 ui = self._repo.ui
1401 date = ui.configdate('devel', 'default-date')
1401 date = ui.configdate('devel', 'default-date')
1402 if date is None:
1402 if date is None:
1403 date = util.makedate()
1403 date = util.makedate()
1404 return date
1404 return date
1405
1405
1406 def subrev(self, subpath):
1406 def subrev(self, subpath):
1407 return None
1407 return None
1408
1408
1409 def manifestnode(self):
1409 def manifestnode(self):
1410 return None
1410 return None
1411 def user(self):
1411 def user(self):
1412 return self._user or self._repo.ui.username()
1412 return self._user or self._repo.ui.username()
1413 def date(self):
1413 def date(self):
1414 return self._date
1414 return self._date
1415 def description(self):
1415 def description(self):
1416 return self._text
1416 return self._text
1417 def files(self):
1417 def files(self):
1418 return sorted(self._status.modified + self._status.added +
1418 return sorted(self._status.modified + self._status.added +
1419 self._status.removed)
1419 self._status.removed)
1420
1420
1421 def modified(self):
1421 def modified(self):
1422 return self._status.modified
1422 return self._status.modified
1423 def added(self):
1423 def added(self):
1424 return self._status.added
1424 return self._status.added
1425 def removed(self):
1425 def removed(self):
1426 return self._status.removed
1426 return self._status.removed
1427 def deleted(self):
1427 def deleted(self):
1428 return self._status.deleted
1428 return self._status.deleted
1429 def branch(self):
1429 def branch(self):
1430 return encoding.tolocal(self._extra['branch'])
1430 return encoding.tolocal(self._extra['branch'])
1431 def closesbranch(self):
1431 def closesbranch(self):
1432 return 'close' in self._extra
1432 return 'close' in self._extra
1433 def extra(self):
1433 def extra(self):
1434 return self._extra
1434 return self._extra
1435
1435
1436 def tags(self):
1436 def tags(self):
1437 return []
1437 return []
1438
1438
1439 def bookmarks(self):
1439 def bookmarks(self):
1440 b = []
1440 b = []
1441 for p in self.parents():
1441 for p in self.parents():
1442 b.extend(p.bookmarks())
1442 b.extend(p.bookmarks())
1443 return b
1443 return b
1444
1444
1445 def phase(self):
1445 def phase(self):
1446 phase = phases.draft # default phase to draft
1446 phase = phases.draft # default phase to draft
1447 for p in self.parents():
1447 for p in self.parents():
1448 phase = max(phase, p.phase())
1448 phase = max(phase, p.phase())
1449 return phase
1449 return phase
1450
1450
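    # Small worked example (not part of the original module): phases are
    # ordered public(0) < draft(1) < secret(2), so taking the max over the
    # parents means an uncommitted context is never "more public" than any
    # of its parents.
    #
    # >>> max(phases.public, phases.secret) == phases.secret
    # True
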
1451 def hidden(self):
1451 def hidden(self):
1452 return False
1452 return False
1453
1453
1454 def children(self):
1454 def children(self):
1455 return []
1455 return []
1456
1456
1457 def flags(self, path):
1457 def flags(self, path):
1458 if r'_manifest' in self.__dict__:
1458 if r'_manifest' in self.__dict__:
1459 try:
1459 try:
1460 return self._manifest.flags(path)
1460 return self._manifest.flags(path)
1461 except KeyError:
1461 except KeyError:
1462 return ''
1462 return ''
1463
1463
1464 try:
1464 try:
1465 return self._flagfunc(path)
1465 return self._flagfunc(path)
1466 except OSError:
1466 except OSError:
1467 return ''
1467 return ''
1468
1468
1469 def ancestor(self, c2):
1469 def ancestor(self, c2):
1470 """return the "best" ancestor context of self and c2"""
1470 """return the "best" ancestor context of self and c2"""
1471 return self._parents[0].ancestor(c2) # punt on two parents for now
1471 return self._parents[0].ancestor(c2) # punt on two parents for now
1472
1472
1473 def walk(self, match):
1473 def walk(self, match):
1474 '''Generates matching file names.'''
1474 '''Generates matching file names.'''
1475 return sorted(self._repo.dirstate.walk(match, sorted(self.substate),
1475 return sorted(self._repo.dirstate.walk(match, sorted(self.substate),
1476 True, False))
1476 True, False))
1477
1477
1478 def matches(self, match):
1478 def matches(self, match):
1479 return sorted(self._repo.dirstate.matches(match))
1479 return sorted(self._repo.dirstate.matches(match))
1480
1480
1481 def ancestors(self):
1481 def ancestors(self):
1482 for p in self._parents:
1482 for p in self._parents:
1483 yield p
1483 yield p
1484 for a in self._repo.changelog.ancestors(
1484 for a in self._repo.changelog.ancestors(
1485 [p.rev() for p in self._parents]):
1485 [p.rev() for p in self._parents]):
1486 yield changectx(self._repo, a)
1486 yield changectx(self._repo, a)
1487
1487
1488 def markcommitted(self, node):
1488 def markcommitted(self, node):
1489 """Perform post-commit cleanup necessary after committing this ctx
1489 """Perform post-commit cleanup necessary after committing this ctx
1490
1490
1491 Specifically, this updates backing stores this working context
1491 Specifically, this updates backing stores this working context
1492 wraps to reflect the fact that the changes reflected by this
1492 wraps to reflect the fact that the changes reflected by this
1493 workingctx have been committed. For example, it marks
1493 workingctx have been committed. For example, it marks
1494 modified and added files as normal in the dirstate.
1494 modified and added files as normal in the dirstate.
1495
1495
1496 """
1496 """
1497
1497
1498 with self._repo.dirstate.parentchange():
1498 with self._repo.dirstate.parentchange():
1499 for f in self.modified() + self.added():
1499 for f in self.modified() + self.added():
1500 self._repo.dirstate.normal(f)
1500 self._repo.dirstate.normal(f)
1501 for f in self.removed():
1501 for f in self.removed():
1502 self._repo.dirstate.drop(f)
1502 self._repo.dirstate.drop(f)
1503 self._repo.dirstate.setparents(node)
1503 self._repo.dirstate.setparents(node)
1504
1504
1505 # write changes out explicitly, because nesting wlock at
1505 # write changes out explicitly, because nesting wlock at
1506 # runtime may prevent 'wlock.release()' in 'repo.commit()'
1506 # runtime may prevent 'wlock.release()' in 'repo.commit()'
1507 # from immediately doing so for subsequent changing files
1507 # from immediately doing so for subsequent changing files
1508 self._repo.dirstate.write(self._repo.currenttransaction())
1508 self._repo.dirstate.write(self._repo.currenttransaction())
1509
1509
1510 def dirty(self, missing=False, merge=True, branch=True):
1510 def dirty(self, missing=False, merge=True, branch=True):
1511 return False
1511 return False
1512
1512
1513 class workingctx(committablectx):
1513 class workingctx(committablectx):
1514 """A workingctx object makes access to data related to
1514 """A workingctx object makes access to data related to
1515 the current working directory convenient.
1515 the current working directory convenient.
1516 date - any valid date string or (unixtime, offset), or None.
1516 date - any valid date string or (unixtime, offset), or None.
1517 user - username string, or None.
1517 user - username string, or None.
1518 extra - a dictionary of extra values, or None.
1518 extra - a dictionary of extra values, or None.
1519 changes - a list of file lists as returned by localrepo.status()
1519 changes - a list of file lists as returned by localrepo.status()
1520 or None to use the repository status.
1520 or None to use the repository status.
1521 """
1521 """
1522 def __init__(self, repo, text="", user=None, date=None, extra=None,
1522 def __init__(self, repo, text="", user=None, date=None, extra=None,
1523 changes=None):
1523 changes=None):
1524 super(workingctx, self).__init__(repo, text, user, date, extra, changes)
1524 super(workingctx, self).__init__(repo, text, user, date, extra, changes)
1525
1525
1526 def __iter__(self):
1526 def __iter__(self):
1527 d = self._repo.dirstate
1527 d = self._repo.dirstate
1528 for f in d:
1528 for f in d:
1529 if d[f] != 'r':
1529 if d[f] != 'r':
1530 yield f
1530 yield f
1531
1531
1532 def __contains__(self, key):
1532 def __contains__(self, key):
1533 return self._repo.dirstate[key] not in "?r"
1533 return self._repo.dirstate[key] not in "?r"
1534
1534
1535 def hex(self):
1535 def hex(self):
1536 return hex(wdirid)
1536 return hex(wdirid)
1537
1537
1538 @propertycache
1538 @propertycache
1539 def _parents(self):
1539 def _parents(self):
1540 p = self._repo.dirstate.parents()
1540 p = self._repo.dirstate.parents()
1541 if p[1] == nullid:
1541 if p[1] == nullid:
1542 p = p[:-1]
1542 p = p[:-1]
1543 return [changectx(self._repo, x) for x in p]
1543 return [changectx(self._repo, x) for x in p]
1544
1544
1545 def filectx(self, path, filelog=None):
1545 def filectx(self, path, filelog=None):
1546 """get a file context from the working directory"""
1546 """get a file context from the working directory"""
1547 return workingfilectx(self._repo, path, workingctx=self,
1547 return workingfilectx(self._repo, path, workingctx=self,
1548 filelog=filelog)
1548 filelog=filelog)
1549
1549
1550 def dirty(self, missing=False, merge=True, branch=True):
1550 def dirty(self, missing=False, merge=True, branch=True):
1551 "check whether a working directory is modified"
1551 "check whether a working directory is modified"
1552 # check subrepos first
1552 # check subrepos first
1553 for s in sorted(self.substate):
1553 for s in sorted(self.substate):
1554 if self.sub(s).dirty(missing=missing):
1554 if self.sub(s).dirty(missing=missing):
1555 return True
1555 return True
1556 # check current working dir
1556 # check current working dir
1557 return ((merge and self.p2()) or
1557 return ((merge and self.p2()) or
1558 (branch and self.branch() != self.p1().branch()) or
1558 (branch and self.branch() != self.p1().branch()) or
1559 self.modified() or self.added() or self.removed() or
1559 self.modified() or self.added() or self.removed() or
1560 (missing and self.deleted()))
1560 (missing and self.deleted()))
1561
1561
1562 def add(self, list, prefix=""):
1562 def add(self, list, prefix=""):
1563 with self._repo.wlock():
1563 with self._repo.wlock():
1564 ui, ds = self._repo.ui, self._repo.dirstate
1564 ui, ds = self._repo.ui, self._repo.dirstate
1565 uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
1565 uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
1566 rejected = []
1566 rejected = []
1567 lstat = self._repo.wvfs.lstat
1567 lstat = self._repo.wvfs.lstat
1568 for f in list:
1568 for f in list:
1569 # ds.pathto() returns an absolute file when this is invoked from
1569 # ds.pathto() returns an absolute file when this is invoked from
1570 # the keyword extension. That gets flagged as non-portable on
1570 # the keyword extension. That gets flagged as non-portable on
1571 # Windows, since it contains the drive letter and colon.
1571 # Windows, since it contains the drive letter and colon.
1572 scmutil.checkportable(ui, os.path.join(prefix, f))
1572 scmutil.checkportable(ui, os.path.join(prefix, f))
1573 try:
1573 try:
1574 st = lstat(f)
1574 st = lstat(f)
1575 except OSError:
1575 except OSError:
1576 ui.warn(_("%s does not exist!\n") % uipath(f))
1576 ui.warn(_("%s does not exist!\n") % uipath(f))
1577 rejected.append(f)
1577 rejected.append(f)
1578 continue
1578 continue
1579 if st.st_size > 10000000:
1579 if st.st_size > 10000000:
1580 ui.warn(_("%s: up to %d MB of RAM may be required "
1580 ui.warn(_("%s: up to %d MB of RAM may be required "
1581 "to manage this file\n"
1581 "to manage this file\n"
1582 "(use 'hg revert %s' to cancel the "
1582 "(use 'hg revert %s' to cancel the "
1583 "pending addition)\n")
1583 "pending addition)\n")
1584 % (f, 3 * st.st_size // 1000000, uipath(f)))
1584 % (f, 3 * st.st_size // 1000000, uipath(f)))
1585 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1585 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1586 ui.warn(_("%s not added: only files and symlinks "
1586 ui.warn(_("%s not added: only files and symlinks "
1587 "supported currently\n") % uipath(f))
1587 "supported currently\n") % uipath(f))
1588 rejected.append(f)
1588 rejected.append(f)
1589 elif ds[f] in 'amn':
1589 elif ds[f] in 'amn':
1590 ui.warn(_("%s already tracked!\n") % uipath(f))
1590 ui.warn(_("%s already tracked!\n") % uipath(f))
1591 elif ds[f] == 'r':
1591 elif ds[f] == 'r':
1592 ds.normallookup(f)
1592 ds.normallookup(f)
1593 else:
1593 else:
1594 ds.add(f)
1594 ds.add(f)
1595 return rejected
1595 return rejected
1596
1596
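    # Small worked example (not part of the original module) of the warning
    # threshold above: files larger than 10000000 bytes trigger a warning
    # estimating roughly three times the file size in megabytes of RAM.
    #
    # >>> st_size = 50 * 1000 * 1000          # a 50 MB file
    # >>> 3 * st_size // 1000000
    # 150
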
1597 def forget(self, files, prefix=""):
1597 def forget(self, files, prefix=""):
1598 with self._repo.wlock():
1598 with self._repo.wlock():
1599 ds = self._repo.dirstate
1599 ds = self._repo.dirstate
1600 uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
1600 uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
1601 rejected = []
1601 rejected = []
1602 for f in files:
1602 for f in files:
1603 if f not in self._repo.dirstate:
1603 if f not in self._repo.dirstate:
1604 self._repo.ui.warn(_("%s not tracked!\n") % uipath(f))
1604 self._repo.ui.warn(_("%s not tracked!\n") % uipath(f))
1605 rejected.append(f)
1605 rejected.append(f)
1606 elif self._repo.dirstate[f] != 'a':
1606 elif self._repo.dirstate[f] != 'a':
1607 self._repo.dirstate.remove(f)
1607 self._repo.dirstate.remove(f)
1608 else:
1608 else:
1609 self._repo.dirstate.drop(f)
1609 self._repo.dirstate.drop(f)
1610 return rejected
1610 return rejected
1611
1611
1612 def undelete(self, list):
1612 def undelete(self, list):
1613 pctxs = self.parents()
1613 pctxs = self.parents()
1614 with self._repo.wlock():
1614 with self._repo.wlock():
1615 ds = self._repo.dirstate
1615 ds = self._repo.dirstate
1616 for f in list:
1616 for f in list:
1617 if self._repo.dirstate[f] != 'r':
1617 if self._repo.dirstate[f] != 'r':
1618 self._repo.ui.warn(_("%s not removed!\n") % ds.pathto(f))
1618 self._repo.ui.warn(_("%s not removed!\n") % ds.pathto(f))
1619 else:
1619 else:
1620 fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f]
1620 fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f]
1621 t = fctx.data()
1621 t = fctx.data()
1622 self._repo.wwrite(f, t, fctx.flags())
1622 self._repo.wwrite(f, t, fctx.flags())
1623 self._repo.dirstate.normal(f)
1623 self._repo.dirstate.normal(f)
1624
1624
1625 def copy(self, source, dest):
1625 def copy(self, source, dest):
1626 try:
1626 try:
1627 st = self._repo.wvfs.lstat(dest)
1627 st = self._repo.wvfs.lstat(dest)
1628 except OSError as err:
1628 except OSError as err:
1629 if err.errno != errno.ENOENT:
1629 if err.errno != errno.ENOENT:
1630 raise
1630 raise
1631 self._repo.ui.warn(_("%s does not exist!\n")
1631 self._repo.ui.warn(_("%s does not exist!\n")
1632 % self._repo.dirstate.pathto(dest))
1632 % self._repo.dirstate.pathto(dest))
1633 return
1633 return
1634 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1634 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1635 self._repo.ui.warn(_("copy failed: %s is not a file or a "
1635 self._repo.ui.warn(_("copy failed: %s is not a file or a "
1636 "symbolic link\n")
1636 "symbolic link\n")
1637 % self._repo.dirstate.pathto(dest))
1637 % self._repo.dirstate.pathto(dest))
1638 else:
1638 else:
1639 with self._repo.wlock():
1639 with self._repo.wlock():
1640 if self._repo.dirstate[dest] in '?':
1640 if self._repo.dirstate[dest] in '?':
1641 self._repo.dirstate.add(dest)
1641 self._repo.dirstate.add(dest)
1642 elif self._repo.dirstate[dest] in 'r':
1642 elif self._repo.dirstate[dest] in 'r':
1643 self._repo.dirstate.normallookup(dest)
1643 self._repo.dirstate.normallookup(dest)
1644 self._repo.dirstate.copy(source, dest)
1644 self._repo.dirstate.copy(source, dest)
1645
1645
1646 def match(self, pats=None, include=None, exclude=None, default='glob',
1646 def match(self, pats=None, include=None, exclude=None, default='glob',
1647 listsubrepos=False, badfn=None):
1647 listsubrepos=False, badfn=None):
1648 r = self._repo
1648 r = self._repo
1649
1649
1650 # Only a case insensitive filesystem needs magic to translate user input
1650 # Only a case insensitive filesystem needs magic to translate user input
1651 # to actual case in the filesystem.
1651 # to actual case in the filesystem.
1652 icasefs = not util.fscasesensitive(r.root)
1652 icasefs = not util.fscasesensitive(r.root)
1653 return matchmod.match(r.root, r.getcwd(), pats, include, exclude,
1653 return matchmod.match(r.root, r.getcwd(), pats, include, exclude,
1654 default, auditor=r.auditor, ctx=self,
1654 default, auditor=r.auditor, ctx=self,
1655 listsubrepos=listsubrepos, badfn=badfn,
1655 listsubrepos=listsubrepos, badfn=badfn,
1656 icasefs=icasefs)
1656 icasefs=icasefs)
1657
1657
1658 def _filtersuspectsymlink(self, files):
1658 def _filtersuspectsymlink(self, files):
1659 if not files or self._repo.dirstate._checklink:
1659 if not files or self._repo.dirstate._checklink:
1660 return files
1660 return files
1661
1661
1662 # Symlink placeholders may get non-symlink-like contents
1662 # Symlink placeholders may get non-symlink-like contents
1663 # via user error or dereferencing by NFS or Samba servers,
1663 # via user error or dereferencing by NFS or Samba servers,
1664 # so we filter out any placeholders that don't look like a
1664 # so we filter out any placeholders that don't look like a
1665 # symlink
1665 # symlink
1666 sane = []
1666 sane = []
1667 for f in files:
1667 for f in files:
1668 if self.flags(f) == 'l':
1668 if self.flags(f) == 'l':
1669 d = self[f].data()
1669 d = self[f].data()
1670 if d == '' or len(d) >= 1024 or '\n' in d or util.binary(d):
1670 if d == '' or len(d) >= 1024 or '\n' in d or util.binary(d):
1671 self._repo.ui.debug('ignoring suspect symlink placeholder'
1671 self._repo.ui.debug('ignoring suspect symlink placeholder'
1672 ' "%s"\n' % f)
1672 ' "%s"\n' % f)
1673 continue
1673 continue
1674 sane.append(f)
1674 sane.append(f)
1675 return sane
1675 return sane
1676
1676
1677 def _checklookup(self, files):
1677 def _checklookup(self, files):
1678 # check for any possibly clean files
1678 # check for any possibly clean files
1679 if not files:
1679 if not files:
1680 return [], [], []
1680 return [], [], []
1681
1681
1682 modified = []
1682 modified = []
1683 deleted = []
1683 deleted = []
1684 fixup = []
1684 fixup = []
1685 pctx = self._parents[0]
1685 pctx = self._parents[0]
1686 # do a full compare of any files that might have changed
1686 # do a full compare of any files that might have changed
1687 for f in sorted(files):
1687 for f in sorted(files):
1688 try:
1688 try:
1689 # This will return True for a file that got replaced by a
1689 # This will return True for a file that got replaced by a
1690 # directory in the interim, but fixing that is pretty hard.
1690 # directory in the interim, but fixing that is pretty hard.
1691 if (f not in pctx or self.flags(f) != pctx.flags(f)
1691 if (f not in pctx or self.flags(f) != pctx.flags(f)
1692 or pctx[f].cmp(self[f])):
1692 or pctx[f].cmp(self[f])):
1693 modified.append(f)
1693 modified.append(f)
1694 else:
1694 else:
1695 fixup.append(f)
1695 fixup.append(f)
            except (IOError, OSError):
                # Did the file become inaccessible in between? Mark it as
                # deleted, matching dirstate behavior (issue5584).
                # The dirstate has more complex behavior around whether a
                # missing file matches a directory, etc, but we don't need to
                # bother with that: if f has made it to this point, we're sure
                # it's in the dirstate.
                deleted.append(f)
1704
1704
1705 return modified, deleted, fixup
1705 return modified, deleted, fixup
1706
1706
1707 def _poststatusfixup(self, status, fixup):
1707 def _poststatusfixup(self, status, fixup):
1708 """update dirstate for files that are actually clean"""
1708 """update dirstate for files that are actually clean"""
1709 poststatus = self._repo.postdsstatus()
1709 poststatus = self._repo.postdsstatus()
1710 if fixup or poststatus:
1710 if fixup or poststatus:
1711 try:
1711 try:
1712 oldid = self._repo.dirstate.identity()
1712 oldid = self._repo.dirstate.identity()
1713
1713
1714 # updating the dirstate is optional
1714 # updating the dirstate is optional
1715 # so we don't wait on the lock
1715 # so we don't wait on the lock
1716 # wlock can invalidate the dirstate, so cache normal _after_
1716 # wlock can invalidate the dirstate, so cache normal _after_
1717 # taking the lock
1717 # taking the lock
1718 with self._repo.wlock(False):
1718 with self._repo.wlock(False):
1719 if self._repo.dirstate.identity() == oldid:
1719 if self._repo.dirstate.identity() == oldid:
1720 if fixup:
1720 if fixup:
1721 normal = self._repo.dirstate.normal
1721 normal = self._repo.dirstate.normal
1722 for f in fixup:
1722 for f in fixup:
1723 normal(f)
1723 normal(f)
1724 # write changes out explicitly, because nesting
1724 # write changes out explicitly, because nesting
1725 # wlock at runtime may prevent 'wlock.release()'
1725 # wlock at runtime may prevent 'wlock.release()'
1726 # after this block from doing so for subsequent
1726 # after this block from doing so for subsequent
1727 # changing files
1727 # changing files
1728 tr = self._repo.currenttransaction()
1728 tr = self._repo.currenttransaction()
1729 self._repo.dirstate.write(tr)
1729 self._repo.dirstate.write(tr)
1730
1730
1731 if poststatus:
1731 if poststatus:
1732 for ps in poststatus:
1732 for ps in poststatus:
1733 ps(self, status)
1733 ps(self, status)
1734 else:
1734 else:
1735 # in this case, writing changes out breaks
1735 # in this case, writing changes out breaks
1736 # consistency, because .hg/dirstate was
1736 # consistency, because .hg/dirstate was
1737 # already changed simultaneously after last
1737 # already changed simultaneously after last
1738 # caching (see also issue5584 for detail)
1738 # caching (see also issue5584 for detail)
1739 self._repo.ui.debug('skip updating dirstate: '
1739 self._repo.ui.debug('skip updating dirstate: '
1740 'identity mismatch\n')
1740 'identity mismatch\n')
1741 except error.LockError:
1741 except error.LockError:
1742 pass
1742 pass
1743 finally:
1743 finally:
1744 # Even if the wlock couldn't be grabbed, clear out the list.
1744 # Even if the wlock couldn't be grabbed, clear out the list.
1745 self._repo.clearpostdsstatus()
1745 self._repo.clearpostdsstatus()
1746
1746
1747 def _dirstatestatus(self, match=None, ignored=False, clean=False,
1747 def _dirstatestatus(self, match=None, ignored=False, clean=False,
1748 unknown=False):
1748 unknown=False):
1749 '''Gets the status from the dirstate -- internal use only.'''
1749 '''Gets the status from the dirstate -- internal use only.'''
1750 listignored, listclean, listunknown = ignored, clean, unknown
1750 listignored, listclean, listunknown = ignored, clean, unknown
1751 match = match or matchmod.always(self._repo.root, self._repo.getcwd())
1751 match = match or matchmod.always(self._repo.root, self._repo.getcwd())
1752 subrepos = []
1752 subrepos = []
1753 if '.hgsub' in self:
1753 if '.hgsub' in self:
1754 subrepos = sorted(self.substate)
1754 subrepos = sorted(self.substate)
1755 cmp, s = self._repo.dirstate.status(match, subrepos, listignored,
1755 cmp, s = self._repo.dirstate.status(match, subrepos, listignored,
1756 listclean, listunknown)
1756 listclean, listunknown)
1757
1757
1758 # check for any possibly clean files
1758 # check for any possibly clean files
1759 fixup = []
1759 fixup = []
1760 if cmp:
1760 if cmp:
1761 modified2, deleted2, fixup = self._checklookup(cmp)
1761 modified2, deleted2, fixup = self._checklookup(cmp)
1762 s.modified.extend(modified2)
1762 s.modified.extend(modified2)
1763 s.deleted.extend(deleted2)
1763 s.deleted.extend(deleted2)
1764
1764
1765 if fixup and listclean:
1765 if fixup and listclean:
1766 s.clean.extend(fixup)
1766 s.clean.extend(fixup)
1767
1767
1768 self._poststatusfixup(s, fixup)
1768 self._poststatusfixup(s, fixup)
1769
1769
1770 if match.always():
1770 if match.always():
1771 # cache for performance
1771 # cache for performance
1772 if s.unknown or s.ignored or s.clean:
1772 if s.unknown or s.ignored or s.clean:
1773 # "_status" is cached with list*=False in the normal route
1773 # "_status" is cached with list*=False in the normal route
1774 self._status = scmutil.status(s.modified, s.added, s.removed,
1774 self._status = scmutil.status(s.modified, s.added, s.removed,
1775 s.deleted, [], [], [])
1775 s.deleted, [], [], [])
1776 else:
1776 else:
1777 self._status = s
1777 self._status = s
1778
1778
1779 return s
1779 return s
1780
1780
    @propertycache
    def _manifest(self):
        """generate a manifest corresponding to the values in self._status

        This reuses the file nodeids from the parent, but uses special node
        identifiers for added and modified files. This is used by manifest
        merging to see that files are different, and by the update logic to
        avoid deleting newly added files.
        """
        return self._buildstatusmanifest(self._status)
1791
1791
    def _buildstatusmanifest(self, status):
        """Builds a manifest that includes the given status results."""
        parents = self.parents()

        man = parents[0].manifest().copy()

        ff = self._flagfunc
        for i, l in ((addednodeid, status.added),
                     (modifiednodeid, status.modified)):
            for f in l:
                man[f] = i
                try:
                    man.setflag(f, ff(f))
                except OSError:
                    pass

        for f in status.deleted + status.removed:
            if f in man:
                del man[f]

        return man

    def _buildstatus(self, other, s, match, listignored, listclean,
                     listunknown):
        """build a status with respect to another context

        This includes logic for maintaining the fast path of status when
        comparing the working directory against its parent, which is to skip
        building a new manifest if self (working directory) is not comparing
        against its parent (repo['.']).
        """
        s = self._dirstatestatus(match, listignored, listclean, listunknown)
        # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
        # might have accidentally ended up with the entire contents of the file
        # they are supposed to be linking to.
        s.modified[:] = self._filtersuspectsymlink(s.modified)
        if other != self._repo['.']:
            s = super(workingctx, self)._buildstatus(other, s, match,
                                                     listignored, listclean,
                                                     listunknown)
        return s

    def _matchstatus(self, other, match):
        """override the match method with a filter for directory patterns

        We use inheritance to customize the match.bad method only in cases of
        workingctx since it belongs only to the working directory when
        comparing against the parent changeset.

        If we aren't comparing against the working directory's parent, then we
        just use the default match object sent to us.
        """
        superself = super(workingctx, self)
        match = superself._matchstatus(other, match)
        if other != self._repo['.']:
            def bad(f, msg):
                # 'f' may be a directory pattern from 'match.files()',
                # so 'f not in ctx1' is not enough
                if f not in other and not other.hasdir(f):
                    self._repo.ui.warn('%s: %s\n' %
                                       (self._repo.dirstate.pathto(f), msg))
            match.bad = bad
        return match

    def markcommitted(self, node):
        super(workingctx, self).markcommitted(node)

        sparse.aftercommit(self._repo, node)

class committablefilectx(basefilectx):
    """A committablefilectx provides common functionality for a file context
    that wants the ability to commit, e.g. workingfilectx or memfilectx."""
    def __init__(self, repo, path, filelog=None, ctx=None):
        self._repo = repo
        self._path = path
        self._changeid = None
        self._filerev = self._filenode = None

        if filelog is not None:
            self._filelog = filelog
        if ctx:
            self._changectx = ctx

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def linkrev(self):
        # linked to self._changectx no matter if file is modified or not
        return self.rev()

    def parents(self):
        '''return parent filectxs, following copies if necessary'''
        def filenode(ctx, path):
            return ctx._manifest.get(path, nullid)

        path = self._path
        fl = self._filelog
        pcl = self._changectx._parents
        renamed = self.renamed()

        if renamed:
            pl = [renamed + (None,)]
        else:
            pl = [(path, filenode(pcl[0], path), fl)]

        for pc in pcl[1:]:
            pl.append((path, filenode(pc, path), fl))

        return [self._parentfilectx(p, fileid=n, filelog=l)
                for p, n, l in pl if n != nullid]

    def children(self):
        return []

class workingfilectx(committablefilectx):
    """A workingfilectx object makes access to data related to a particular
    file in the working directory convenient."""
    def __init__(self, repo, path, filelog=None, workingctx=None):
        super(workingfilectx, self).__init__(repo, path, filelog, workingctx)

    @propertycache
    def _changectx(self):
        return workingctx(self._repo)

    def data(self):
        return self._repo.wread(self._path)
    def renamed(self):
        rp = self._repo.dirstate.copied(self._path)
        if not rp:
            return None
        return rp, self._changectx._parents[0]._manifest.get(rp, nullid)

    def size(self):
        return self._repo.wvfs.lstat(self._path).st_size
    def date(self):
        t, tz = self._changectx.date()
        try:
            return (self._repo.wvfs.lstat(self._path).st_mtime, tz)
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
            return (t, tz)

    def exists(self):
        return self._repo.wvfs.exists(self._path)

    def lexists(self):
        return self._repo.wvfs.lexists(self._path)

    def audit(self):
        return self._repo.wvfs.audit(self._path)

    def cmp(self, fctx):
        """compare with other file context

        returns True if different than fctx.
        """
        # fctx should be a filectx (not a workingfilectx)
        # invert comparison to reuse the same code path
        return fctx.cmp(self)

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        self._repo.wvfs.unlinkpath(self._path, ignoremissing=ignoremissing)

    def write(self, data, flags, backgroundclose=False):
        """wraps repo.wwrite"""
        self._repo.wwrite(self._path, data, flags,
                          backgroundclose=backgroundclose)

    def setflags(self, l, x):
        self._repo.wvfs.setflags(self._path, l, x)

class workingcommitctx(workingctx):
    """A workingcommitctx object makes access to data related to
    the revision being committed convenient.

    This hides changes in the working directory, if they aren't
    committed in this context.
    """
    def __init__(self, repo, changes,
                 text="", user=None, date=None, extra=None):
        super(workingctx, self).__init__(repo, text, user, date, extra,
                                         changes)

    def _dirstatestatus(self, match=None, ignored=False, clean=False,
                        unknown=False):
        """Return matched files only in ``self._status``

        Uncommitted files appear "clean" via this context, even if
        they aren't actually so in the working directory.
        """
        match = match or matchmod.always(self._repo.root, self._repo.getcwd())
        if clean:
            clean = [f for f in self._manifest if f not in self._changedset]
        else:
            clean = []
        return scmutil.status([f for f in self._status.modified if match(f)],
                              [f for f in self._status.added if match(f)],
                              [f for f in self._status.removed if match(f)],
                              [], [], [], clean)

    @propertycache
    def _changedset(self):
        """Return the set of files changed in this context
        """
        changed = set(self._status.modified)
        changed.update(self._status.added)
        changed.update(self._status.removed)
        return changed

def makecachingfilectxfn(func):
    """Create a filectxfn that caches based on the path.

    We can't use util.cachefunc because it uses all arguments as the cache
    key and this creates a cycle since the arguments include the repo and
    memctx.
    """
    cache = {}

    def getfilectx(repo, memctx, path):
        if path not in cache:
            cache[path] = func(repo, memctx, path)
        return cache[path]

    return getfilectx
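# A minimal sketch of how a caller might use makecachingfilectxfn; this is
# hypothetical example code, not part of the module ('somerepo', 'somememctx'
# and the expensive callback are assumptions):
#
#     def expensivefilectxfn(repo, memctx, path):
#         # pretend this does costly work, e.g. fetching file data remotely
#         return memfilectx(repo, path, b'data for ' + path, memctx=memctx)
#
#     cachedfn = makecachingfilectxfn(expensivefilectxfn)
#     fctx1 = cachedfn(somerepo, somememctx, 'a.txt')  # computed once
#     fctx2 = cachedfn(somerepo, somememctx, 'a.txt')  # served from the cache
#     assert fctx1 is fctx2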

def memfilefromctx(ctx):
    """Given a context return a memfilectx for ctx[path]

    This is a convenience method for building a memctx based on another
    context.
    """
    def getfilectx(repo, memctx, path):
        fctx = ctx[path]
        # this is weird but apparently we only keep track of one parent
        # (why not only store that instead of a tuple?)
        copied = fctx.renamed()
        if copied:
            copied = copied[0]
        return memfilectx(repo, path, fctx.data(),
                          islink=fctx.islink(), isexec=fctx.isexec(),
                          copied=copied, memctx=memctx)

    return getfilectx

def memfilefrompatch(patchstore):
    """Given a patch (e.g. patchstore object) return a memfilectx

    This is a convenience method for building a memctx based on a patchstore.
    """
    def getfilectx(repo, memctx, path):
        data, mode, copied = patchstore.getfile(path)
        if data is None:
            return None
        islink, isexec = mode
        return memfilectx(repo, path, data, islink=islink,
                          isexec=isexec, copied=copied,
                          memctx=memctx)

    return getfilectx

class memctx(committablectx):
    """Use memctx to perform in-memory commits via localrepo.commitctx().

    Revision information is supplied at initialization time, while related
    file data is made available through a callback mechanism. 'repo' is the
    current localrepo, 'parents' is a sequence of two parent revisions
    identifiers (pass None for every missing parent), 'text' is the commit
    message and 'files' lists names of files touched by the revision
    (normalized and relative to repository root).

    filectxfn(repo, memctx, path) is a callable receiving the
    repository, the current memctx object and the normalized path of
    requested file, relative to repository root. It is fired by the
    commit function for every file in 'files', but the call order is
    undefined. If the file is available in the revision being
    committed (updated or added), filectxfn returns a memfilectx
    object. If the file was removed, filectxfn returns None for recent
    Mercurial. Moved files are represented by marking the source file
    removed and the new file added with copy information (see
    memfilectx).

    user receives the committer name and defaults to current
    repository username, date is the commit date in any format
    supported by util.parsedate() and defaults to current date, extra
    is a dictionary of metadata or is left empty.
    """
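    # A minimal usage sketch; this is hypothetical caller code, not part of
    # the class ('repo' is assumed to be an existing localrepo). It commits a
    # single new file without touching the working directory:
    #
    #     def getfilectx(repo, memctx, path):
    #         if path == 'hello.txt':
    #             return memfilectx(repo, path, b'hello\n', memctx=memctx)
    #         return None  # any other requested path is treated as removed
    #
    #     parent = repo['.'].node()
    #     ctx = memctx(repo, (parent, None), b'add hello.txt',
    #                  ['hello.txt'], getfilectx, user=b'someone')
    #     newnode = ctx.commit()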

    # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
    # Extensions that need to retain compatibility across Mercurial 3.1 can use
    # this field to determine what to do in filectxfn.
    _returnnoneformissingfiles = True

    def __init__(self, repo, parents, text, files, filectxfn, user=None,
                 date=None, extra=None, branch=None, editor=False):
        super(memctx, self).__init__(repo, text, user, date, extra)
        self._rev = None
        self._node = None
        parents = [(p or nullid) for p in parents]
        p1, p2 = parents
        self._parents = [changectx(self._repo, p) for p in (p1, p2)]
        files = sorted(set(files))
        self._files = files
        if branch is not None:
            self._extra['branch'] = encoding.fromlocal(branch)
        self.substate = {}

        if isinstance(filectxfn, patch.filestore):
            filectxfn = memfilefrompatch(filectxfn)
        elif not callable(filectxfn):
            # if store is not callable, wrap it in a function
            filectxfn = memfilefromctx(filectxfn)

        # memoizing increases performance for e.g. vcs convert scenarios.
        self._filectxfn = makecachingfilectxfn(filectxfn)

        if editor:
            self._text = editor(self._repo, self, [])
            self._repo.savecommitmessage(self._text)

    def filectx(self, path, filelog=None):
        """get a file context from the working directory

        Returns None if file doesn't exist and should be removed."""
        return self._filectxfn(self._repo, self, path)

    def commit(self):
        """commit context to the repo"""
        return self._repo.commitctx(self)

    @propertycache
    def _manifest(self):
        """generate a manifest based on the return values of filectxfn"""

        # keep this simple for now; just worry about p1
        pctx = self._parents[0]
        man = pctx.manifest().copy()

        for f in self._status.modified:
            p1node = nullid
            p2node = nullid
            p = pctx[f].parents() # if file isn't in pctx, check p2?
            if len(p) > 0:
                p1node = p[0].filenode()
                if len(p) > 1:
                    p2node = p[1].filenode()
            man[f] = revlog.hash(self[f].data(), p1node, p2node)

        for f in self._status.added:
            man[f] = revlog.hash(self[f].data(), nullid, nullid)

        for f in self._status.removed:
            if f in man:
                del man[f]

        return man

    @propertycache
    def _status(self):
        """Calculate exact status from ``files`` specified at construction
        """
        man1 = self.p1().manifest()
        p2 = self._parents[1]
        # "1 < len(self._parents)" can't be used for checking
        # existence of the 2nd parent, because "memctx._parents" is
        # explicitly initialized by a list whose length is always 2.
        if p2.node() != nullid:
            man2 = p2.manifest()
            managing = lambda f: f in man1 or f in man2
        else:
            managing = lambda f: f in man1

        modified, added, removed = [], [], []
        for f in self._files:
            if not managing(f):
                added.append(f)
            elif self[f]:
                modified.append(f)
            else:
                removed.append(f)

        return scmutil.status(modified, added, removed, [], [], [], [])

class memfilectx(committablefilectx):
    """memfilectx represents an in-memory file to commit.

    See memctx and committablefilectx for more details.
    """
    def __init__(self, repo, path, data, islink=False,
                 isexec=False, copied=None, memctx=None):
        """
        path is the normalized file path relative to repository root.
        data is the file content as a string.
        islink is True if the file is a symbolic link.
        isexec is True if the file is executable.
        copied is the source file path if current file was copied in the
        revision being committed, or None."""
        super(memfilectx, self).__init__(repo, path, None, memctx)
        self._data = data
        self._flags = (islink and 'l' or '') + (isexec and 'x' or '')
        self._copied = None
        if copied:
            self._copied = (copied, nullid)

    def data(self):
        return self._data

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        # need to figure out what to do here
        del self._changectx[self._path]

    def write(self, data, flags):
        """wraps repo.wwrite"""
        self._data = data

class overlayfilectx(committablefilectx):
    """Like memfilectx but takes an original filectx and optional parameters
    to override parts of it. This is useful when fctx.data() is expensive
    (i.e. the flag processor is expensive) and raw data, flags, and filenode
    could be reused (ex. rebase or mode-only amend of a REVIDX_EXTSTORED
    file).
    """
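    # An illustrative sketch; hypothetical caller code, not part of the class
    # ('fctx' is assumed to be an existing filectx). Only the flags are
    # overridden, so the raw data and filenode of the original can be reused:
    #
    #     execfctx = overlayfilectx(fctx, flags='x')
    #     assert execfctx.data() == fctx.data()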

    def __init__(self, originalfctx, datafunc=None, path=None, flags=None,
                 copied=None, ctx=None):
        """originalfctx: filecontext to duplicate

        datafunc: None or a function to override data (file content). It is
        a function so the data can be computed lazily. path, flags, copied,
        ctx: None or overridden value

        copied could be (path, rev), or False. copied could also be just path,
        and will be converted to (path, nullid). This simplifies some callers.
        """

        if path is None:
            path = originalfctx.path()
        if ctx is None:
            ctx = originalfctx.changectx()
            ctxmatch = lambda: True
        else:
            ctxmatch = lambda: ctx == originalfctx.changectx()

        repo = originalfctx.repo()
        flog = originalfctx.filelog()
        super(overlayfilectx, self).__init__(repo, path, flog, ctx)

        if copied is None:
            copied = originalfctx.renamed()
            copiedmatch = lambda: True
        else:
            if copied and not isinstance(copied, tuple):
                # repo._filecommit will recalculate copyrev so nullid is okay
                copied = (copied, nullid)
            copiedmatch = lambda: copied == originalfctx.renamed()

        # When data, copied (could affect data), ctx (could affect filelog
        # parents) are not overridden, rawdata, rawflags, and filenode may be
        # reused (repo._filecommit should double check filelog parents).
        #
        # path, flags are not hashed in filelog (but in manifestlog) so they
        # do not affect reusability here.
        #
        # If ctx or copied is overridden to the same value as in originalfctx,
        # still consider it reusable. originalfctx.renamed() may be a bit
        # expensive so it's not called unless necessary. Assuming datafunc is
        # always expensive, do not call it for this "reusable" test.
        reusable = datafunc is None and ctxmatch() and copiedmatch()

        if datafunc is None:
            datafunc = originalfctx.data
        if flags is None:
            flags = originalfctx.flags()

        self._datafunc = datafunc
        self._flags = flags
        self._copied = copied

        if reusable:
            # copy extra fields from originalfctx
            attrs = ['rawdata', 'rawflags', '_filenode', '_filerev']
            for attr in attrs:
                if util.safehasattr(originalfctx, attr):
                    setattr(self, attr, getattr(originalfctx, attr))

    def data(self):
        return self._datafunc()

class metadataonlyctx(committablectx):
    """Like memctx but it reuses the manifest of a different commit.
    Intended to be used by lightweight operations that are creating
    metadata-only changes.

    Revision information is supplied at initialization time. 'repo' is the
    current localrepo, 'ctx' is the original revision whose manifest we're
    reusing, 'parents' is a sequence of two parent revisions identifiers
    (pass None for every missing parent), 'text' is the commit message.

    user receives the committer name and defaults to current repository
    username, date is the commit date in any format supported by
    util.parsedate() and defaults to current date, extra is a dictionary of
    metadata or is left empty.
    """
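    # A minimal sketch; hypothetical caller code, not part of the class
    # ('repo' is assumed to be an existing localrepo). It rewrites only the
    # commit message of the working directory parent while reusing its
    # manifest:
    #
    #     old = repo['.']
    #     new = metadataonlyctx(repo, old,
    #                           (old.p1().node(), old.p2().node()),
    #                           b'better message', user=old.user(),
    #                           date=old.date(), extra=old.extra())
    #     newnode = new.commit()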
    def __new__(cls, repo, originalctx, *args, **kwargs):
        return super(metadataonlyctx, cls).__new__(cls, repo)

    def __init__(self, repo, originalctx, parents, text, user=None, date=None,
                 extra=None, editor=False):
        super(metadataonlyctx, self).__init__(repo, text, user, date, extra)
        self._rev = None
        self._node = None
        self._originalctx = originalctx
        self._manifestnode = originalctx.manifestnode()
        parents = [(p or nullid) for p in parents]
        p1, p2 = self._parents = [changectx(self._repo, p) for p in parents]

        # sanity check to ensure that the reused manifest parents are
        # manifests of our commit parents
        mp1, mp2 = self.manifestctx().parents
        if p1 != nullid and p1.manifestnode() != mp1:
            raise RuntimeError('can\'t reuse the manifest: '
                               'its p1 doesn\'t match the new ctx p1')
        if p2 != nullid and p2.manifestnode() != mp2:
            raise RuntimeError('can\'t reuse the manifest: '
                               'its p2 doesn\'t match the new ctx p2')

        self._files = originalctx.files()
        self.substate = {}

        if editor:
            self._text = editor(self._repo, self, [])
            self._repo.savecommitmessage(self._text)

    def manifestnode(self):
        return self._manifestnode

    @property
    def _manifestctx(self):
        return self._repo.manifestlog[self._manifestnode]

    def filectx(self, path, filelog=None):
        return self._originalctx.filectx(path, filelog=filelog)

    def commit(self):
        """commit context to the repo"""
        return self._repo.commitctx(self)

    @property
    def _manifest(self):
        return self._originalctx.manifest()

    @propertycache
    def _status(self):
        """Calculate exact status from ``files`` specified in the ``origctx``
        and parents manifests.
        """
        man1 = self.p1().manifest()
        p2 = self._parents[1]
        # "1 < len(self._parents)" can't be used for checking
        # existence of the 2nd parent, because "metadataonlyctx._parents" is
        # explicitly initialized by a list whose length is always 2.
        if p2.node() != nullid:
            man2 = p2.manifest()
            managing = lambda f: f in man1 or f in man2
        else:
            managing = lambda f: f in man1

        modified, added, removed = [], [], []
        for f in self._files:
            if not managing(f):
                added.append(f)
            elif self[f]:
                modified.append(f)
            else:
                removed.append(f)

        return scmutil.status(modified, added, removed, [], [], [], [])