##// END OF EJS Templates
context: rename unstable into orphan...
Boris Feld -
r33693:f163edb4 default
parent child Browse files
Show More
@@ -1,2347 +1,2353 b''
1 # context.py - changeset and file context objects for mercurial
1 # context.py - changeset and file context objects for mercurial
2 #
2 #
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import errno
10 import errno
11 import os
11 import os
12 import re
12 import re
13 import stat
13 import stat
14
14
15 from .i18n import _
15 from .i18n import _
16 from .node import (
16 from .node import (
17 addednodeid,
17 addednodeid,
18 bin,
18 bin,
19 hex,
19 hex,
20 modifiednodeid,
20 modifiednodeid,
21 nullid,
21 nullid,
22 nullrev,
22 nullrev,
23 short,
23 short,
24 wdirid,
24 wdirid,
25 wdirnodes,
25 wdirnodes,
26 wdirrev,
26 wdirrev,
27 )
27 )
28 from . import (
28 from . import (
29 encoding,
29 encoding,
30 error,
30 error,
31 fileset,
31 fileset,
32 match as matchmod,
32 match as matchmod,
33 mdiff,
33 mdiff,
34 obsolete as obsmod,
34 obsolete as obsmod,
35 patch,
35 patch,
36 pathutil,
36 pathutil,
37 phases,
37 phases,
38 pycompat,
38 pycompat,
39 repoview,
39 repoview,
40 revlog,
40 revlog,
41 scmutil,
41 scmutil,
42 sparse,
42 sparse,
43 subrepo,
43 subrepo,
44 util,
44 util,
45 )
45 )
46
46
47 propertycache = util.propertycache
47 propertycache = util.propertycache
48
48
49 nonascii = re.compile(r'[^\x21-\x7f]').search
49 nonascii = re.compile(r'[^\x21-\x7f]').search
50
50
51 class basectx(object):
51 class basectx(object):
52 """A basectx object represents the common logic for its children:
52 """A basectx object represents the common logic for its children:
53 changectx: read-only context that is already present in the repo,
53 changectx: read-only context that is already present in the repo,
54 workingctx: a context that represents the working directory and can
54 workingctx: a context that represents the working directory and can
55 be committed,
55 be committed,
56 memctx: a context that represents changes in-memory and can also
56 memctx: a context that represents changes in-memory and can also
57 be committed."""
57 be committed."""
58 def __new__(cls, repo, changeid='', *args, **kwargs):
58 def __new__(cls, repo, changeid='', *args, **kwargs):
59 if isinstance(changeid, basectx):
59 if isinstance(changeid, basectx):
60 return changeid
60 return changeid
61
61
62 o = super(basectx, cls).__new__(cls)
62 o = super(basectx, cls).__new__(cls)
63
63
64 o._repo = repo
64 o._repo = repo
65 o._rev = nullrev
65 o._rev = nullrev
66 o._node = nullid
66 o._node = nullid
67
67
68 return o
68 return o
69
69
70 def __bytes__(self):
70 def __bytes__(self):
71 return short(self.node())
71 return short(self.node())
72
72
73 __str__ = encoding.strmethod(__bytes__)
73 __str__ = encoding.strmethod(__bytes__)
74
74
75 def __int__(self):
75 def __int__(self):
76 return self.rev()
76 return self.rev()
77
77
78 def __repr__(self):
78 def __repr__(self):
79 return r"<%s %s>" % (type(self).__name__, str(self))
79 return r"<%s %s>" % (type(self).__name__, str(self))
80
80
81 def __eq__(self, other):
81 def __eq__(self, other):
82 try:
82 try:
83 return type(self) == type(other) and self._rev == other._rev
83 return type(self) == type(other) and self._rev == other._rev
84 except AttributeError:
84 except AttributeError:
85 return False
85 return False
86
86
87 def __ne__(self, other):
87 def __ne__(self, other):
88 return not (self == other)
88 return not (self == other)
89
89
90 def __contains__(self, key):
90 def __contains__(self, key):
91 return key in self._manifest
91 return key in self._manifest
92
92
93 def __getitem__(self, key):
93 def __getitem__(self, key):
94 return self.filectx(key)
94 return self.filectx(key)
95
95
96 def __iter__(self):
96 def __iter__(self):
97 return iter(self._manifest)
97 return iter(self._manifest)
98
98
99 def _buildstatusmanifest(self, status):
99 def _buildstatusmanifest(self, status):
100 """Builds a manifest that includes the given status results, if this is
100 """Builds a manifest that includes the given status results, if this is
101 a working copy context. For non-working copy contexts, it just returns
101 a working copy context. For non-working copy contexts, it just returns
102 the normal manifest."""
102 the normal manifest."""
103 return self.manifest()
103 return self.manifest()
104
104
105 def _matchstatus(self, other, match):
105 def _matchstatus(self, other, match):
106 """return match.always if match is none
106 """return match.always if match is none
107
107
108 This internal method provides a way for child objects to override the
108 This internal method provides a way for child objects to override the
109 match operator.
109 match operator.
110 """
110 """
111 return match or matchmod.always(self._repo.root, self._repo.getcwd())
111 return match or matchmod.always(self._repo.root, self._repo.getcwd())
112
112
113 def _buildstatus(self, other, s, match, listignored, listclean,
113 def _buildstatus(self, other, s, match, listignored, listclean,
114 listunknown):
114 listunknown):
115 """build a status with respect to another context"""
115 """build a status with respect to another context"""
116 # Load earliest manifest first for caching reasons. More specifically,
116 # Load earliest manifest first for caching reasons. More specifically,
117 # if you have revisions 1000 and 1001, 1001 is probably stored as a
117 # if you have revisions 1000 and 1001, 1001 is probably stored as a
118 # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
118 # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
119 # 1000 and cache it so that when you read 1001, we just need to apply a
119 # 1000 and cache it so that when you read 1001, we just need to apply a
120 # delta to what's in the cache. So that's one full reconstruction + one
120 # delta to what's in the cache. So that's one full reconstruction + one
121 # delta application.
121 # delta application.
122 mf2 = None
122 mf2 = None
123 if self.rev() is not None and self.rev() < other.rev():
123 if self.rev() is not None and self.rev() < other.rev():
124 mf2 = self._buildstatusmanifest(s)
124 mf2 = self._buildstatusmanifest(s)
125 mf1 = other._buildstatusmanifest(s)
125 mf1 = other._buildstatusmanifest(s)
126 if mf2 is None:
126 if mf2 is None:
127 mf2 = self._buildstatusmanifest(s)
127 mf2 = self._buildstatusmanifest(s)
128
128
129 modified, added = [], []
129 modified, added = [], []
130 removed = []
130 removed = []
131 clean = []
131 clean = []
132 deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
132 deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
133 deletedset = set(deleted)
133 deletedset = set(deleted)
134 d = mf1.diff(mf2, match=match, clean=listclean)
134 d = mf1.diff(mf2, match=match, clean=listclean)
135 for fn, value in d.iteritems():
135 for fn, value in d.iteritems():
136 if fn in deletedset:
136 if fn in deletedset:
137 continue
137 continue
138 if value is None:
138 if value is None:
139 clean.append(fn)
139 clean.append(fn)
140 continue
140 continue
141 (node1, flag1), (node2, flag2) = value
141 (node1, flag1), (node2, flag2) = value
142 if node1 is None:
142 if node1 is None:
143 added.append(fn)
143 added.append(fn)
144 elif node2 is None:
144 elif node2 is None:
145 removed.append(fn)
145 removed.append(fn)
146 elif flag1 != flag2:
146 elif flag1 != flag2:
147 modified.append(fn)
147 modified.append(fn)
148 elif node2 not in wdirnodes:
148 elif node2 not in wdirnodes:
149 # When comparing files between two commits, we save time by
149 # When comparing files between two commits, we save time by
150 # not comparing the file contents when the nodeids differ.
150 # not comparing the file contents when the nodeids differ.
151 # Note that this means we incorrectly report a reverted change
151 # Note that this means we incorrectly report a reverted change
152 # to a file as a modification.
152 # to a file as a modification.
153 modified.append(fn)
153 modified.append(fn)
154 elif self[fn].cmp(other[fn]):
154 elif self[fn].cmp(other[fn]):
155 modified.append(fn)
155 modified.append(fn)
156 else:
156 else:
157 clean.append(fn)
157 clean.append(fn)
158
158
159 if removed:
159 if removed:
160 # need to filter files if they are already reported as removed
160 # need to filter files if they are already reported as removed
161 unknown = [fn for fn in unknown if fn not in mf1 and
161 unknown = [fn for fn in unknown if fn not in mf1 and
162 (not match or match(fn))]
162 (not match or match(fn))]
163 ignored = [fn for fn in ignored if fn not in mf1 and
163 ignored = [fn for fn in ignored if fn not in mf1 and
164 (not match or match(fn))]
164 (not match or match(fn))]
165 # if they're deleted, don't report them as removed
165 # if they're deleted, don't report them as removed
166 removed = [fn for fn in removed if fn not in deletedset]
166 removed = [fn for fn in removed if fn not in deletedset]
167
167
168 return scmutil.status(modified, added, removed, deleted, unknown,
168 return scmutil.status(modified, added, removed, deleted, unknown,
169 ignored, clean)
169 ignored, clean)
170
170
171 @propertycache
171 @propertycache
172 def substate(self):
172 def substate(self):
173 return subrepo.state(self, self._repo.ui)
173 return subrepo.state(self, self._repo.ui)
174
174
175 def subrev(self, subpath):
175 def subrev(self, subpath):
176 return self.substate[subpath][1]
176 return self.substate[subpath][1]
177
177
178 def rev(self):
178 def rev(self):
179 return self._rev
179 return self._rev
180 def node(self):
180 def node(self):
181 return self._node
181 return self._node
182 def hex(self):
182 def hex(self):
183 return hex(self.node())
183 return hex(self.node())
184 def manifest(self):
184 def manifest(self):
185 return self._manifest
185 return self._manifest
186 def manifestctx(self):
186 def manifestctx(self):
187 return self._manifestctx
187 return self._manifestctx
188 def repo(self):
188 def repo(self):
189 return self._repo
189 return self._repo
190 def phasestr(self):
190 def phasestr(self):
191 return phases.phasenames[self.phase()]
191 return phases.phasenames[self.phase()]
192 def mutable(self):
192 def mutable(self):
193 return self.phase() > phases.public
193 return self.phase() > phases.public
194
194
195 def getfileset(self, expr):
195 def getfileset(self, expr):
196 return fileset.getfileset(self, expr)
196 return fileset.getfileset(self, expr)
197
197
198 def obsolete(self):
198 def obsolete(self):
199 """True if the changeset is obsolete"""
199 """True if the changeset is obsolete"""
200 return self.rev() in obsmod.getrevs(self._repo, 'obsolete')
200 return self.rev() in obsmod.getrevs(self._repo, 'obsolete')
201
201
202 def extinct(self):
202 def extinct(self):
203 """True if the changeset is extinct"""
203 """True if the changeset is extinct"""
204 return self.rev() in obsmod.getrevs(self._repo, 'extinct')
204 return self.rev() in obsmod.getrevs(self._repo, 'extinct')
205
205
206 def unstable(self):
206 def unstable(self):
207 msg = ("'context.unstable' is deprecated, "
208 "use 'context.orphan'")
209 self._repo.ui.deprecwarn(msg, '4.4')
210 return self.orphan()
211
212 def orphan(self):
207 """True if the changeset is not obsolete but it's ancestor are"""
213 """True if the changeset is not obsolete but it's ancestor are"""
208 return self.rev() in obsmod.getrevs(self._repo, 'unstable')
214 return self.rev() in obsmod.getrevs(self._repo, 'unstable')
209
215
210 def bumped(self):
216 def bumped(self):
211 """True if the changeset try to be a successor of a public changeset
217 """True if the changeset try to be a successor of a public changeset
212
218
213 Only non-public and non-obsolete changesets may be bumped.
219 Only non-public and non-obsolete changesets may be bumped.
214 """
220 """
215 return self.rev() in obsmod.getrevs(self._repo, 'bumped')
221 return self.rev() in obsmod.getrevs(self._repo, 'bumped')
216
222
217 def divergent(self):
223 def divergent(self):
218 """Is a successors of a changeset with multiple possible successors set
224 """Is a successors of a changeset with multiple possible successors set
219
225
220 Only non-public and non-obsolete changesets may be divergent.
226 Only non-public and non-obsolete changesets may be divergent.
221 """
227 """
222 return self.rev() in obsmod.getrevs(self._repo, 'divergent')
228 return self.rev() in obsmod.getrevs(self._repo, 'divergent')
223
229
224 def troubled(self):
230 def troubled(self):
225 """True if the changeset is either unstable, bumped or divergent"""
231 """True if the changeset is either unstable, bumped or divergent"""
226 return self.unstable() or self.bumped() or self.divergent()
232 return self.orphan() or self.bumped() or self.divergent()
227
233
228 def troubles(self):
234 def troubles(self):
229 """Keep the old version around in order to avoid breaking extensions
235 """Keep the old version around in order to avoid breaking extensions
230 about different return values.
236 about different return values.
231 """
237 """
232 msg = ("'context.troubles' is deprecated, "
238 msg = ("'context.troubles' is deprecated, "
233 "use 'context.instabilities'")
239 "use 'context.instabilities'")
234 self._repo.ui.deprecwarn(msg, '4.4')
240 self._repo.ui.deprecwarn(msg, '4.4')
235
241
236 troubles = []
242 troubles = []
237 if self.unstable():
243 if self.orphan():
238 troubles.append('orphan')
244 troubles.append('orphan')
239 if self.bumped():
245 if self.bumped():
240 troubles.append('bumped')
246 troubles.append('bumped')
241 if self.divergent():
247 if self.divergent():
242 troubles.append('divergent')
248 troubles.append('divergent')
243 return troubles
249 return troubles
244
250
245 def instabilities(self):
251 def instabilities(self):
246 """return the list of instabilities affecting this changeset.
252 """return the list of instabilities affecting this changeset.
247
253
248 Instabilities are returned as strings. possible values are:
254 Instabilities are returned as strings. possible values are:
249 - orphan,
255 - orphan,
250 - phase-divergent,
256 - phase-divergent,
251 - content-divergent.
257 - content-divergent.
252 """
258 """
253 instabilities = []
259 instabilities = []
254 if self.unstable():
260 if self.orphan():
255 instabilities.append('orphan')
261 instabilities.append('orphan')
256 if self.bumped():
262 if self.bumped():
257 instabilities.append('phase-divergent')
263 instabilities.append('phase-divergent')
258 if self.divergent():
264 if self.divergent():
259 instabilities.append('content-divergent')
265 instabilities.append('content-divergent')
260 return instabilities
266 return instabilities
261
267
262 def parents(self):
268 def parents(self):
263 """return contexts for each parent changeset"""
269 """return contexts for each parent changeset"""
264 return self._parents
270 return self._parents
265
271
266 def p1(self):
272 def p1(self):
267 return self._parents[0]
273 return self._parents[0]
268
274
269 def p2(self):
275 def p2(self):
270 parents = self._parents
276 parents = self._parents
271 if len(parents) == 2:
277 if len(parents) == 2:
272 return parents[1]
278 return parents[1]
273 return changectx(self._repo, nullrev)
279 return changectx(self._repo, nullrev)
274
280
275 def _fileinfo(self, path):
281 def _fileinfo(self, path):
276 if r'_manifest' in self.__dict__:
282 if r'_manifest' in self.__dict__:
277 try:
283 try:
278 return self._manifest[path], self._manifest.flags(path)
284 return self._manifest[path], self._manifest.flags(path)
279 except KeyError:
285 except KeyError:
280 raise error.ManifestLookupError(self._node, path,
286 raise error.ManifestLookupError(self._node, path,
281 _('not found in manifest'))
287 _('not found in manifest'))
282 if r'_manifestdelta' in self.__dict__ or path in self.files():
288 if r'_manifestdelta' in self.__dict__ or path in self.files():
283 if path in self._manifestdelta:
289 if path in self._manifestdelta:
284 return (self._manifestdelta[path],
290 return (self._manifestdelta[path],
285 self._manifestdelta.flags(path))
291 self._manifestdelta.flags(path))
286 mfl = self._repo.manifestlog
292 mfl = self._repo.manifestlog
287 try:
293 try:
288 node, flag = mfl[self._changeset.manifest].find(path)
294 node, flag = mfl[self._changeset.manifest].find(path)
289 except KeyError:
295 except KeyError:
290 raise error.ManifestLookupError(self._node, path,
296 raise error.ManifestLookupError(self._node, path,
291 _('not found in manifest'))
297 _('not found in manifest'))
292
298
293 return node, flag
299 return node, flag
294
300
295 def filenode(self, path):
301 def filenode(self, path):
296 return self._fileinfo(path)[0]
302 return self._fileinfo(path)[0]
297
303
298 def flags(self, path):
304 def flags(self, path):
299 try:
305 try:
300 return self._fileinfo(path)[1]
306 return self._fileinfo(path)[1]
301 except error.LookupError:
307 except error.LookupError:
302 return ''
308 return ''
303
309
304 def sub(self, path, allowcreate=True):
310 def sub(self, path, allowcreate=True):
305 '''return a subrepo for the stored revision of path, never wdir()'''
311 '''return a subrepo for the stored revision of path, never wdir()'''
306 return subrepo.subrepo(self, path, allowcreate=allowcreate)
312 return subrepo.subrepo(self, path, allowcreate=allowcreate)
307
313
308 def nullsub(self, path, pctx):
314 def nullsub(self, path, pctx):
309 return subrepo.nullsubrepo(self, path, pctx)
315 return subrepo.nullsubrepo(self, path, pctx)
310
316
311 def workingsub(self, path):
317 def workingsub(self, path):
312 '''return a subrepo for the stored revision, or wdir if this is a wdir
318 '''return a subrepo for the stored revision, or wdir if this is a wdir
313 context.
319 context.
314 '''
320 '''
315 return subrepo.subrepo(self, path, allowwdir=True)
321 return subrepo.subrepo(self, path, allowwdir=True)
316
322
317 def match(self, pats=None, include=None, exclude=None, default='glob',
323 def match(self, pats=None, include=None, exclude=None, default='glob',
318 listsubrepos=False, badfn=None):
324 listsubrepos=False, badfn=None):
319 r = self._repo
325 r = self._repo
320 return matchmod.match(r.root, r.getcwd(), pats,
326 return matchmod.match(r.root, r.getcwd(), pats,
321 include, exclude, default,
327 include, exclude, default,
322 auditor=r.nofsauditor, ctx=self,
328 auditor=r.nofsauditor, ctx=self,
323 listsubrepos=listsubrepos, badfn=badfn)
329 listsubrepos=listsubrepos, badfn=badfn)
324
330
325 def diff(self, ctx2=None, match=None, **opts):
331 def diff(self, ctx2=None, match=None, **opts):
326 """Returns a diff generator for the given contexts and matcher"""
332 """Returns a diff generator for the given contexts and matcher"""
327 if ctx2 is None:
333 if ctx2 is None:
328 ctx2 = self.p1()
334 ctx2 = self.p1()
329 if ctx2 is not None:
335 if ctx2 is not None:
330 ctx2 = self._repo[ctx2]
336 ctx2 = self._repo[ctx2]
331 diffopts = patch.diffopts(self._repo.ui, opts)
337 diffopts = patch.diffopts(self._repo.ui, opts)
332 return patch.diff(self._repo, ctx2, self, match=match, opts=diffopts)
338 return patch.diff(self._repo, ctx2, self, match=match, opts=diffopts)
333
339
334 def dirs(self):
340 def dirs(self):
335 return self._manifest.dirs()
341 return self._manifest.dirs()
336
342
337 def hasdir(self, dir):
343 def hasdir(self, dir):
338 return self._manifest.hasdir(dir)
344 return self._manifest.hasdir(dir)
339
345
340 def status(self, other=None, match=None, listignored=False,
346 def status(self, other=None, match=None, listignored=False,
341 listclean=False, listunknown=False, listsubrepos=False):
347 listclean=False, listunknown=False, listsubrepos=False):
342 """return status of files between two nodes or node and working
348 """return status of files between two nodes or node and working
343 directory.
349 directory.
344
350
345 If other is None, compare this node with working directory.
351 If other is None, compare this node with working directory.
346
352
347 returns (modified, added, removed, deleted, unknown, ignored, clean)
353 returns (modified, added, removed, deleted, unknown, ignored, clean)
348 """
354 """
349
355
350 ctx1 = self
356 ctx1 = self
351 ctx2 = self._repo[other]
357 ctx2 = self._repo[other]
352
358
353 # This next code block is, admittedly, fragile logic that tests for
359 # This next code block is, admittedly, fragile logic that tests for
354 # reversing the contexts and wouldn't need to exist if it weren't for
360 # reversing the contexts and wouldn't need to exist if it weren't for
355 # the fast (and common) code path of comparing the working directory
361 # the fast (and common) code path of comparing the working directory
356 # with its first parent.
362 # with its first parent.
357 #
363 #
358 # What we're aiming for here is the ability to call:
364 # What we're aiming for here is the ability to call:
359 #
365 #
360 # workingctx.status(parentctx)
366 # workingctx.status(parentctx)
361 #
367 #
362 # If we always built the manifest for each context and compared those,
368 # If we always built the manifest for each context and compared those,
363 # then we'd be done. But the special case of the above call means we
369 # then we'd be done. But the special case of the above call means we
364 # just copy the manifest of the parent.
370 # just copy the manifest of the parent.
365 reversed = False
371 reversed = False
366 if (not isinstance(ctx1, changectx)
372 if (not isinstance(ctx1, changectx)
367 and isinstance(ctx2, changectx)):
373 and isinstance(ctx2, changectx)):
368 reversed = True
374 reversed = True
369 ctx1, ctx2 = ctx2, ctx1
375 ctx1, ctx2 = ctx2, ctx1
370
376
371 match = ctx2._matchstatus(ctx1, match)
377 match = ctx2._matchstatus(ctx1, match)
372 r = scmutil.status([], [], [], [], [], [], [])
378 r = scmutil.status([], [], [], [], [], [], [])
373 r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
379 r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
374 listunknown)
380 listunknown)
375
381
376 if reversed:
382 if reversed:
377 # Reverse added and removed. Clear deleted, unknown and ignored as
383 # Reverse added and removed. Clear deleted, unknown and ignored as
378 # these make no sense to reverse.
384 # these make no sense to reverse.
379 r = scmutil.status(r.modified, r.removed, r.added, [], [], [],
385 r = scmutil.status(r.modified, r.removed, r.added, [], [], [],
380 r.clean)
386 r.clean)
381
387
382 if listsubrepos:
388 if listsubrepos:
383 for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
389 for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
384 try:
390 try:
385 rev2 = ctx2.subrev(subpath)
391 rev2 = ctx2.subrev(subpath)
386 except KeyError:
392 except KeyError:
387 # A subrepo that existed in node1 was deleted between
393 # A subrepo that existed in node1 was deleted between
388 # node1 and node2 (inclusive). Thus, ctx2's substate
394 # node1 and node2 (inclusive). Thus, ctx2's substate
389 # won't contain that subpath. The best we can do ignore it.
395 # won't contain that subpath. The best we can do ignore it.
390 rev2 = None
396 rev2 = None
391 submatch = matchmod.subdirmatcher(subpath, match)
397 submatch = matchmod.subdirmatcher(subpath, match)
392 s = sub.status(rev2, match=submatch, ignored=listignored,
398 s = sub.status(rev2, match=submatch, ignored=listignored,
393 clean=listclean, unknown=listunknown,
399 clean=listclean, unknown=listunknown,
394 listsubrepos=True)
400 listsubrepos=True)
395 for rfiles, sfiles in zip(r, s):
401 for rfiles, sfiles in zip(r, s):
396 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
402 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
397
403
398 for l in r:
404 for l in r:
399 l.sort()
405 l.sort()
400
406
401 return r
407 return r
402
408
403 def _filterederror(repo, changeid):
409 def _filterederror(repo, changeid):
404 """build an exception to be raised about a filtered changeid
410 """build an exception to be raised about a filtered changeid
405
411
406 This is extracted in a function to help extensions (eg: evolve) to
412 This is extracted in a function to help extensions (eg: evolve) to
407 experiment with various message variants."""
413 experiment with various message variants."""
408 if repo.filtername.startswith('visible'):
414 if repo.filtername.startswith('visible'):
409 msg = _("hidden revision '%s'") % changeid
415 msg = _("hidden revision '%s'") % changeid
410 hint = _('use --hidden to access hidden revisions')
416 hint = _('use --hidden to access hidden revisions')
411 return error.FilteredRepoLookupError(msg, hint=hint)
417 return error.FilteredRepoLookupError(msg, hint=hint)
412 msg = _("filtered revision '%s' (not in '%s' subset)")
418 msg = _("filtered revision '%s' (not in '%s' subset)")
413 msg %= (changeid, repo.filtername)
419 msg %= (changeid, repo.filtername)
414 return error.FilteredRepoLookupError(msg)
420 return error.FilteredRepoLookupError(msg)
415
421
416 class changectx(basectx):
422 class changectx(basectx):
417 """A changecontext object makes access to data related to a particular
423 """A changecontext object makes access to data related to a particular
418 changeset convenient. It represents a read-only context already present in
424 changeset convenient. It represents a read-only context already present in
419 the repo."""
425 the repo."""
420 def __init__(self, repo, changeid=''):
426 def __init__(self, repo, changeid=''):
421 """changeid is a revision number, node, or tag"""
427 """changeid is a revision number, node, or tag"""
422
428
423 # since basectx.__new__ already took care of copying the object, we
429 # since basectx.__new__ already took care of copying the object, we
424 # don't need to do anything in __init__, so we just exit here
430 # don't need to do anything in __init__, so we just exit here
425 if isinstance(changeid, basectx):
431 if isinstance(changeid, basectx):
426 return
432 return
427
433
428 if changeid == '':
434 if changeid == '':
429 changeid = '.'
435 changeid = '.'
430 self._repo = repo
436 self._repo = repo
431
437
432 try:
438 try:
433 if isinstance(changeid, int):
439 if isinstance(changeid, int):
434 self._node = repo.changelog.node(changeid)
440 self._node = repo.changelog.node(changeid)
435 self._rev = changeid
441 self._rev = changeid
436 return
442 return
437 if not pycompat.ispy3 and isinstance(changeid, long):
443 if not pycompat.ispy3 and isinstance(changeid, long):
438 changeid = str(changeid)
444 changeid = str(changeid)
439 if changeid == 'null':
445 if changeid == 'null':
440 self._node = nullid
446 self._node = nullid
441 self._rev = nullrev
447 self._rev = nullrev
442 return
448 return
443 if changeid == 'tip':
449 if changeid == 'tip':
444 self._node = repo.changelog.tip()
450 self._node = repo.changelog.tip()
445 self._rev = repo.changelog.rev(self._node)
451 self._rev = repo.changelog.rev(self._node)
446 return
452 return
447 if changeid == '.' or changeid == repo.dirstate.p1():
453 if changeid == '.' or changeid == repo.dirstate.p1():
448 # this is a hack to delay/avoid loading obsmarkers
454 # this is a hack to delay/avoid loading obsmarkers
449 # when we know that '.' won't be hidden
455 # when we know that '.' won't be hidden
450 self._node = repo.dirstate.p1()
456 self._node = repo.dirstate.p1()
451 self._rev = repo.unfiltered().changelog.rev(self._node)
457 self._rev = repo.unfiltered().changelog.rev(self._node)
452 return
458 return
453 if len(changeid) == 20:
459 if len(changeid) == 20:
454 try:
460 try:
455 self._node = changeid
461 self._node = changeid
456 self._rev = repo.changelog.rev(changeid)
462 self._rev = repo.changelog.rev(changeid)
457 return
463 return
458 except error.FilteredRepoLookupError:
464 except error.FilteredRepoLookupError:
459 raise
465 raise
460 except LookupError:
466 except LookupError:
461 pass
467 pass
462
468
463 try:
469 try:
464 r = int(changeid)
470 r = int(changeid)
465 if '%d' % r != changeid:
471 if '%d' % r != changeid:
466 raise ValueError
472 raise ValueError
467 l = len(repo.changelog)
473 l = len(repo.changelog)
468 if r < 0:
474 if r < 0:
469 r += l
475 r += l
470 if r < 0 or r >= l and r != wdirrev:
476 if r < 0 or r >= l and r != wdirrev:
471 raise ValueError
477 raise ValueError
472 self._rev = r
478 self._rev = r
473 self._node = repo.changelog.node(r)
479 self._node = repo.changelog.node(r)
474 return
480 return
475 except error.FilteredIndexError:
481 except error.FilteredIndexError:
476 raise
482 raise
477 except (ValueError, OverflowError, IndexError):
483 except (ValueError, OverflowError, IndexError):
478 pass
484 pass
479
485
480 if len(changeid) == 40:
486 if len(changeid) == 40:
481 try:
487 try:
482 self._node = bin(changeid)
488 self._node = bin(changeid)
483 self._rev = repo.changelog.rev(self._node)
489 self._rev = repo.changelog.rev(self._node)
484 return
490 return
485 except error.FilteredLookupError:
491 except error.FilteredLookupError:
486 raise
492 raise
487 except (TypeError, LookupError):
493 except (TypeError, LookupError):
488 pass
494 pass
489
495
490 # lookup bookmarks through the name interface
496 # lookup bookmarks through the name interface
491 try:
497 try:
492 self._node = repo.names.singlenode(repo, changeid)
498 self._node = repo.names.singlenode(repo, changeid)
493 self._rev = repo.changelog.rev(self._node)
499 self._rev = repo.changelog.rev(self._node)
494 return
500 return
495 except KeyError:
501 except KeyError:
496 pass
502 pass
497 except error.FilteredRepoLookupError:
503 except error.FilteredRepoLookupError:
498 raise
504 raise
499 except error.RepoLookupError:
505 except error.RepoLookupError:
500 pass
506 pass
501
507
502 self._node = repo.unfiltered().changelog._partialmatch(changeid)
508 self._node = repo.unfiltered().changelog._partialmatch(changeid)
503 if self._node is not None:
509 if self._node is not None:
504 self._rev = repo.changelog.rev(self._node)
510 self._rev = repo.changelog.rev(self._node)
505 return
511 return
506
512
507 # lookup failed
513 # lookup failed
508 # check if it might have come from damaged dirstate
514 # check if it might have come from damaged dirstate
509 #
515 #
510 # XXX we could avoid the unfiltered if we had a recognizable
516 # XXX we could avoid the unfiltered if we had a recognizable
511 # exception for filtered changeset access
517 # exception for filtered changeset access
512 if changeid in repo.unfiltered().dirstate.parents():
518 if changeid in repo.unfiltered().dirstate.parents():
513 msg = _("working directory has unknown parent '%s'!")
519 msg = _("working directory has unknown parent '%s'!")
514 raise error.Abort(msg % short(changeid))
520 raise error.Abort(msg % short(changeid))
515 try:
521 try:
516 if len(changeid) == 20 and nonascii(changeid):
522 if len(changeid) == 20 and nonascii(changeid):
517 changeid = hex(changeid)
523 changeid = hex(changeid)
518 except TypeError:
524 except TypeError:
519 pass
525 pass
520 except (error.FilteredIndexError, error.FilteredLookupError,
526 except (error.FilteredIndexError, error.FilteredLookupError,
521 error.FilteredRepoLookupError):
527 error.FilteredRepoLookupError):
522 raise _filterederror(repo, changeid)
528 raise _filterederror(repo, changeid)
523 except IndexError:
529 except IndexError:
524 pass
530 pass
525 raise error.RepoLookupError(
531 raise error.RepoLookupError(
526 _("unknown revision '%s'") % changeid)
532 _("unknown revision '%s'") % changeid)
527
533
528 def __hash__(self):
534 def __hash__(self):
529 try:
535 try:
530 return hash(self._rev)
536 return hash(self._rev)
531 except AttributeError:
537 except AttributeError:
532 return id(self)
538 return id(self)
533
539
534 def __nonzero__(self):
540 def __nonzero__(self):
535 return self._rev != nullrev
541 return self._rev != nullrev
536
542
537 __bool__ = __nonzero__
543 __bool__ = __nonzero__
538
544
539 @propertycache
545 @propertycache
540 def _changeset(self):
546 def _changeset(self):
541 return self._repo.changelog.changelogrevision(self.rev())
547 return self._repo.changelog.changelogrevision(self.rev())
542
548
543 @propertycache
549 @propertycache
544 def _manifest(self):
550 def _manifest(self):
545 return self._manifestctx.read()
551 return self._manifestctx.read()
546
552
547 @property
553 @property
548 def _manifestctx(self):
554 def _manifestctx(self):
549 return self._repo.manifestlog[self._changeset.manifest]
555 return self._repo.manifestlog[self._changeset.manifest]
550
556
551 @propertycache
557 @propertycache
552 def _manifestdelta(self):
558 def _manifestdelta(self):
553 return self._manifestctx.readdelta()
559 return self._manifestctx.readdelta()
554
560
555 @propertycache
561 @propertycache
556 def _parents(self):
562 def _parents(self):
557 repo = self._repo
563 repo = self._repo
558 p1, p2 = repo.changelog.parentrevs(self._rev)
564 p1, p2 = repo.changelog.parentrevs(self._rev)
559 if p2 == nullrev:
565 if p2 == nullrev:
560 return [changectx(repo, p1)]
566 return [changectx(repo, p1)]
561 return [changectx(repo, p1), changectx(repo, p2)]
567 return [changectx(repo, p1), changectx(repo, p2)]
562
568
563 def changeset(self):
569 def changeset(self):
564 c = self._changeset
570 c = self._changeset
565 return (
571 return (
566 c.manifest,
572 c.manifest,
567 c.user,
573 c.user,
568 c.date,
574 c.date,
569 c.files,
575 c.files,
570 c.description,
576 c.description,
571 c.extra,
577 c.extra,
572 )
578 )
573 def manifestnode(self):
579 def manifestnode(self):
574 return self._changeset.manifest
580 return self._changeset.manifest
575
581
576 def user(self):
582 def user(self):
577 return self._changeset.user
583 return self._changeset.user
578 def date(self):
584 def date(self):
579 return self._changeset.date
585 return self._changeset.date
580 def files(self):
586 def files(self):
581 return self._changeset.files
587 return self._changeset.files
582 def description(self):
588 def description(self):
583 return self._changeset.description
589 return self._changeset.description
584 def branch(self):
590 def branch(self):
585 return encoding.tolocal(self._changeset.extra.get("branch"))
591 return encoding.tolocal(self._changeset.extra.get("branch"))
586 def closesbranch(self):
592 def closesbranch(self):
587 return 'close' in self._changeset.extra
593 return 'close' in self._changeset.extra
588 def extra(self):
594 def extra(self):
589 return self._changeset.extra
595 return self._changeset.extra
590 def tags(self):
596 def tags(self):
591 return self._repo.nodetags(self._node)
597 return self._repo.nodetags(self._node)
592 def bookmarks(self):
598 def bookmarks(self):
593 return self._repo.nodebookmarks(self._node)
599 return self._repo.nodebookmarks(self._node)
594 def phase(self):
600 def phase(self):
595 return self._repo._phasecache.phase(self._repo, self._rev)
601 return self._repo._phasecache.phase(self._repo, self._rev)
596 def hidden(self):
602 def hidden(self):
597 return self._rev in repoview.filterrevs(self._repo, 'visible')
603 return self._rev in repoview.filterrevs(self._repo, 'visible')
598
604
599 def children(self):
605 def children(self):
600 """return contexts for each child changeset"""
606 """return contexts for each child changeset"""
601 c = self._repo.changelog.children(self._node)
607 c = self._repo.changelog.children(self._node)
602 return [changectx(self._repo, x) for x in c]
608 return [changectx(self._repo, x) for x in c]
603
609
604 def ancestors(self):
610 def ancestors(self):
605 for a in self._repo.changelog.ancestors([self._rev]):
611 for a in self._repo.changelog.ancestors([self._rev]):
606 yield changectx(self._repo, a)
612 yield changectx(self._repo, a)
607
613
608 def descendants(self):
614 def descendants(self):
609 for d in self._repo.changelog.descendants([self._rev]):
615 for d in self._repo.changelog.descendants([self._rev]):
610 yield changectx(self._repo, d)
616 yield changectx(self._repo, d)
611
617
612 def filectx(self, path, fileid=None, filelog=None):
618 def filectx(self, path, fileid=None, filelog=None):
613 """get a file context from this changeset"""
619 """get a file context from this changeset"""
614 if fileid is None:
620 if fileid is None:
615 fileid = self.filenode(path)
621 fileid = self.filenode(path)
616 return filectx(self._repo, path, fileid=fileid,
622 return filectx(self._repo, path, fileid=fileid,
617 changectx=self, filelog=filelog)
623 changectx=self, filelog=filelog)
618
624
619 def ancestor(self, c2, warn=False):
625 def ancestor(self, c2, warn=False):
620 """return the "best" ancestor context of self and c2
626 """return the "best" ancestor context of self and c2
621
627
622 If there are multiple candidates, it will show a message and check
628 If there are multiple candidates, it will show a message and check
623 merge.preferancestor configuration before falling back to the
629 merge.preferancestor configuration before falling back to the
624 revlog ancestor."""
630 revlog ancestor."""
625 # deal with workingctxs
631 # deal with workingctxs
626 n2 = c2._node
632 n2 = c2._node
627 if n2 is None:
633 if n2 is None:
628 n2 = c2._parents[0]._node
634 n2 = c2._parents[0]._node
629 cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
635 cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
630 if not cahs:
636 if not cahs:
631 anc = nullid
637 anc = nullid
632 elif len(cahs) == 1:
638 elif len(cahs) == 1:
633 anc = cahs[0]
639 anc = cahs[0]
634 else:
640 else:
635 # experimental config: merge.preferancestor
641 # experimental config: merge.preferancestor
636 for r in self._repo.ui.configlist('merge', 'preferancestor', ['*']):
642 for r in self._repo.ui.configlist('merge', 'preferancestor', ['*']):
637 try:
643 try:
638 ctx = changectx(self._repo, r)
644 ctx = changectx(self._repo, r)
639 except error.RepoLookupError:
645 except error.RepoLookupError:
640 continue
646 continue
641 anc = ctx.node()
647 anc = ctx.node()
642 if anc in cahs:
648 if anc in cahs:
643 break
649 break
644 else:
650 else:
645 anc = self._repo.changelog.ancestor(self._node, n2)
651 anc = self._repo.changelog.ancestor(self._node, n2)
646 if warn:
652 if warn:
647 self._repo.ui.status(
653 self._repo.ui.status(
648 (_("note: using %s as ancestor of %s and %s\n") %
654 (_("note: using %s as ancestor of %s and %s\n") %
649 (short(anc), short(self._node), short(n2))) +
655 (short(anc), short(self._node), short(n2))) +
650 ''.join(_(" alternatively, use --config "
656 ''.join(_(" alternatively, use --config "
651 "merge.preferancestor=%s\n") %
657 "merge.preferancestor=%s\n") %
652 short(n) for n in sorted(cahs) if n != anc))
658 short(n) for n in sorted(cahs) if n != anc))
653 return changectx(self._repo, anc)
659 return changectx(self._repo, anc)
654
660
655 def descendant(self, other):
661 def descendant(self, other):
656 """True if other is descendant of this changeset"""
662 """True if other is descendant of this changeset"""
657 return self._repo.changelog.descendant(self._rev, other._rev)
663 return self._repo.changelog.descendant(self._rev, other._rev)
658
664
659 def walk(self, match):
665 def walk(self, match):
660 '''Generates matching file names.'''
666 '''Generates matching file names.'''
661
667
662 # Wrap match.bad method to have message with nodeid
668 # Wrap match.bad method to have message with nodeid
663 def bad(fn, msg):
669 def bad(fn, msg):
664 # The manifest doesn't know about subrepos, so don't complain about
670 # The manifest doesn't know about subrepos, so don't complain about
665 # paths into valid subrepos.
671 # paths into valid subrepos.
666 if any(fn == s or fn.startswith(s + '/')
672 if any(fn == s or fn.startswith(s + '/')
667 for s in self.substate):
673 for s in self.substate):
668 return
674 return
669 match.bad(fn, _('no such file in rev %s') % self)
675 match.bad(fn, _('no such file in rev %s') % self)
670
676
671 m = matchmod.badmatch(match, bad)
677 m = matchmod.badmatch(match, bad)
672 return self._manifest.walk(m)
678 return self._manifest.walk(m)
673
679
674 def matches(self, match):
680 def matches(self, match):
675 return self.walk(match)
681 return self.walk(match)
676
682
class basefilectx(object):
    """A filecontext object represents the common logic for its children:
    filectx: read-only access to a filerevision that is already present
             in the repo,
    workingfilectx: a filecontext that represents files from the working
                    directory,
    memfilectx: a filecontext that represents files in-memory,
    overlayfilectx: duplicate another filecontext with some fields overridden.
    """
    @propertycache
    def _filelog(self):
        """Filelog for this file's path, opened lazily."""
        return self._repo.file(self._path)
690 @propertycache
696 @propertycache
691 def _changeid(self):
697 def _changeid(self):
692 if r'_changeid' in self.__dict__:
698 if r'_changeid' in self.__dict__:
693 return self._changeid
699 return self._changeid
694 elif r'_changectx' in self.__dict__:
700 elif r'_changectx' in self.__dict__:
695 return self._changectx.rev()
701 return self._changectx.rev()
696 elif r'_descendantrev' in self.__dict__:
702 elif r'_descendantrev' in self.__dict__:
697 # this file context was created from a revision with a known
703 # this file context was created from a revision with a known
698 # descendant, we can (lazily) correct for linkrev aliases
704 # descendant, we can (lazily) correct for linkrev aliases
699 return self._adjustlinkrev(self._descendantrev)
705 return self._adjustlinkrev(self._descendantrev)
700 else:
706 else:
701 return self._filelog.linkrev(self._filerev)
707 return self._filelog.linkrev(self._filerev)
702
708
703 @propertycache
709 @propertycache
704 def _filenode(self):
710 def _filenode(self):
705 if r'_fileid' in self.__dict__:
711 if r'_fileid' in self.__dict__:
706 return self._filelog.lookup(self._fileid)
712 return self._filelog.lookup(self._fileid)
707 else:
713 else:
708 return self._changectx.filenode(self._path)
714 return self._changectx.filenode(self._path)
709
715
710 @propertycache
716 @propertycache
711 def _filerev(self):
717 def _filerev(self):
712 return self._filelog.rev(self._filenode)
718 return self._filelog.rev(self._filenode)
713
719
714 @propertycache
720 @propertycache
715 def _repopath(self):
721 def _repopath(self):
716 return self._path
722 return self._path
717
723
718 def __nonzero__(self):
724 def __nonzero__(self):
719 try:
725 try:
720 self._filenode
726 self._filenode
721 return True
727 return True
722 except error.LookupError:
728 except error.LookupError:
723 # file is missing
729 # file is missing
724 return False
730 return False
725
731
726 __bool__ = __nonzero__
732 __bool__ = __nonzero__
727
733
728 def __bytes__(self):
734 def __bytes__(self):
729 try:
735 try:
730 return "%s@%s" % (self.path(), self._changectx)
736 return "%s@%s" % (self.path(), self._changectx)
731 except error.LookupError:
737 except error.LookupError:
732 return "%s@???" % self.path()
738 return "%s@???" % self.path()
733
739
734 __str__ = encoding.strmethod(__bytes__)
740 __str__ = encoding.strmethod(__bytes__)
735
741
736 def __repr__(self):
742 def __repr__(self):
737 return "<%s %s>" % (type(self).__name__, str(self))
743 return "<%s %s>" % (type(self).__name__, str(self))
738
744
739 def __hash__(self):
745 def __hash__(self):
740 try:
746 try:
741 return hash((self._path, self._filenode))
747 return hash((self._path, self._filenode))
742 except AttributeError:
748 except AttributeError:
743 return id(self)
749 return id(self)
744
750
745 def __eq__(self, other):
751 def __eq__(self, other):
746 try:
752 try:
747 return (type(self) == type(other) and self._path == other._path
753 return (type(self) == type(other) and self._path == other._path
748 and self._filenode == other._filenode)
754 and self._filenode == other._filenode)
749 except AttributeError:
755 except AttributeError:
750 return False
756 return False
751
757
752 def __ne__(self, other):
758 def __ne__(self, other):
753 return not (self == other)
759 return not (self == other)
754
760
755 def filerev(self):
761 def filerev(self):
756 return self._filerev
762 return self._filerev
757 def filenode(self):
763 def filenode(self):
758 return self._filenode
764 return self._filenode
759 @propertycache
765 @propertycache
760 def _flags(self):
766 def _flags(self):
761 return self._changectx.flags(self._path)
767 return self._changectx.flags(self._path)
762 def flags(self):
768 def flags(self):
763 return self._flags
769 return self._flags
764 def filelog(self):
770 def filelog(self):
765 return self._filelog
771 return self._filelog
766 def rev(self):
772 def rev(self):
767 return self._changeid
773 return self._changeid
768 def linkrev(self):
774 def linkrev(self):
769 return self._filelog.linkrev(self._filerev)
775 return self._filelog.linkrev(self._filerev)
770 def node(self):
776 def node(self):
771 return self._changectx.node()
777 return self._changectx.node()
772 def hex(self):
778 def hex(self):
773 return self._changectx.hex()
779 return self._changectx.hex()
774 def user(self):
780 def user(self):
775 return self._changectx.user()
781 return self._changectx.user()
776 def date(self):
782 def date(self):
777 return self._changectx.date()
783 return self._changectx.date()
778 def files(self):
784 def files(self):
779 return self._changectx.files()
785 return self._changectx.files()
780 def description(self):
786 def description(self):
781 return self._changectx.description()
787 return self._changectx.description()
782 def branch(self):
788 def branch(self):
783 return self._changectx.branch()
789 return self._changectx.branch()
784 def extra(self):
790 def extra(self):
785 return self._changectx.extra()
791 return self._changectx.extra()
786 def phase(self):
792 def phase(self):
787 return self._changectx.phase()
793 return self._changectx.phase()
788 def phasestr(self):
794 def phasestr(self):
789 return self._changectx.phasestr()
795 return self._changectx.phasestr()
790 def manifest(self):
796 def manifest(self):
791 return self._changectx.manifest()
797 return self._changectx.manifest()
792 def changectx(self):
798 def changectx(self):
793 return self._changectx
799 return self._changectx
794 def renamed(self):
800 def renamed(self):
795 return self._copied
801 return self._copied
796 def repo(self):
802 def repo(self):
797 return self._repo
803 return self._repo
798 def size(self):
804 def size(self):
799 return len(self.data())
805 return len(self.data())
800
806
801 def path(self):
807 def path(self):
802 return self._path
808 return self._path
803
809
804 def isbinary(self):
810 def isbinary(self):
805 try:
811 try:
806 return util.binary(self.data())
812 return util.binary(self.data())
807 except IOError:
813 except IOError:
808 return False
814 return False
809 def isexec(self):
815 def isexec(self):
810 return 'x' in self.flags()
816 return 'x' in self.flags()
811 def islink(self):
817 def islink(self):
812 return 'l' in self.flags()
818 return 'l' in self.flags()
813
819
814 def isabsent(self):
820 def isabsent(self):
815 """whether this filectx represents a file not in self._changectx
821 """whether this filectx represents a file not in self._changectx
816
822
817 This is mainly for merge code to detect change/delete conflicts. This is
823 This is mainly for merge code to detect change/delete conflicts. This is
818 expected to be True for all subclasses of basectx."""
824 expected to be True for all subclasses of basectx."""
819 return False
825 return False
820
826
821 _customcmp = False
827 _customcmp = False
822 def cmp(self, fctx):
828 def cmp(self, fctx):
823 """compare with other file context
829 """compare with other file context
824
830
825 returns True if different than fctx.
831 returns True if different than fctx.
826 """
832 """
827 if fctx._customcmp:
833 if fctx._customcmp:
828 return fctx.cmp(self)
834 return fctx.cmp(self)
829
835
830 if (fctx._filenode is None
836 if (fctx._filenode is None
831 and (self._repo._encodefilterpats
837 and (self._repo._encodefilterpats
832 # if file data starts with '\1\n', empty metadata block is
838 # if file data starts with '\1\n', empty metadata block is
833 # prepended, which adds 4 bytes to filelog.size().
839 # prepended, which adds 4 bytes to filelog.size().
834 or self.size() - 4 == fctx.size())
840 or self.size() - 4 == fctx.size())
835 or self.size() == fctx.size()):
841 or self.size() == fctx.size()):
836 return self._filelog.cmp(self._filenode, fctx.data())
842 return self._filelog.cmp(self._filenode, fctx.data())
837
843
838 return True
844 return True
839
845
840 def _adjustlinkrev(self, srcrev, inclusive=False):
846 def _adjustlinkrev(self, srcrev, inclusive=False):
841 """return the first ancestor of <srcrev> introducing <fnode>
847 """return the first ancestor of <srcrev> introducing <fnode>
842
848
843 If the linkrev of the file revision does not point to an ancestor of
849 If the linkrev of the file revision does not point to an ancestor of
844 srcrev, we'll walk down the ancestors until we find one introducing
850 srcrev, we'll walk down the ancestors until we find one introducing
845 this file revision.
851 this file revision.
846
852
847 :srcrev: the changeset revision we search ancestors from
853 :srcrev: the changeset revision we search ancestors from
848 :inclusive: if true, the src revision will also be checked
854 :inclusive: if true, the src revision will also be checked
849 """
855 """
850 repo = self._repo
856 repo = self._repo
851 cl = repo.unfiltered().changelog
857 cl = repo.unfiltered().changelog
852 mfl = repo.manifestlog
858 mfl = repo.manifestlog
853 # fetch the linkrev
859 # fetch the linkrev
854 lkr = self.linkrev()
860 lkr = self.linkrev()
855 # hack to reuse ancestor computation when searching for renames
861 # hack to reuse ancestor computation when searching for renames
856 memberanc = getattr(self, '_ancestrycontext', None)
862 memberanc = getattr(self, '_ancestrycontext', None)
857 iteranc = None
863 iteranc = None
858 if srcrev is None:
864 if srcrev is None:
859 # wctx case, used by workingfilectx during mergecopy
865 # wctx case, used by workingfilectx during mergecopy
860 revs = [p.rev() for p in self._repo[None].parents()]
866 revs = [p.rev() for p in self._repo[None].parents()]
861 inclusive = True # we skipped the real (revless) source
867 inclusive = True # we skipped the real (revless) source
862 else:
868 else:
863 revs = [srcrev]
869 revs = [srcrev]
864 if memberanc is None:
870 if memberanc is None:
865 memberanc = iteranc = cl.ancestors(revs, lkr,
871 memberanc = iteranc = cl.ancestors(revs, lkr,
866 inclusive=inclusive)
872 inclusive=inclusive)
867 # check if this linkrev is an ancestor of srcrev
873 # check if this linkrev is an ancestor of srcrev
868 if lkr not in memberanc:
874 if lkr not in memberanc:
869 if iteranc is None:
875 if iteranc is None:
870 iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
876 iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
871 fnode = self._filenode
877 fnode = self._filenode
872 path = self._path
878 path = self._path
873 for a in iteranc:
879 for a in iteranc:
874 ac = cl.read(a) # get changeset data (we avoid object creation)
880 ac = cl.read(a) # get changeset data (we avoid object creation)
875 if path in ac[3]: # checking the 'files' field.
881 if path in ac[3]: # checking the 'files' field.
876 # The file has been touched, check if the content is
882 # The file has been touched, check if the content is
877 # similar to the one we search for.
883 # similar to the one we search for.
878 if fnode == mfl[ac[0]].readfast().get(path):
884 if fnode == mfl[ac[0]].readfast().get(path):
879 return a
885 return a
880 # In theory, we should never get out of that loop without a result.
886 # In theory, we should never get out of that loop without a result.
881 # But if manifest uses a buggy file revision (not children of the
887 # But if manifest uses a buggy file revision (not children of the
882 # one it replaces) we could. Such a buggy situation will likely
888 # one it replaces) we could. Such a buggy situation will likely
883 # result is crash somewhere else at to some point.
889 # result is crash somewhere else at to some point.
884 return lkr
890 return lkr
885
891
886 def introrev(self):
892 def introrev(self):
887 """return the rev of the changeset which introduced this file revision
893 """return the rev of the changeset which introduced this file revision
888
894
889 This method is different from linkrev because it take into account the
895 This method is different from linkrev because it take into account the
890 changeset the filectx was created from. It ensures the returned
896 changeset the filectx was created from. It ensures the returned
891 revision is one of its ancestors. This prevents bugs from
897 revision is one of its ancestors. This prevents bugs from
892 'linkrev-shadowing' when a file revision is used by multiple
898 'linkrev-shadowing' when a file revision is used by multiple
893 changesets.
899 changesets.
894 """
900 """
895 lkr = self.linkrev()
901 lkr = self.linkrev()
896 attrs = vars(self)
902 attrs = vars(self)
897 noctx = not ('_changeid' in attrs or '_changectx' in attrs)
903 noctx = not ('_changeid' in attrs or '_changectx' in attrs)
898 if noctx or self.rev() == lkr:
904 if noctx or self.rev() == lkr:
899 return self.linkrev()
905 return self.linkrev()
900 return self._adjustlinkrev(self.rev(), inclusive=True)
906 return self._adjustlinkrev(self.rev(), inclusive=True)
901
907
902 def _parentfilectx(self, path, fileid, filelog):
908 def _parentfilectx(self, path, fileid, filelog):
903 """create parent filectx keeping ancestry info for _adjustlinkrev()"""
909 """create parent filectx keeping ancestry info for _adjustlinkrev()"""
904 fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
910 fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
905 if '_changeid' in vars(self) or '_changectx' in vars(self):
911 if '_changeid' in vars(self) or '_changectx' in vars(self):
906 # If self is associated with a changeset (probably explicitly
912 # If self is associated with a changeset (probably explicitly
907 # fed), ensure the created filectx is associated with a
913 # fed), ensure the created filectx is associated with a
908 # changeset that is an ancestor of self.changectx.
914 # changeset that is an ancestor of self.changectx.
909 # This lets us later use _adjustlinkrev to get a correct link.
915 # This lets us later use _adjustlinkrev to get a correct link.
910 fctx._descendantrev = self.rev()
916 fctx._descendantrev = self.rev()
911 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
917 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
912 elif '_descendantrev' in vars(self):
918 elif '_descendantrev' in vars(self):
913 # Otherwise propagate _descendantrev if we have one associated.
919 # Otherwise propagate _descendantrev if we have one associated.
914 fctx._descendantrev = self._descendantrev
920 fctx._descendantrev = self._descendantrev
915 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
921 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
916 return fctx
922 return fctx
917
923
918 def parents(self):
924 def parents(self):
919 _path = self._path
925 _path = self._path
920 fl = self._filelog
926 fl = self._filelog
921 parents = self._filelog.parents(self._filenode)
927 parents = self._filelog.parents(self._filenode)
922 pl = [(_path, node, fl) for node in parents if node != nullid]
928 pl = [(_path, node, fl) for node in parents if node != nullid]
923
929
924 r = fl.renamed(self._filenode)
930 r = fl.renamed(self._filenode)
925 if r:
931 if r:
926 # - In the simple rename case, both parent are nullid, pl is empty.
932 # - In the simple rename case, both parent are nullid, pl is empty.
927 # - In case of merge, only one of the parent is null id and should
933 # - In case of merge, only one of the parent is null id and should
928 # be replaced with the rename information. This parent is -always-
934 # be replaced with the rename information. This parent is -always-
929 # the first one.
935 # the first one.
930 #
936 #
931 # As null id have always been filtered out in the previous list
937 # As null id have always been filtered out in the previous list
932 # comprehension, inserting to 0 will always result in "replacing
938 # comprehension, inserting to 0 will always result in "replacing
933 # first nullid parent with rename information.
939 # first nullid parent with rename information.
934 pl.insert(0, (r[0], r[1], self._repo.file(r[0])))
940 pl.insert(0, (r[0], r[1], self._repo.file(r[0])))
935
941
936 return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]
942 return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]
937
943
938 def p1(self):
944 def p1(self):
939 return self.parents()[0]
945 return self.parents()[0]
940
946
941 def p2(self):
947 def p2(self):
942 p = self.parents()
948 p = self.parents()
943 if len(p) == 2:
949 if len(p) == 2:
944 return p[1]
950 return p[1]
945 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
951 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
946
952
947 def annotate(self, follow=False, linenumber=False, skiprevs=None,
953 def annotate(self, follow=False, linenumber=False, skiprevs=None,
948 diffopts=None):
954 diffopts=None):
949 '''returns a list of tuples of ((ctx, number), line) for each line
955 '''returns a list of tuples of ((ctx, number), line) for each line
950 in the file, where ctx is the filectx of the node where
956 in the file, where ctx is the filectx of the node where
951 that line was last changed; if linenumber parameter is true, number is
957 that line was last changed; if linenumber parameter is true, number is
952 the line number at the first appearance in the managed file, otherwise,
958 the line number at the first appearance in the managed file, otherwise,
953 number has a fixed value of False.
959 number has a fixed value of False.
954 '''
960 '''
955
961
956 def lines(text):
962 def lines(text):
957 if text.endswith("\n"):
963 if text.endswith("\n"):
958 return text.count("\n")
964 return text.count("\n")
959 return text.count("\n") + int(bool(text))
965 return text.count("\n") + int(bool(text))
960
966
961 if linenumber:
967 if linenumber:
962 def decorate(text, rev):
968 def decorate(text, rev):
963 return ([(rev, i) for i in xrange(1, lines(text) + 1)], text)
969 return ([(rev, i) for i in xrange(1, lines(text) + 1)], text)
964 else:
970 else:
965 def decorate(text, rev):
971 def decorate(text, rev):
966 return ([(rev, False)] * lines(text), text)
972 return ([(rev, False)] * lines(text), text)
967
973
968 getlog = util.lrucachefunc(lambda x: self._repo.file(x))
974 getlog = util.lrucachefunc(lambda x: self._repo.file(x))
969
975
970 def parents(f):
976 def parents(f):
971 # Cut _descendantrev here to mitigate the penalty of lazy linkrev
977 # Cut _descendantrev here to mitigate the penalty of lazy linkrev
972 # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
978 # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
973 # from the topmost introrev (= srcrev) down to p.linkrev() if it
979 # from the topmost introrev (= srcrev) down to p.linkrev() if it
974 # isn't an ancestor of the srcrev.
980 # isn't an ancestor of the srcrev.
975 f._changeid
981 f._changeid
976 pl = f.parents()
982 pl = f.parents()
977
983
978 # Don't return renamed parents if we aren't following.
984 # Don't return renamed parents if we aren't following.
979 if not follow:
985 if not follow:
980 pl = [p for p in pl if p.path() == f.path()]
986 pl = [p for p in pl if p.path() == f.path()]
981
987
982 # renamed filectx won't have a filelog yet, so set it
988 # renamed filectx won't have a filelog yet, so set it
983 # from the cache to save time
989 # from the cache to save time
984 for p in pl:
990 for p in pl:
985 if not '_filelog' in p.__dict__:
991 if not '_filelog' in p.__dict__:
986 p._filelog = getlog(p.path())
992 p._filelog = getlog(p.path())
987
993
988 return pl
994 return pl
989
995
990 # use linkrev to find the first changeset where self appeared
996 # use linkrev to find the first changeset where self appeared
991 base = self
997 base = self
992 introrev = self.introrev()
998 introrev = self.introrev()
993 if self.rev() != introrev:
999 if self.rev() != introrev:
994 base = self.filectx(self.filenode(), changeid=introrev)
1000 base = self.filectx(self.filenode(), changeid=introrev)
995 if getattr(base, '_ancestrycontext', None) is None:
1001 if getattr(base, '_ancestrycontext', None) is None:
996 cl = self._repo.changelog
1002 cl = self._repo.changelog
997 if introrev is None:
1003 if introrev is None:
998 # wctx is not inclusive, but works because _ancestrycontext
1004 # wctx is not inclusive, but works because _ancestrycontext
999 # is used to test filelog revisions
1005 # is used to test filelog revisions
1000 ac = cl.ancestors([p.rev() for p in base.parents()],
1006 ac = cl.ancestors([p.rev() for p in base.parents()],
1001 inclusive=True)
1007 inclusive=True)
1002 else:
1008 else:
1003 ac = cl.ancestors([introrev], inclusive=True)
1009 ac = cl.ancestors([introrev], inclusive=True)
1004 base._ancestrycontext = ac
1010 base._ancestrycontext = ac
1005
1011
1006 # This algorithm would prefer to be recursive, but Python is a
1012 # This algorithm would prefer to be recursive, but Python is a
1007 # bit recursion-hostile. Instead we do an iterative
1013 # bit recursion-hostile. Instead we do an iterative
1008 # depth-first search.
1014 # depth-first search.
1009
1015
1010 # 1st DFS pre-calculates pcache and needed
1016 # 1st DFS pre-calculates pcache and needed
1011 visit = [base]
1017 visit = [base]
1012 pcache = {}
1018 pcache = {}
1013 needed = {base: 1}
1019 needed = {base: 1}
1014 while visit:
1020 while visit:
1015 f = visit.pop()
1021 f = visit.pop()
1016 if f in pcache:
1022 if f in pcache:
1017 continue
1023 continue
1018 pl = parents(f)
1024 pl = parents(f)
1019 pcache[f] = pl
1025 pcache[f] = pl
1020 for p in pl:
1026 for p in pl:
1021 needed[p] = needed.get(p, 0) + 1
1027 needed[p] = needed.get(p, 0) + 1
1022 if p not in pcache:
1028 if p not in pcache:
1023 visit.append(p)
1029 visit.append(p)
1024
1030
1025 # 2nd DFS does the actual annotate
1031 # 2nd DFS does the actual annotate
1026 visit[:] = [base]
1032 visit[:] = [base]
1027 hist = {}
1033 hist = {}
1028 while visit:
1034 while visit:
1029 f = visit[-1]
1035 f = visit[-1]
1030 if f in hist:
1036 if f in hist:
1031 visit.pop()
1037 visit.pop()
1032 continue
1038 continue
1033
1039
1034 ready = True
1040 ready = True
1035 pl = pcache[f]
1041 pl = pcache[f]
1036 for p in pl:
1042 for p in pl:
1037 if p not in hist:
1043 if p not in hist:
1038 ready = False
1044 ready = False
1039 visit.append(p)
1045 visit.append(p)
1040 if ready:
1046 if ready:
1041 visit.pop()
1047 visit.pop()
1042 curr = decorate(f.data(), f)
1048 curr = decorate(f.data(), f)
1043 skipchild = False
1049 skipchild = False
1044 if skiprevs is not None:
1050 if skiprevs is not None:
1045 skipchild = f._changeid in skiprevs
1051 skipchild = f._changeid in skiprevs
1046 curr = _annotatepair([hist[p] for p in pl], f, curr, skipchild,
1052 curr = _annotatepair([hist[p] for p in pl], f, curr, skipchild,
1047 diffopts)
1053 diffopts)
1048 for p in pl:
1054 for p in pl:
1049 if needed[p] == 1:
1055 if needed[p] == 1:
1050 del hist[p]
1056 del hist[p]
1051 del needed[p]
1057 del needed[p]
1052 else:
1058 else:
1053 needed[p] -= 1
1059 needed[p] -= 1
1054
1060
1055 hist[f] = curr
1061 hist[f] = curr
1056 del pcache[f]
1062 del pcache[f]
1057
1063
1058 return zip(hist[base][0], hist[base][1].splitlines(True))
1064 return zip(hist[base][0], hist[base][1].splitlines(True))
1059
1065
1060 def ancestors(self, followfirst=False):
1066 def ancestors(self, followfirst=False):
1061 visit = {}
1067 visit = {}
1062 c = self
1068 c = self
1063 if followfirst:
1069 if followfirst:
1064 cut = 1
1070 cut = 1
1065 else:
1071 else:
1066 cut = None
1072 cut = None
1067
1073
1068 while True:
1074 while True:
1069 for parent in c.parents()[:cut]:
1075 for parent in c.parents()[:cut]:
1070 visit[(parent.linkrev(), parent.filenode())] = parent
1076 visit[(parent.linkrev(), parent.filenode())] = parent
1071 if not visit:
1077 if not visit:
1072 break
1078 break
1073 c = visit.pop(max(visit))
1079 c = visit.pop(max(visit))
1074 yield c
1080 yield c
1075
1081
1076 def _annotatepair(parents, childfctx, child, skipchild, diffopts):
1082 def _annotatepair(parents, childfctx, child, skipchild, diffopts):
1077 r'''
1083 r'''
1078 Given parent and child fctxes and annotate data for parents, for all lines
1084 Given parent and child fctxes and annotate data for parents, for all lines
1079 in either parent that match the child, annotate the child with the parent's
1085 in either parent that match the child, annotate the child with the parent's
1080 data.
1086 data.
1081
1087
1082 Additionally, if `skipchild` is True, replace all other lines with parent
1088 Additionally, if `skipchild` is True, replace all other lines with parent
1083 annotate data as well such that child is never blamed for any lines.
1089 annotate data as well such that child is never blamed for any lines.
1084
1090
1085 >>> oldfctx = 'old'
1091 >>> oldfctx = 'old'
1086 >>> p1fctx, p2fctx, childfctx = 'p1', 'p2', 'c'
1092 >>> p1fctx, p2fctx, childfctx = 'p1', 'p2', 'c'
1087 >>> olddata = 'a\nb\n'
1093 >>> olddata = 'a\nb\n'
1088 >>> p1data = 'a\nb\nc\n'
1094 >>> p1data = 'a\nb\nc\n'
1089 >>> p2data = 'a\nc\nd\n'
1095 >>> p2data = 'a\nc\nd\n'
1090 >>> childdata = 'a\nb2\nc\nc2\nd\n'
1096 >>> childdata = 'a\nb2\nc\nc2\nd\n'
1091 >>> diffopts = mdiff.diffopts()
1097 >>> diffopts = mdiff.diffopts()
1092
1098
1093 >>> def decorate(text, rev):
1099 >>> def decorate(text, rev):
1094 ... return ([(rev, i) for i in xrange(1, text.count('\n') + 1)], text)
1100 ... return ([(rev, i) for i in xrange(1, text.count('\n') + 1)], text)
1095
1101
1096 Basic usage:
1102 Basic usage:
1097
1103
1098 >>> oldann = decorate(olddata, oldfctx)
1104 >>> oldann = decorate(olddata, oldfctx)
1099 >>> p1ann = decorate(p1data, p1fctx)
1105 >>> p1ann = decorate(p1data, p1fctx)
1100 >>> p1ann = _annotatepair([oldann], p1fctx, p1ann, False, diffopts)
1106 >>> p1ann = _annotatepair([oldann], p1fctx, p1ann, False, diffopts)
1101 >>> p1ann[0]
1107 >>> p1ann[0]
1102 [('old', 1), ('old', 2), ('p1', 3)]
1108 [('old', 1), ('old', 2), ('p1', 3)]
1103 >>> p2ann = decorate(p2data, p2fctx)
1109 >>> p2ann = decorate(p2data, p2fctx)
1104 >>> p2ann = _annotatepair([oldann], p2fctx, p2ann, False, diffopts)
1110 >>> p2ann = _annotatepair([oldann], p2fctx, p2ann, False, diffopts)
1105 >>> p2ann[0]
1111 >>> p2ann[0]
1106 [('old', 1), ('p2', 2), ('p2', 3)]
1112 [('old', 1), ('p2', 2), ('p2', 3)]
1107
1113
1108 Test with multiple parents (note the difference caused by ordering):
1114 Test with multiple parents (note the difference caused by ordering):
1109
1115
1110 >>> childann = decorate(childdata, childfctx)
1116 >>> childann = decorate(childdata, childfctx)
1111 >>> childann = _annotatepair([p1ann, p2ann], childfctx, childann, False,
1117 >>> childann = _annotatepair([p1ann, p2ann], childfctx, childann, False,
1112 ... diffopts)
1118 ... diffopts)
1113 >>> childann[0]
1119 >>> childann[0]
1114 [('old', 1), ('c', 2), ('p2', 2), ('c', 4), ('p2', 3)]
1120 [('old', 1), ('c', 2), ('p2', 2), ('c', 4), ('p2', 3)]
1115
1121
1116 >>> childann = decorate(childdata, childfctx)
1122 >>> childann = decorate(childdata, childfctx)
1117 >>> childann = _annotatepair([p2ann, p1ann], childfctx, childann, False,
1123 >>> childann = _annotatepair([p2ann, p1ann], childfctx, childann, False,
1118 ... diffopts)
1124 ... diffopts)
1119 >>> childann[0]
1125 >>> childann[0]
1120 [('old', 1), ('c', 2), ('p1', 3), ('c', 4), ('p2', 3)]
1126 [('old', 1), ('c', 2), ('p1', 3), ('c', 4), ('p2', 3)]
1121
1127
1122 Test with skipchild (note the difference caused by ordering):
1128 Test with skipchild (note the difference caused by ordering):
1123
1129
1124 >>> childann = decorate(childdata, childfctx)
1130 >>> childann = decorate(childdata, childfctx)
1125 >>> childann = _annotatepair([p1ann, p2ann], childfctx, childann, True,
1131 >>> childann = _annotatepair([p1ann, p2ann], childfctx, childann, True,
1126 ... diffopts)
1132 ... diffopts)
1127 >>> childann[0]
1133 >>> childann[0]
1128 [('old', 1), ('old', 2), ('p2', 2), ('p2', 2), ('p2', 3)]
1134 [('old', 1), ('old', 2), ('p2', 2), ('p2', 2), ('p2', 3)]
1129
1135
1130 >>> childann = decorate(childdata, childfctx)
1136 >>> childann = decorate(childdata, childfctx)
1131 >>> childann = _annotatepair([p2ann, p1ann], childfctx, childann, True,
1137 >>> childann = _annotatepair([p2ann, p1ann], childfctx, childann, True,
1132 ... diffopts)
1138 ... diffopts)
1133 >>> childann[0]
1139 >>> childann[0]
1134 [('old', 1), ('old', 2), ('p1', 3), ('p1', 3), ('p2', 3)]
1140 [('old', 1), ('old', 2), ('p1', 3), ('p1', 3), ('p2', 3)]
1135 '''
1141 '''
1136 pblocks = [(parent, mdiff.allblocks(parent[1], child[1], opts=diffopts))
1142 pblocks = [(parent, mdiff.allblocks(parent[1], child[1], opts=diffopts))
1137 for parent in parents]
1143 for parent in parents]
1138
1144
1139 if skipchild:
1145 if skipchild:
1140 # Need to iterate over the blocks twice -- make it a list
1146 # Need to iterate over the blocks twice -- make it a list
1141 pblocks = [(p, list(blocks)) for (p, blocks) in pblocks]
1147 pblocks = [(p, list(blocks)) for (p, blocks) in pblocks]
1142 # Mercurial currently prefers p2 over p1 for annotate.
1148 # Mercurial currently prefers p2 over p1 for annotate.
1143 # TODO: change this?
1149 # TODO: change this?
1144 for parent, blocks in pblocks:
1150 for parent, blocks in pblocks:
1145 for (a1, a2, b1, b2), t in blocks:
1151 for (a1, a2, b1, b2), t in blocks:
1146 # Changed blocks ('!') or blocks made only of blank lines ('~')
1152 # Changed blocks ('!') or blocks made only of blank lines ('~')
1147 # belong to the child.
1153 # belong to the child.
1148 if t == '=':
1154 if t == '=':
1149 child[0][b1:b2] = parent[0][a1:a2]
1155 child[0][b1:b2] = parent[0][a1:a2]
1150
1156
1151 if skipchild:
1157 if skipchild:
1152 # Now try and match up anything that couldn't be matched,
1158 # Now try and match up anything that couldn't be matched,
1153 # Reversing pblocks maintains bias towards p2, matching above
1159 # Reversing pblocks maintains bias towards p2, matching above
1154 # behavior.
1160 # behavior.
1155 pblocks.reverse()
1161 pblocks.reverse()
1156
1162
1157 # The heuristics are:
1163 # The heuristics are:
1158 # * Work on blocks of changed lines (effectively diff hunks with -U0).
1164 # * Work on blocks of changed lines (effectively diff hunks with -U0).
1159 # This could potentially be smarter but works well enough.
1165 # This could potentially be smarter but works well enough.
1160 # * For a non-matching section, do a best-effort fit. Match lines in
1166 # * For a non-matching section, do a best-effort fit. Match lines in
1161 # diff hunks 1:1, dropping lines as necessary.
1167 # diff hunks 1:1, dropping lines as necessary.
1162 # * Repeat the last line as a last resort.
1168 # * Repeat the last line as a last resort.
1163
1169
1164 # First, replace as much as possible without repeating the last line.
1170 # First, replace as much as possible without repeating the last line.
1165 remaining = [(parent, []) for parent, _blocks in pblocks]
1171 remaining = [(parent, []) for parent, _blocks in pblocks]
1166 for idx, (parent, blocks) in enumerate(pblocks):
1172 for idx, (parent, blocks) in enumerate(pblocks):
1167 for (a1, a2, b1, b2), _t in blocks:
1173 for (a1, a2, b1, b2), _t in blocks:
1168 if a2 - a1 >= b2 - b1:
1174 if a2 - a1 >= b2 - b1:
1169 for bk in xrange(b1, b2):
1175 for bk in xrange(b1, b2):
1170 if child[0][bk][0] == childfctx:
1176 if child[0][bk][0] == childfctx:
1171 ak = min(a1 + (bk - b1), a2 - 1)
1177 ak = min(a1 + (bk - b1), a2 - 1)
1172 child[0][bk] = parent[0][ak]
1178 child[0][bk] = parent[0][ak]
1173 else:
1179 else:
1174 remaining[idx][1].append((a1, a2, b1, b2))
1180 remaining[idx][1].append((a1, a2, b1, b2))
1175
1181
1176 # Then, look at anything left, which might involve repeating the last
1182 # Then, look at anything left, which might involve repeating the last
1177 # line.
1183 # line.
1178 for parent, blocks in remaining:
1184 for parent, blocks in remaining:
1179 for a1, a2, b1, b2 in blocks:
1185 for a1, a2, b1, b2 in blocks:
1180 for bk in xrange(b1, b2):
1186 for bk in xrange(b1, b2):
1181 if child[0][bk][0] == childfctx:
1187 if child[0][bk][0] == childfctx:
1182 ak = min(a1 + (bk - b1), a2 - 1)
1188 ak = min(a1 + (bk - b1), a2 - 1)
1183 child[0][bk] = parent[0][ak]
1189 child[0][bk] = parent[0][ak]
1184 return child
1190 return child
1185
1191
1186 class filectx(basefilectx):
1192 class filectx(basefilectx):
1187 """A filecontext object makes access to data related to a particular
1193 """A filecontext object makes access to data related to a particular
1188 filerevision convenient."""
1194 filerevision convenient."""
1189 def __init__(self, repo, path, changeid=None, fileid=None,
1195 def __init__(self, repo, path, changeid=None, fileid=None,
1190 filelog=None, changectx=None):
1196 filelog=None, changectx=None):
1191 """changeid can be a changeset revision, node, or tag.
1197 """changeid can be a changeset revision, node, or tag.
1192 fileid can be a file revision or node."""
1198 fileid can be a file revision or node."""
1193 self._repo = repo
1199 self._repo = repo
1194 self._path = path
1200 self._path = path
1195
1201
1196 assert (changeid is not None
1202 assert (changeid is not None
1197 or fileid is not None
1203 or fileid is not None
1198 or changectx is not None), \
1204 or changectx is not None), \
1199 ("bad args: changeid=%r, fileid=%r, changectx=%r"
1205 ("bad args: changeid=%r, fileid=%r, changectx=%r"
1200 % (changeid, fileid, changectx))
1206 % (changeid, fileid, changectx))
1201
1207
1202 if filelog is not None:
1208 if filelog is not None:
1203 self._filelog = filelog
1209 self._filelog = filelog
1204
1210
1205 if changeid is not None:
1211 if changeid is not None:
1206 self._changeid = changeid
1212 self._changeid = changeid
1207 if changectx is not None:
1213 if changectx is not None:
1208 self._changectx = changectx
1214 self._changectx = changectx
1209 if fileid is not None:
1215 if fileid is not None:
1210 self._fileid = fileid
1216 self._fileid = fileid
1211
1217
1212 @propertycache
1218 @propertycache
1213 def _changectx(self):
1219 def _changectx(self):
1214 try:
1220 try:
1215 return changectx(self._repo, self._changeid)
1221 return changectx(self._repo, self._changeid)
1216 except error.FilteredRepoLookupError:
1222 except error.FilteredRepoLookupError:
1217 # Linkrev may point to any revision in the repository. When the
1223 # Linkrev may point to any revision in the repository. When the
1218 # repository is filtered this may lead to `filectx` trying to build
1224 # repository is filtered this may lead to `filectx` trying to build
1219 # `changectx` for filtered revision. In such case we fallback to
1225 # `changectx` for filtered revision. In such case we fallback to
1220 # creating `changectx` on the unfiltered version of the reposition.
1226 # creating `changectx` on the unfiltered version of the reposition.
1221 # This fallback should not be an issue because `changectx` from
1227 # This fallback should not be an issue because `changectx` from
1222 # `filectx` are not used in complex operations that care about
1228 # `filectx` are not used in complex operations that care about
1223 # filtering.
1229 # filtering.
1224 #
1230 #
1225 # This fallback is a cheap and dirty fix that prevent several
1231 # This fallback is a cheap and dirty fix that prevent several
1226 # crashes. It does not ensure the behavior is correct. However the
1232 # crashes. It does not ensure the behavior is correct. However the
1227 # behavior was not correct before filtering either and "incorrect
1233 # behavior was not correct before filtering either and "incorrect
1228 # behavior" is seen as better as "crash"
1234 # behavior" is seen as better as "crash"
1229 #
1235 #
1230 # Linkrevs have several serious troubles with filtering that are
1236 # Linkrevs have several serious troubles with filtering that are
1231 # complicated to solve. Proper handling of the issue here should be
1237 # complicated to solve. Proper handling of the issue here should be
1232 # considered when solving linkrev issue are on the table.
1238 # considered when solving linkrev issue are on the table.
1233 return changectx(self._repo.unfiltered(), self._changeid)
1239 return changectx(self._repo.unfiltered(), self._changeid)
1234
1240
1235 def filectx(self, fileid, changeid=None):
1241 def filectx(self, fileid, changeid=None):
1236 '''opens an arbitrary revision of the file without
1242 '''opens an arbitrary revision of the file without
1237 opening a new filelog'''
1243 opening a new filelog'''
1238 return filectx(self._repo, self._path, fileid=fileid,
1244 return filectx(self._repo, self._path, fileid=fileid,
1239 filelog=self._filelog, changeid=changeid)
1245 filelog=self._filelog, changeid=changeid)
1240
1246
1241 def rawdata(self):
1247 def rawdata(self):
1242 return self._filelog.revision(self._filenode, raw=True)
1248 return self._filelog.revision(self._filenode, raw=True)
1243
1249
1244 def rawflags(self):
1250 def rawflags(self):
1245 """low-level revlog flags"""
1251 """low-level revlog flags"""
1246 return self._filelog.flags(self._filerev)
1252 return self._filelog.flags(self._filerev)
1247
1253
1248 def data(self):
1254 def data(self):
1249 try:
1255 try:
1250 return self._filelog.read(self._filenode)
1256 return self._filelog.read(self._filenode)
1251 except error.CensoredNodeError:
1257 except error.CensoredNodeError:
1252 if self._repo.ui.config("censor", "policy") == "ignore":
1258 if self._repo.ui.config("censor", "policy") == "ignore":
1253 return ""
1259 return ""
1254 raise error.Abort(_("censored node: %s") % short(self._filenode),
1260 raise error.Abort(_("censored node: %s") % short(self._filenode),
1255 hint=_("set censor.policy to ignore errors"))
1261 hint=_("set censor.policy to ignore errors"))
1256
1262
1257 def size(self):
1263 def size(self):
1258 return self._filelog.size(self._filerev)
1264 return self._filelog.size(self._filerev)
1259
1265
1260 @propertycache
1266 @propertycache
1261 def _copied(self):
1267 def _copied(self):
1262 """check if file was actually renamed in this changeset revision
1268 """check if file was actually renamed in this changeset revision
1263
1269
1264 If rename logged in file revision, we report copy for changeset only
1270 If rename logged in file revision, we report copy for changeset only
1265 if file revisions linkrev points back to the changeset in question
1271 if file revisions linkrev points back to the changeset in question
1266 or both changeset parents contain different file revisions.
1272 or both changeset parents contain different file revisions.
1267 """
1273 """
1268
1274
1269 renamed = self._filelog.renamed(self._filenode)
1275 renamed = self._filelog.renamed(self._filenode)
1270 if not renamed:
1276 if not renamed:
1271 return renamed
1277 return renamed
1272
1278
1273 if self.rev() == self.linkrev():
1279 if self.rev() == self.linkrev():
1274 return renamed
1280 return renamed
1275
1281
1276 name = self.path()
1282 name = self.path()
1277 fnode = self._filenode
1283 fnode = self._filenode
1278 for p in self._changectx.parents():
1284 for p in self._changectx.parents():
1279 try:
1285 try:
1280 if fnode == p.filenode(name):
1286 if fnode == p.filenode(name):
1281 return None
1287 return None
1282 except error.LookupError:
1288 except error.LookupError:
1283 pass
1289 pass
1284 return renamed
1290 return renamed
1285
1291
1286 def children(self):
1292 def children(self):
1287 # hard for renames
1293 # hard for renames
1288 c = self._filelog.children(self._filenode)
1294 c = self._filelog.children(self._filenode)
1289 return [filectx(self._repo, self._path, fileid=x,
1295 return [filectx(self._repo, self._path, fileid=x,
1290 filelog=self._filelog) for x in c]
1296 filelog=self._filelog) for x in c]
1291
1297
1292 class committablectx(basectx):
1298 class committablectx(basectx):
1293 """A committablectx object provides common functionality for a context that
1299 """A committablectx object provides common functionality for a context that
1294 wants the ability to commit, e.g. workingctx or memctx."""
1300 wants the ability to commit, e.g. workingctx or memctx."""
1295 def __init__(self, repo, text="", user=None, date=None, extra=None,
1301 def __init__(self, repo, text="", user=None, date=None, extra=None,
1296 changes=None):
1302 changes=None):
1297 self._repo = repo
1303 self._repo = repo
1298 self._rev = None
1304 self._rev = None
1299 self._node = None
1305 self._node = None
1300 self._text = text
1306 self._text = text
1301 if date:
1307 if date:
1302 self._date = util.parsedate(date)
1308 self._date = util.parsedate(date)
1303 if user:
1309 if user:
1304 self._user = user
1310 self._user = user
1305 if changes:
1311 if changes:
1306 self._status = changes
1312 self._status = changes
1307
1313
1308 self._extra = {}
1314 self._extra = {}
1309 if extra:
1315 if extra:
1310 self._extra = extra.copy()
1316 self._extra = extra.copy()
1311 if 'branch' not in self._extra:
1317 if 'branch' not in self._extra:
1312 try:
1318 try:
1313 branch = encoding.fromlocal(self._repo.dirstate.branch())
1319 branch = encoding.fromlocal(self._repo.dirstate.branch())
1314 except UnicodeDecodeError:
1320 except UnicodeDecodeError:
1315 raise error.Abort(_('branch name not in UTF-8!'))
1321 raise error.Abort(_('branch name not in UTF-8!'))
1316 self._extra['branch'] = branch
1322 self._extra['branch'] = branch
1317 if self._extra['branch'] == '':
1323 if self._extra['branch'] == '':
1318 self._extra['branch'] = 'default'
1324 self._extra['branch'] = 'default'
1319
1325
1320 def __bytes__(self):
1326 def __bytes__(self):
1321 return bytes(self._parents[0]) + "+"
1327 return bytes(self._parents[0]) + "+"
1322
1328
1323 __str__ = encoding.strmethod(__bytes__)
1329 __str__ = encoding.strmethod(__bytes__)
1324
1330
1325 def __nonzero__(self):
1331 def __nonzero__(self):
1326 return True
1332 return True
1327
1333
1328 __bool__ = __nonzero__
1334 __bool__ = __nonzero__
1329
1335
1330 def _buildflagfunc(self):
1336 def _buildflagfunc(self):
1331 # Create a fallback function for getting file flags when the
1337 # Create a fallback function for getting file flags when the
1332 # filesystem doesn't support them
1338 # filesystem doesn't support them
1333
1339
1334 copiesget = self._repo.dirstate.copies().get
1340 copiesget = self._repo.dirstate.copies().get
1335 parents = self.parents()
1341 parents = self.parents()
1336 if len(parents) < 2:
1342 if len(parents) < 2:
1337 # when we have one parent, it's easy: copy from parent
1343 # when we have one parent, it's easy: copy from parent
1338 man = parents[0].manifest()
1344 man = parents[0].manifest()
1339 def func(f):
1345 def func(f):
1340 f = copiesget(f, f)
1346 f = copiesget(f, f)
1341 return man.flags(f)
1347 return man.flags(f)
1342 else:
1348 else:
1343 # merges are tricky: we try to reconstruct the unstored
1349 # merges are tricky: we try to reconstruct the unstored
1344 # result from the merge (issue1802)
1350 # result from the merge (issue1802)
1345 p1, p2 = parents
1351 p1, p2 = parents
1346 pa = p1.ancestor(p2)
1352 pa = p1.ancestor(p2)
1347 m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()
1353 m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()
1348
1354
1349 def func(f):
1355 def func(f):
1350 f = copiesget(f, f) # may be wrong for merges with copies
1356 f = copiesget(f, f) # may be wrong for merges with copies
1351 fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
1357 fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
1352 if fl1 == fl2:
1358 if fl1 == fl2:
1353 return fl1
1359 return fl1
1354 if fl1 == fla:
1360 if fl1 == fla:
1355 return fl2
1361 return fl2
1356 if fl2 == fla:
1362 if fl2 == fla:
1357 return fl1
1363 return fl1
1358 return '' # punt for conflicts
1364 return '' # punt for conflicts
1359
1365
1360 return func
1366 return func
1361
1367
1362 @propertycache
1368 @propertycache
1363 def _flagfunc(self):
1369 def _flagfunc(self):
1364 return self._repo.dirstate.flagfunc(self._buildflagfunc)
1370 return self._repo.dirstate.flagfunc(self._buildflagfunc)
1365
1371
1366 @propertycache
1372 @propertycache
1367 def _status(self):
1373 def _status(self):
1368 return self._repo.status()
1374 return self._repo.status()
1369
1375
1370 @propertycache
1376 @propertycache
1371 def _user(self):
1377 def _user(self):
1372 return self._repo.ui.username()
1378 return self._repo.ui.username()
1373
1379
1374 @propertycache
1380 @propertycache
1375 def _date(self):
1381 def _date(self):
1376 ui = self._repo.ui
1382 ui = self._repo.ui
1377 date = ui.configdate('devel', 'default-date')
1383 date = ui.configdate('devel', 'default-date')
1378 if date is None:
1384 if date is None:
1379 date = util.makedate()
1385 date = util.makedate()
1380 return date
1386 return date
1381
1387
1382 def subrev(self, subpath):
1388 def subrev(self, subpath):
1383 return None
1389 return None
1384
1390
1385 def manifestnode(self):
1391 def manifestnode(self):
1386 return None
1392 return None
1387 def user(self):
1393 def user(self):
1388 return self._user or self._repo.ui.username()
1394 return self._user or self._repo.ui.username()
1389 def date(self):
1395 def date(self):
1390 return self._date
1396 return self._date
1391 def description(self):
1397 def description(self):
1392 return self._text
1398 return self._text
1393 def files(self):
1399 def files(self):
1394 return sorted(self._status.modified + self._status.added +
1400 return sorted(self._status.modified + self._status.added +
1395 self._status.removed)
1401 self._status.removed)
1396
1402
1397 def modified(self):
1403 def modified(self):
1398 return self._status.modified
1404 return self._status.modified
1399 def added(self):
1405 def added(self):
1400 return self._status.added
1406 return self._status.added
1401 def removed(self):
1407 def removed(self):
1402 return self._status.removed
1408 return self._status.removed
1403 def deleted(self):
1409 def deleted(self):
1404 return self._status.deleted
1410 return self._status.deleted
1405 def branch(self):
1411 def branch(self):
1406 return encoding.tolocal(self._extra['branch'])
1412 return encoding.tolocal(self._extra['branch'])
1407 def closesbranch(self):
1413 def closesbranch(self):
1408 return 'close' in self._extra
1414 return 'close' in self._extra
1409 def extra(self):
1415 def extra(self):
1410 return self._extra
1416 return self._extra
1411
1417
1412 def tags(self):
1418 def tags(self):
1413 return []
1419 return []
1414
1420
1415 def bookmarks(self):
1421 def bookmarks(self):
1416 b = []
1422 b = []
1417 for p in self.parents():
1423 for p in self.parents():
1418 b.extend(p.bookmarks())
1424 b.extend(p.bookmarks())
1419 return b
1425 return b
1420
1426
1421 def phase(self):
1427 def phase(self):
1422 phase = phases.draft # default phase to draft
1428 phase = phases.draft # default phase to draft
1423 for p in self.parents():
1429 for p in self.parents():
1424 phase = max(phase, p.phase())
1430 phase = max(phase, p.phase())
1425 return phase
1431 return phase
1426
1432
1427 def hidden(self):
1433 def hidden(self):
1428 return False
1434 return False
1429
1435
1430 def children(self):
1436 def children(self):
1431 return []
1437 return []
1432
1438
1433 def flags(self, path):
1439 def flags(self, path):
1434 if r'_manifest' in self.__dict__:
1440 if r'_manifest' in self.__dict__:
1435 try:
1441 try:
1436 return self._manifest.flags(path)
1442 return self._manifest.flags(path)
1437 except KeyError:
1443 except KeyError:
1438 return ''
1444 return ''
1439
1445
1440 try:
1446 try:
1441 return self._flagfunc(path)
1447 return self._flagfunc(path)
1442 except OSError:
1448 except OSError:
1443 return ''
1449 return ''
1444
1450
1445 def ancestor(self, c2):
1451 def ancestor(self, c2):
1446 """return the "best" ancestor context of self and c2"""
1452 """return the "best" ancestor context of self and c2"""
1447 return self._parents[0].ancestor(c2) # punt on two parents for now
1453 return self._parents[0].ancestor(c2) # punt on two parents for now
1448
1454
1449 def walk(self, match):
1455 def walk(self, match):
1450 '''Generates matching file names.'''
1456 '''Generates matching file names.'''
1451 return sorted(self._repo.dirstate.walk(match, sorted(self.substate),
1457 return sorted(self._repo.dirstate.walk(match, sorted(self.substate),
1452 True, False))
1458 True, False))
1453
1459
1454 def matches(self, match):
1460 def matches(self, match):
1455 return sorted(self._repo.dirstate.matches(match))
1461 return sorted(self._repo.dirstate.matches(match))
1456
1462
1457 def ancestors(self):
1463 def ancestors(self):
1458 for p in self._parents:
1464 for p in self._parents:
1459 yield p
1465 yield p
1460 for a in self._repo.changelog.ancestors(
1466 for a in self._repo.changelog.ancestors(
1461 [p.rev() for p in self._parents]):
1467 [p.rev() for p in self._parents]):
1462 yield changectx(self._repo, a)
1468 yield changectx(self._repo, a)
1463
1469
1464 def markcommitted(self, node):
1470 def markcommitted(self, node):
1465 """Perform post-commit cleanup necessary after committing this ctx
1471 """Perform post-commit cleanup necessary after committing this ctx
1466
1472
1467 Specifically, this updates backing stores this working context
1473 Specifically, this updates backing stores this working context
1468 wraps to reflect the fact that the changes reflected by this
1474 wraps to reflect the fact that the changes reflected by this
1469 workingctx have been committed. For example, it marks
1475 workingctx have been committed. For example, it marks
1470 modified and added files as normal in the dirstate.
1476 modified and added files as normal in the dirstate.
1471
1477
1472 """
1478 """
1473
1479
1474 with self._repo.dirstate.parentchange():
1480 with self._repo.dirstate.parentchange():
1475 for f in self.modified() + self.added():
1481 for f in self.modified() + self.added():
1476 self._repo.dirstate.normal(f)
1482 self._repo.dirstate.normal(f)
1477 for f in self.removed():
1483 for f in self.removed():
1478 self._repo.dirstate.drop(f)
1484 self._repo.dirstate.drop(f)
1479 self._repo.dirstate.setparents(node)
1485 self._repo.dirstate.setparents(node)
1480
1486
1481 # write changes out explicitly, because nesting wlock at
1487 # write changes out explicitly, because nesting wlock at
1482 # runtime may prevent 'wlock.release()' in 'repo.commit()'
1488 # runtime may prevent 'wlock.release()' in 'repo.commit()'
1483 # from immediately doing so for subsequent changing files
1489 # from immediately doing so for subsequent changing files
1484 self._repo.dirstate.write(self._repo.currenttransaction())
1490 self._repo.dirstate.write(self._repo.currenttransaction())
1485
1491
1486 def dirty(self, missing=False, merge=True, branch=True):
1492 def dirty(self, missing=False, merge=True, branch=True):
1487 return False
1493 return False
1488
1494
1489 class workingctx(committablectx):
1495 class workingctx(committablectx):
1490 """A workingctx object makes access to data related to
1496 """A workingctx object makes access to data related to
1491 the current working directory convenient.
1497 the current working directory convenient.
1492 date - any valid date string or (unixtime, offset), or None.
1498 date - any valid date string or (unixtime, offset), or None.
1493 user - username string, or None.
1499 user - username string, or None.
1494 extra - a dictionary of extra values, or None.
1500 extra - a dictionary of extra values, or None.
1495 changes - a list of file lists as returned by localrepo.status()
1501 changes - a list of file lists as returned by localrepo.status()
1496 or None to use the repository status.
1502 or None to use the repository status.
1497 """
1503 """
1498 def __init__(self, repo, text="", user=None, date=None, extra=None,
1504 def __init__(self, repo, text="", user=None, date=None, extra=None,
1499 changes=None):
1505 changes=None):
1500 super(workingctx, self).__init__(repo, text, user, date, extra, changes)
1506 super(workingctx, self).__init__(repo, text, user, date, extra, changes)
1501
1507
1502 def __iter__(self):
1508 def __iter__(self):
1503 d = self._repo.dirstate
1509 d = self._repo.dirstate
1504 for f in d:
1510 for f in d:
1505 if d[f] != 'r':
1511 if d[f] != 'r':
1506 yield f
1512 yield f
1507
1513
1508 def __contains__(self, key):
1514 def __contains__(self, key):
1509 return self._repo.dirstate[key] not in "?r"
1515 return self._repo.dirstate[key] not in "?r"
1510
1516
1511 def hex(self):
1517 def hex(self):
1512 return hex(wdirid)
1518 return hex(wdirid)
1513
1519
1514 @propertycache
1520 @propertycache
1515 def _parents(self):
1521 def _parents(self):
1516 p = self._repo.dirstate.parents()
1522 p = self._repo.dirstate.parents()
1517 if p[1] == nullid:
1523 if p[1] == nullid:
1518 p = p[:-1]
1524 p = p[:-1]
1519 return [changectx(self._repo, x) for x in p]
1525 return [changectx(self._repo, x) for x in p]
1520
1526
1521 def filectx(self, path, filelog=None):
1527 def filectx(self, path, filelog=None):
1522 """get a file context from the working directory"""
1528 """get a file context from the working directory"""
1523 return workingfilectx(self._repo, path, workingctx=self,
1529 return workingfilectx(self._repo, path, workingctx=self,
1524 filelog=filelog)
1530 filelog=filelog)
1525
1531
1526 def dirty(self, missing=False, merge=True, branch=True):
1532 def dirty(self, missing=False, merge=True, branch=True):
1527 "check whether a working directory is modified"
1533 "check whether a working directory is modified"
1528 # check subrepos first
1534 # check subrepos first
1529 for s in sorted(self.substate):
1535 for s in sorted(self.substate):
1530 if self.sub(s).dirty(missing=missing):
1536 if self.sub(s).dirty(missing=missing):
1531 return True
1537 return True
1532 # check current working dir
1538 # check current working dir
1533 return ((merge and self.p2()) or
1539 return ((merge and self.p2()) or
1534 (branch and self.branch() != self.p1().branch()) or
1540 (branch and self.branch() != self.p1().branch()) or
1535 self.modified() or self.added() or self.removed() or
1541 self.modified() or self.added() or self.removed() or
1536 (missing and self.deleted()))
1542 (missing and self.deleted()))
1537
1543
1538 def add(self, list, prefix=""):
1544 def add(self, list, prefix=""):
1539 with self._repo.wlock():
1545 with self._repo.wlock():
1540 ui, ds = self._repo.ui, self._repo.dirstate
1546 ui, ds = self._repo.ui, self._repo.dirstate
1541 uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
1547 uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
1542 rejected = []
1548 rejected = []
1543 lstat = self._repo.wvfs.lstat
1549 lstat = self._repo.wvfs.lstat
1544 for f in list:
1550 for f in list:
1545 # ds.pathto() returns an absolute file when this is invoked from
1551 # ds.pathto() returns an absolute file when this is invoked from
1546 # the keyword extension. That gets flagged as non-portable on
1552 # the keyword extension. That gets flagged as non-portable on
1547 # Windows, since it contains the drive letter and colon.
1553 # Windows, since it contains the drive letter and colon.
1548 scmutil.checkportable(ui, os.path.join(prefix, f))
1554 scmutil.checkportable(ui, os.path.join(prefix, f))
1549 try:
1555 try:
1550 st = lstat(f)
1556 st = lstat(f)
1551 except OSError:
1557 except OSError:
1552 ui.warn(_("%s does not exist!\n") % uipath(f))
1558 ui.warn(_("%s does not exist!\n") % uipath(f))
1553 rejected.append(f)
1559 rejected.append(f)
1554 continue
1560 continue
1555 if st.st_size > 10000000:
1561 if st.st_size > 10000000:
1556 ui.warn(_("%s: up to %d MB of RAM may be required "
1562 ui.warn(_("%s: up to %d MB of RAM may be required "
1557 "to manage this file\n"
1563 "to manage this file\n"
1558 "(use 'hg revert %s' to cancel the "
1564 "(use 'hg revert %s' to cancel the "
1559 "pending addition)\n")
1565 "pending addition)\n")
1560 % (f, 3 * st.st_size // 1000000, uipath(f)))
1566 % (f, 3 * st.st_size // 1000000, uipath(f)))
1561 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1567 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1562 ui.warn(_("%s not added: only files and symlinks "
1568 ui.warn(_("%s not added: only files and symlinks "
1563 "supported currently\n") % uipath(f))
1569 "supported currently\n") % uipath(f))
1564 rejected.append(f)
1570 rejected.append(f)
1565 elif ds[f] in 'amn':
1571 elif ds[f] in 'amn':
1566 ui.warn(_("%s already tracked!\n") % uipath(f))
1572 ui.warn(_("%s already tracked!\n") % uipath(f))
1567 elif ds[f] == 'r':
1573 elif ds[f] == 'r':
1568 ds.normallookup(f)
1574 ds.normallookup(f)
1569 else:
1575 else:
1570 ds.add(f)
1576 ds.add(f)
1571 return rejected
1577 return rejected
1572
1578
1573 def forget(self, files, prefix=""):
1579 def forget(self, files, prefix=""):
1574 with self._repo.wlock():
1580 with self._repo.wlock():
1575 ds = self._repo.dirstate
1581 ds = self._repo.dirstate
1576 uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
1582 uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
1577 rejected = []
1583 rejected = []
1578 for f in files:
1584 for f in files:
1579 if f not in self._repo.dirstate:
1585 if f not in self._repo.dirstate:
1580 self._repo.ui.warn(_("%s not tracked!\n") % uipath(f))
1586 self._repo.ui.warn(_("%s not tracked!\n") % uipath(f))
1581 rejected.append(f)
1587 rejected.append(f)
1582 elif self._repo.dirstate[f] != 'a':
1588 elif self._repo.dirstate[f] != 'a':
1583 self._repo.dirstate.remove(f)
1589 self._repo.dirstate.remove(f)
1584 else:
1590 else:
1585 self._repo.dirstate.drop(f)
1591 self._repo.dirstate.drop(f)
1586 return rejected
1592 return rejected
1587
1593
1588 def undelete(self, list):
1594 def undelete(self, list):
1589 pctxs = self.parents()
1595 pctxs = self.parents()
1590 with self._repo.wlock():
1596 with self._repo.wlock():
1591 ds = self._repo.dirstate
1597 ds = self._repo.dirstate
1592 for f in list:
1598 for f in list:
1593 if self._repo.dirstate[f] != 'r':
1599 if self._repo.dirstate[f] != 'r':
1594 self._repo.ui.warn(_("%s not removed!\n") % ds.pathto(f))
1600 self._repo.ui.warn(_("%s not removed!\n") % ds.pathto(f))
1595 else:
1601 else:
1596 fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f]
1602 fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f]
1597 t = fctx.data()
1603 t = fctx.data()
1598 self._repo.wwrite(f, t, fctx.flags())
1604 self._repo.wwrite(f, t, fctx.flags())
1599 self._repo.dirstate.normal(f)
1605 self._repo.dirstate.normal(f)
1600
1606
1601 def copy(self, source, dest):
1607 def copy(self, source, dest):
1602 try:
1608 try:
1603 st = self._repo.wvfs.lstat(dest)
1609 st = self._repo.wvfs.lstat(dest)
1604 except OSError as err:
1610 except OSError as err:
1605 if err.errno != errno.ENOENT:
1611 if err.errno != errno.ENOENT:
1606 raise
1612 raise
1607 self._repo.ui.warn(_("%s does not exist!\n")
1613 self._repo.ui.warn(_("%s does not exist!\n")
1608 % self._repo.dirstate.pathto(dest))
1614 % self._repo.dirstate.pathto(dest))
1609 return
1615 return
1610 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1616 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1611 self._repo.ui.warn(_("copy failed: %s is not a file or a "
1617 self._repo.ui.warn(_("copy failed: %s is not a file or a "
1612 "symbolic link\n")
1618 "symbolic link\n")
1613 % self._repo.dirstate.pathto(dest))
1619 % self._repo.dirstate.pathto(dest))
1614 else:
1620 else:
1615 with self._repo.wlock():
1621 with self._repo.wlock():
1616 if self._repo.dirstate[dest] in '?':
1622 if self._repo.dirstate[dest] in '?':
1617 self._repo.dirstate.add(dest)
1623 self._repo.dirstate.add(dest)
1618 elif self._repo.dirstate[dest] in 'r':
1624 elif self._repo.dirstate[dest] in 'r':
1619 self._repo.dirstate.normallookup(dest)
1625 self._repo.dirstate.normallookup(dest)
1620 self._repo.dirstate.copy(source, dest)
1626 self._repo.dirstate.copy(source, dest)
1621
1627
1622 def match(self, pats=None, include=None, exclude=None, default='glob',
1628 def match(self, pats=None, include=None, exclude=None, default='glob',
1623 listsubrepos=False, badfn=None):
1629 listsubrepos=False, badfn=None):
1624 r = self._repo
1630 r = self._repo
1625
1631
1626 # Only a case insensitive filesystem needs magic to translate user input
1632 # Only a case insensitive filesystem needs magic to translate user input
1627 # to actual case in the filesystem.
1633 # to actual case in the filesystem.
1628 icasefs = not util.fscasesensitive(r.root)
1634 icasefs = not util.fscasesensitive(r.root)
1629 return matchmod.match(r.root, r.getcwd(), pats, include, exclude,
1635 return matchmod.match(r.root, r.getcwd(), pats, include, exclude,
1630 default, auditor=r.auditor, ctx=self,
1636 default, auditor=r.auditor, ctx=self,
1631 listsubrepos=listsubrepos, badfn=badfn,
1637 listsubrepos=listsubrepos, badfn=badfn,
1632 icasefs=icasefs)
1638 icasefs=icasefs)
1633
1639
1634 def _filtersuspectsymlink(self, files):
1640 def _filtersuspectsymlink(self, files):
1635 if not files or self._repo.dirstate._checklink:
1641 if not files or self._repo.dirstate._checklink:
1636 return files
1642 return files
1637
1643
1638 # Symlink placeholders may get non-symlink-like contents
1644 # Symlink placeholders may get non-symlink-like contents
1639 # via user error or dereferencing by NFS or Samba servers,
1645 # via user error or dereferencing by NFS or Samba servers,
1640 # so we filter out any placeholders that don't look like a
1646 # so we filter out any placeholders that don't look like a
1641 # symlink
1647 # symlink
1642 sane = []
1648 sane = []
1643 for f in files:
1649 for f in files:
1644 if self.flags(f) == 'l':
1650 if self.flags(f) == 'l':
1645 d = self[f].data()
1651 d = self[f].data()
1646 if d == '' or len(d) >= 1024 or '\n' in d or util.binary(d):
1652 if d == '' or len(d) >= 1024 or '\n' in d or util.binary(d):
1647 self._repo.ui.debug('ignoring suspect symlink placeholder'
1653 self._repo.ui.debug('ignoring suspect symlink placeholder'
1648 ' "%s"\n' % f)
1654 ' "%s"\n' % f)
1649 continue
1655 continue
1650 sane.append(f)
1656 sane.append(f)
1651 return sane
1657 return sane
1652
1658
1653 def _checklookup(self, files):
1659 def _checklookup(self, files):
1654 # check for any possibly clean files
1660 # check for any possibly clean files
1655 if not files:
1661 if not files:
1656 return [], [], []
1662 return [], [], []
1657
1663
1658 modified = []
1664 modified = []
1659 deleted = []
1665 deleted = []
1660 fixup = []
1666 fixup = []
1661 pctx = self._parents[0]
1667 pctx = self._parents[0]
1662 # do a full compare of any files that might have changed
1668 # do a full compare of any files that might have changed
1663 for f in sorted(files):
1669 for f in sorted(files):
1664 try:
1670 try:
1665 # This will return True for a file that got replaced by a
1671 # This will return True for a file that got replaced by a
1666 # directory in the interim, but fixing that is pretty hard.
1672 # directory in the interim, but fixing that is pretty hard.
1667 if (f not in pctx or self.flags(f) != pctx.flags(f)
1673 if (f not in pctx or self.flags(f) != pctx.flags(f)
1668 or pctx[f].cmp(self[f])):
1674 or pctx[f].cmp(self[f])):
1669 modified.append(f)
1675 modified.append(f)
1670 else:
1676 else:
1671 fixup.append(f)
1677 fixup.append(f)
1672 except (IOError, OSError):
1678 except (IOError, OSError):
1673 # A file become inaccessible in between? Mark it as deleted,
1679 # A file become inaccessible in between? Mark it as deleted,
1674 # matching dirstate behavior (issue5584).
1680 # matching dirstate behavior (issue5584).
1675 # The dirstate has more complex behavior around whether a
1681 # The dirstate has more complex behavior around whether a
1676 # missing file matches a directory, etc, but we don't need to
1682 # missing file matches a directory, etc, but we don't need to
1677 # bother with that: if f has made it to this point, we're sure
1683 # bother with that: if f has made it to this point, we're sure
1678 # it's in the dirstate.
1684 # it's in the dirstate.
1679 deleted.append(f)
1685 deleted.append(f)
1680
1686
1681 return modified, deleted, fixup
1687 return modified, deleted, fixup
1682
1688
1683 def _poststatusfixup(self, status, fixup):
1689 def _poststatusfixup(self, status, fixup):
1684 """update dirstate for files that are actually clean"""
1690 """update dirstate for files that are actually clean"""
1685 poststatus = self._repo.postdsstatus()
1691 poststatus = self._repo.postdsstatus()
1686 if fixup or poststatus:
1692 if fixup or poststatus:
1687 try:
1693 try:
1688 oldid = self._repo.dirstate.identity()
1694 oldid = self._repo.dirstate.identity()
1689
1695
1690 # updating the dirstate is optional
1696 # updating the dirstate is optional
1691 # so we don't wait on the lock
1697 # so we don't wait on the lock
1692 # wlock can invalidate the dirstate, so cache normal _after_
1698 # wlock can invalidate the dirstate, so cache normal _after_
1693 # taking the lock
1699 # taking the lock
1694 with self._repo.wlock(False):
1700 with self._repo.wlock(False):
1695 if self._repo.dirstate.identity() == oldid:
1701 if self._repo.dirstate.identity() == oldid:
1696 if fixup:
1702 if fixup:
1697 normal = self._repo.dirstate.normal
1703 normal = self._repo.dirstate.normal
1698 for f in fixup:
1704 for f in fixup:
1699 normal(f)
1705 normal(f)
1700 # write changes out explicitly, because nesting
1706 # write changes out explicitly, because nesting
1701 # wlock at runtime may prevent 'wlock.release()'
1707 # wlock at runtime may prevent 'wlock.release()'
1702 # after this block from doing so for subsequent
1708 # after this block from doing so for subsequent
1703 # changing files
1709 # changing files
1704 tr = self._repo.currenttransaction()
1710 tr = self._repo.currenttransaction()
1705 self._repo.dirstate.write(tr)
1711 self._repo.dirstate.write(tr)
1706
1712
1707 if poststatus:
1713 if poststatus:
1708 for ps in poststatus:
1714 for ps in poststatus:
1709 ps(self, status)
1715 ps(self, status)
1710 else:
1716 else:
1711 # in this case, writing changes out breaks
1717 # in this case, writing changes out breaks
1712 # consistency, because .hg/dirstate was
1718 # consistency, because .hg/dirstate was
1713 # already changed simultaneously after last
1719 # already changed simultaneously after last
1714 # caching (see also issue5584 for detail)
1720 # caching (see also issue5584 for detail)
1715 self._repo.ui.debug('skip updating dirstate: '
1721 self._repo.ui.debug('skip updating dirstate: '
1716 'identity mismatch\n')
1722 'identity mismatch\n')
1717 except error.LockError:
1723 except error.LockError:
1718 pass
1724 pass
1719 finally:
1725 finally:
1720 # Even if the wlock couldn't be grabbed, clear out the list.
1726 # Even if the wlock couldn't be grabbed, clear out the list.
1721 self._repo.clearpostdsstatus()
1727 self._repo.clearpostdsstatus()
1722
1728
1723 def _dirstatestatus(self, match=None, ignored=False, clean=False,
1729 def _dirstatestatus(self, match=None, ignored=False, clean=False,
1724 unknown=False):
1730 unknown=False):
1725 '''Gets the status from the dirstate -- internal use only.'''
1731 '''Gets the status from the dirstate -- internal use only.'''
1726 listignored, listclean, listunknown = ignored, clean, unknown
1732 listignored, listclean, listunknown = ignored, clean, unknown
1727 match = match or matchmod.always(self._repo.root, self._repo.getcwd())
1733 match = match or matchmod.always(self._repo.root, self._repo.getcwd())
1728 subrepos = []
1734 subrepos = []
1729 if '.hgsub' in self:
1735 if '.hgsub' in self:
1730 subrepos = sorted(self.substate)
1736 subrepos = sorted(self.substate)
1731 cmp, s = self._repo.dirstate.status(match, subrepos, listignored,
1737 cmp, s = self._repo.dirstate.status(match, subrepos, listignored,
1732 listclean, listunknown)
1738 listclean, listunknown)
1733
1739
1734 # check for any possibly clean files
1740 # check for any possibly clean files
1735 fixup = []
1741 fixup = []
1736 if cmp:
1742 if cmp:
1737 modified2, deleted2, fixup = self._checklookup(cmp)
1743 modified2, deleted2, fixup = self._checklookup(cmp)
1738 s.modified.extend(modified2)
1744 s.modified.extend(modified2)
1739 s.deleted.extend(deleted2)
1745 s.deleted.extend(deleted2)
1740
1746
1741 if fixup and listclean:
1747 if fixup and listclean:
1742 s.clean.extend(fixup)
1748 s.clean.extend(fixup)
1743
1749
1744 self._poststatusfixup(s, fixup)
1750 self._poststatusfixup(s, fixup)
1745
1751
1746 if match.always():
1752 if match.always():
1747 # cache for performance
1753 # cache for performance
1748 if s.unknown or s.ignored or s.clean:
1754 if s.unknown or s.ignored or s.clean:
1749 # "_status" is cached with list*=False in the normal route
1755 # "_status" is cached with list*=False in the normal route
1750 self._status = scmutil.status(s.modified, s.added, s.removed,
1756 self._status = scmutil.status(s.modified, s.added, s.removed,
1751 s.deleted, [], [], [])
1757 s.deleted, [], [], [])
1752 else:
1758 else:
1753 self._status = s
1759 self._status = s
1754
1760
1755 return s
1761 return s
1756
1762
1757 @propertycache
1763 @propertycache
1758 def _manifest(self):
1764 def _manifest(self):
1759 """generate a manifest corresponding to the values in self._status
1765 """generate a manifest corresponding to the values in self._status
1760
1766
1761 This reuse the file nodeid from parent, but we use special node
1767 This reuse the file nodeid from parent, but we use special node
1762 identifiers for added and modified files. This is used by manifests
1768 identifiers for added and modified files. This is used by manifests
1763 merge to see that files are different and by update logic to avoid
1769 merge to see that files are different and by update logic to avoid
1764 deleting newly added files.
1770 deleting newly added files.
1765 """
1771 """
1766 return self._buildstatusmanifest(self._status)
1772 return self._buildstatusmanifest(self._status)
1767
1773
1768 def _buildstatusmanifest(self, status):
1774 def _buildstatusmanifest(self, status):
1769 """Builds a manifest that includes the given status results."""
1775 """Builds a manifest that includes the given status results."""
1770 parents = self.parents()
1776 parents = self.parents()
1771
1777
1772 man = parents[0].manifest().copy()
1778 man = parents[0].manifest().copy()
1773
1779
1774 ff = self._flagfunc
1780 ff = self._flagfunc
1775 for i, l in ((addednodeid, status.added),
1781 for i, l in ((addednodeid, status.added),
1776 (modifiednodeid, status.modified)):
1782 (modifiednodeid, status.modified)):
1777 for f in l:
1783 for f in l:
1778 man[f] = i
1784 man[f] = i
1779 try:
1785 try:
1780 man.setflag(f, ff(f))
1786 man.setflag(f, ff(f))
1781 except OSError:
1787 except OSError:
1782 pass
1788 pass
1783
1789
1784 for f in status.deleted + status.removed:
1790 for f in status.deleted + status.removed:
1785 if f in man:
1791 if f in man:
1786 del man[f]
1792 del man[f]
1787
1793
1788 return man
1794 return man
1789
1795
1790 def _buildstatus(self, other, s, match, listignored, listclean,
1796 def _buildstatus(self, other, s, match, listignored, listclean,
1791 listunknown):
1797 listunknown):
1792 """build a status with respect to another context
1798 """build a status with respect to another context
1793
1799
1794 This includes logic for maintaining the fast path of status when
1800 This includes logic for maintaining the fast path of status when
1795 comparing the working directory against its parent, which is to skip
1801 comparing the working directory against its parent, which is to skip
1796 building a new manifest if self (working directory) is not comparing
1802 building a new manifest if self (working directory) is not comparing
1797 against its parent (repo['.']).
1803 against its parent (repo['.']).
1798 """
1804 """
1799 s = self._dirstatestatus(match, listignored, listclean, listunknown)
1805 s = self._dirstatestatus(match, listignored, listclean, listunknown)
1800 # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
1806 # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
1801 # might have accidentally ended up with the entire contents of the file
1807 # might have accidentally ended up with the entire contents of the file
1802 # they are supposed to be linking to.
1808 # they are supposed to be linking to.
1803 s.modified[:] = self._filtersuspectsymlink(s.modified)
1809 s.modified[:] = self._filtersuspectsymlink(s.modified)
1804 if other != self._repo['.']:
1810 if other != self._repo['.']:
1805 s = super(workingctx, self)._buildstatus(other, s, match,
1811 s = super(workingctx, self)._buildstatus(other, s, match,
1806 listignored, listclean,
1812 listignored, listclean,
1807 listunknown)
1813 listunknown)
1808 return s
1814 return s
1809
1815
1810 def _matchstatus(self, other, match):
1816 def _matchstatus(self, other, match):
1811 """override the match method with a filter for directory patterns
1817 """override the match method with a filter for directory patterns
1812
1818
1813 We use inheritance to customize the match.bad method only in cases of
1819 We use inheritance to customize the match.bad method only in cases of
1814 workingctx since it belongs only to the working directory when
1820 workingctx since it belongs only to the working directory when
1815 comparing against the parent changeset.
1821 comparing against the parent changeset.
1816
1822
1817 If we aren't comparing against the working directory's parent, then we
1823 If we aren't comparing against the working directory's parent, then we
1818 just use the default match object sent to us.
1824 just use the default match object sent to us.
1819 """
1825 """
1820 superself = super(workingctx, self)
1826 superself = super(workingctx, self)
1821 match = superself._matchstatus(other, match)
1827 match = superself._matchstatus(other, match)
1822 if other != self._repo['.']:
1828 if other != self._repo['.']:
1823 def bad(f, msg):
1829 def bad(f, msg):
1824 # 'f' may be a directory pattern from 'match.files()',
1830 # 'f' may be a directory pattern from 'match.files()',
1825 # so 'f not in ctx1' is not enough
1831 # so 'f not in ctx1' is not enough
1826 if f not in other and not other.hasdir(f):
1832 if f not in other and not other.hasdir(f):
1827 self._repo.ui.warn('%s: %s\n' %
1833 self._repo.ui.warn('%s: %s\n' %
1828 (self._repo.dirstate.pathto(f), msg))
1834 (self._repo.dirstate.pathto(f), msg))
1829 match.bad = bad
1835 match.bad = bad
1830 return match
1836 return match
1831
1837
1832 def markcommitted(self, node):
1838 def markcommitted(self, node):
1833 super(workingctx, self).markcommitted(node)
1839 super(workingctx, self).markcommitted(node)
1834
1840
1835 sparse.aftercommit(self._repo, node)
1841 sparse.aftercommit(self._repo, node)
1836
1842
class committablefilectx(basefilectx):
    """A committablefilectx provides common functionality for a file context
    that wants the ability to commit, e.g. workingfilectx or memfilectx."""
    def __init__(self, repo, path, filelog=None, ctx=None):
        self._repo = repo
        self._path = path
        self._changeid = None
        self._filerev = self._filenode = None

        if filelog is not None:
            self._filelog = filelog
        if ctx:
            self._changectx = ctx

    def __nonzero__(self):
        return True

    # Python 3 spelling of __nonzero__
    __bool__ = __nonzero__

    def linkrev(self):
        # linked to self._changectx no matter if file is modified or not
        return self.rev()

    def parents(self):
        '''return parent filectxs, following copies if necessary'''
        def filenode(ctx, path):
            return ctx._manifest.get(path, nullid)

        path = self._path
        fl = self._filelog
        pcl = self._changectx._parents
        renamed = self.renamed()

        if renamed:
            pl = [renamed + (None,)]
        else:
            pl = [(path, filenode(pcl[0], path), fl)]

        for pc in pcl[1:]:
            pl.append((path, filenode(pc, path), fl))

        return [self._parentfilectx(p, fileid=n, filelog=l)
                for p, n, l in pl if n != nullid]

    def children(self):
        # an uncommitted file has no children yet
        return []
1883
1889
class workingfilectx(committablefilectx):
    """A workingfilectx object makes access to data related to a particular
    file in the working directory convenient."""
    def __init__(self, repo, path, filelog=None, workingctx=None):
        super(workingfilectx, self).__init__(repo, path, filelog, workingctx)

    @propertycache
    def _changectx(self):
        return workingctx(self._repo)

    def data(self):
        return self._repo.wread(self._path)

    def renamed(self):
        """Return (source, source filenode) if this file is a copy, else
        None."""
        rp = self._repo.dirstate.copied(self._path)
        if not rp:
            return None
        return rp, self._changectx._parents[0]._manifest.get(rp, nullid)

    def size(self):
        return self._repo.wvfs.lstat(self._path).st_size

    def date(self):
        t, tz = self._changectx.date()
        try:
            return (self._repo.wvfs.lstat(self._path).st_mtime, tz)
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
            # file is gone: fall back to the changectx's date
            return (t, tz)

    def exists(self):
        return self._repo.wvfs.exists(self._path)

    def lexists(self):
        return self._repo.wvfs.lexists(self._path)

    def audit(self):
        return self._repo.wvfs.audit(self._path)

    def cmp(self, fctx):
        """compare with other file context

        returns True if different than fctx.
        """
        # fctx should be a filectx (not a workingfilectx)
        # invert comparison to reuse the same code path
        return fctx.cmp(self)

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        self._repo.wvfs.unlinkpath(self._path, ignoremissing=ignoremissing)

    def write(self, data, flags, backgroundclose=False):
        """wraps repo.wwrite"""
        self._repo.wwrite(self._path, data, flags,
                          backgroundclose=backgroundclose)

    def setflags(self, l, x):
        self._repo.wvfs.setflags(self._path, l, x)
1942
1948
class workingcommitctx(workingctx):
    """A workingcommitctx object makes access to data related to
    the revision being committed convenient.

    This hides changes in the working directory, if they aren't
    committed in this context.
    """
    def __init__(self, repo, changes,
                 text="", user=None, date=None, extra=None):
        super(workingctx, self).__init__(repo, text, user, date, extra,
                                         changes)

    def _dirstatestatus(self, match=None, ignored=False, clean=False,
                        unknown=False):
        """Return matched files only in ``self._status``

        Uncommitted files appear "clean" via this context, even if
        they aren't actually so in the working directory.
        """
        match = match or matchmod.always(self._repo.root, self._repo.getcwd())
        if clean:
            clean = [f for f in self._manifest if f not in self._changedset]
        else:
            clean = []
        return scmutil.status([f for f in self._status.modified if match(f)],
                              [f for f in self._status.added if match(f)],
                              [f for f in self._status.removed if match(f)],
                              [], [], [], clean)

    @propertycache
    def _changedset(self):
        """Return the set of files changed in this context
        """
        changed = set(self._status.modified)
        changed.update(self._status.added)
        changed.update(self._status.removed)
        return changed
1980
1986
def makecachingfilectxfn(func):
    """Create a filectxfn that caches based on the path.

    We can't use util.cachefunc because it uses all arguments as the cache
    key and this creates a cycle since the arguments include the repo and
    memctx.
    """
    cache = {}

    def getfilectx(repo, memctx, path):
        # only 'path' participates in the cache key (see docstring)
        if path not in cache:
            cache[path] = func(repo, memctx, path)
        return cache[path]

    return getfilectx
1996
2002
def memfilefromctx(ctx):
    """Given a context return a memfilectx for ctx[path]

    This is a convenience method for building a memctx based on another
    context.
    """
    def getfilectx(repo, memctx, path):
        fctx = ctx[path]
        # this is weird but apparently we only keep track of one parent
        # (why not only store that instead of a tuple?)
        copied = fctx.renamed()
        if copied:
            copied = copied[0]
        return memfilectx(repo, path, fctx.data(),
                          islink=fctx.islink(), isexec=fctx.isexec(),
                          copied=copied, memctx=memctx)

    return getfilectx
2015
2021
def memfilefrompatch(patchstore):
    """Given a patch (e.g. patchstore object) return a memfilectx

    This is a convenience method for building a memctx based on a patchstore.
    """
    def getfilectx(repo, memctx, path):
        data, mode, copied = patchstore.getfile(path)
        if data is None:
            # path not touched by the patch
            return None
        islink, isexec = mode
        return memfilectx(repo, path, data, islink=islink,
                          isexec=isexec, copied=copied,
                          memctx=memctx)

    return getfilectx
2031
2037
class memctx(committablectx):
    """Use memctx to perform in-memory commits via localrepo.commitctx().

    Revision information is supplied at initialization time while
    related files data and is made available through a callback
    mechanism. 'repo' is the current localrepo, 'parents' is a
    sequence of two parent revisions identifiers (pass None for every
    missing parent), 'text' is the commit message and 'files' lists
    names of files touched by the revision (normalized and relative to
    repository root).

    filectxfn(repo, memctx, path) is a callable receiving the
    repository, the current memctx object and the normalized path of
    requested file, relative to repository root. It is fired by the
    commit function for every file in 'files', but calls order is
    undefined. If the file is available in the revision being
    committed (updated or added), filectxfn returns a memfilectx
    object. If the file was removed, filectxfn return None for recent
    Mercurial. Moved files are represented by marking the source file
    removed and the new file added with copy information (see
    memfilectx).

    user receives the committer name and defaults to current
    repository username, date is the commit date in any format
    supported by util.parsedate() and defaults to current date, extra
    is a dictionary of metadata or is left empty.
    """

    # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
    # Extensions that need to retain compatibility across Mercurial 3.1 can use
    # this field to determine what to do in filectxfn.
    _returnnoneformissingfiles = True

    def __init__(self, repo, parents, text, files, filectxfn, user=None,
                 date=None, extra=None, branch=None, editor=False):
        super(memctx, self).__init__(repo, text, user, date, extra)
        self._rev = None
        self._node = None
        # normalize missing parents (None) to the null revision
        parents = [(p or nullid) for p in parents]
        p1, p2 = parents
        self._parents = [changectx(self._repo, p) for p in (p1, p2)]
        files = sorted(set(files))
        self._files = files
        if branch is not None:
            self._extra['branch'] = encoding.fromlocal(branch)
        self.substate = {}

        # accept a patch.filestore or a plain context in place of a callable
        if isinstance(filectxfn, patch.filestore):
            filectxfn = memfilefrompatch(filectxfn)
        elif not callable(filectxfn):
            # if store is not callable, wrap it in a function
            filectxfn = memfilefromctx(filectxfn)

        # memoizing increases performance for e.g. vcs convert scenarios.
        self._filectxfn = makecachingfilectxfn(filectxfn)

        if editor:
            self._text = editor(self._repo, self, [])
            self._repo.savecommitmessage(self._text)

    def filectx(self, path, filelog=None):
        """get a file context from the working directory

        Returns None if file doesn't exist and should be removed."""
        return self._filectxfn(self._repo, self, path)

    def commit(self):
        """commit context to the repo"""
        return self._repo.commitctx(self)

    @propertycache
    def _manifest(self):
        """generate a manifest based on the return values of filectxfn"""

        # keep this simple for now; just worry about p1
        pctx = self._parents[0]
        man = pctx.manifest().copy()

        for f in self._status.modified:
            p1node = nullid
            p2node = nullid
            p = pctx[f].parents() # if file isn't in pctx, check p2?
            if len(p) > 0:
                p1node = p[0].filenode()
                if len(p) > 1:
                    p2node = p[1].filenode()
            man[f] = revlog.hash(self[f].data(), p1node, p2node)

        for f in self._status.added:
            # added files have no file-level parents
            man[f] = revlog.hash(self[f].data(), nullid, nullid)

        for f in self._status.removed:
            if f in man:
                del man[f]

        return man

    @propertycache
    def _status(self):
        """Calculate exact status from ``files`` specified at construction
        """
        man1 = self.p1().manifest()
        p2 = self._parents[1]
        # "1 < len(self._parents)" can't be used for checking
        # existence of the 2nd parent, because "memctx._parents" is
        # explicitly initialized by the list, of which length is 2.
        if p2.node() != nullid:
            man2 = p2.manifest()
            managing = lambda f: f in man1 or f in man2
        else:
            managing = lambda f: f in man1

        modified, added, removed = [], [], []
        for f in self._files:
            if not managing(f):
                added.append(f)
            elif self[f]:
                modified.append(f)
            else:
                # filectx() documented above returns None (falsy) for
                # files that should be removed
                removed.append(f)

        return scmutil.status(modified, added, removed, [], [], [], [])
2154
2160
class memfilectx(committablefilectx):
    """memfilectx represents an in-memory file to commit.

    See memctx and committablefilectx for more details.
    """
    def __init__(self, repo, path, data, islink=False,
                 isexec=False, copied=None, memctx=None):
        """
        path is the normalized file path relative to repository root.
        data is the file content as a string.
        islink is True if the file is a symbolic link.
        isexec is True if the file is executable.
        copied is the source file path if current file was copied in the
        revision being committed, or None."""
        super(memfilectx, self).__init__(repo, path, None, memctx)
        self._data = data
        # build the flags string from the link/exec bits ('l' before 'x')
        flags = ''
        if islink:
            flags = flags + 'l'
        if isexec:
            flags = flags + 'x'
        self._flags = flags
        if copied:
            # the copy revision is recalculated at commit time, so the
            # nullid placeholder is sufficient here
            self._copied = (copied, nullid)
        else:
            self._copied = None

    def data(self):
        """Return the in-memory file content."""
        return self._data

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        # need to figure out what to do here
        del self._changectx[self._path]

    def write(self, data, flags):
        """wraps repo.wwrite"""
        self._data = data
2187
2193
class overlayfilectx(committablefilectx):
    """Like memfilectx but take an original filectx and optional parameters to
    override parts of it. This is useful when fctx.data() is expensive (i.e.
    flag processor is expensive) and raw data, flags, and filenode could be
    reused (ex. rebase or mode-only amend a REVIDX_EXTSTORED file).
    """

    def __init__(self, originalfctx, datafunc=None, path=None, flags=None,
                 copied=None, ctx=None):
        """originalfctx: filecontext to duplicate

        datafunc: None or a function to override data (file content). It is a
        function to be lazy. path, flags, copied, ctx: None or overridden value

        copied could be (path, rev), or False. copied could also be just path,
        and will be converted to (path, nullid). This simplifies some callers.
        """

        if path is None:
            path = originalfctx.path()
        if ctx is None:
            ctx = originalfctx.changectx()
            # ctx not overridden: trivially "matches" without extra work
            ctxmatch = lambda: True
        else:
            # lazy comparison, only evaluated for the "reusable" test below
            ctxmatch = lambda: ctx == originalfctx.changectx()

        repo = originalfctx.repo()
        flog = originalfctx.filelog()
        super(overlayfilectx, self).__init__(repo, path, flog, ctx)

        if copied is None:
            copied = originalfctx.renamed()
            copiedmatch = lambda: True
        else:
            if copied and not isinstance(copied, tuple):
                # repo._filecommit will recalculate copyrev so nullid is okay
                copied = (copied, nullid)
            copiedmatch = lambda: copied == originalfctx.renamed()

        # When data, copied (could affect data), ctx (could affect filelog
        # parents) are not overridden, rawdata, rawflags, and filenode may be
        # reused (repo._filecommit should double check filelog parents).
        #
        # path, flags are not hashed in filelog (but in manifestlog) so they do
        # not affect reusable here.
        #
        # If ctx or copied is overridden to a same value with originalfctx,
        # still consider it's reusable. originalfctx.renamed() may be a bit
        # expensive so it's not called unless necessary. Assuming datafunc is
        # always expensive, do not call it for this "reusable" test.
        reusable = datafunc is None and ctxmatch() and copiedmatch()

        if datafunc is None:
            datafunc = originalfctx.data
        if flags is None:
            flags = originalfctx.flags()

        self._datafunc = datafunc
        self._flags = flags
        self._copied = copied

        if reusable:
            # copy extra fields from originalfctx
            attrs = ['rawdata', 'rawflags', '_filenode', '_filerev']
            for attr in attrs:
                if util.safehasattr(originalfctx, attr):
                    setattr(self, attr, getattr(originalfctx, attr))

    def data(self):
        # lazily evaluated: either the override or originalfctx.data
        return self._datafunc()
2258
2264
class metadataonlyctx(committablectx):
    """Like memctx but it's reusing the manifest of different commit.
    Intended to be used by lightweight operations that are creating
    metadata-only changes.

    Revision information is supplied at initialization time. 'repo' is the
    current localrepo, 'ctx' is original revision which manifest we're reusing
    'parents' is a sequence of two parent revisions identifiers (pass None for
    every missing parent), 'text' is the commit.

    user receives the committer name and defaults to current repository
    username, date is the commit date in any format supported by
    util.parsedate() and defaults to current date, extra is a dictionary of
    metadata or is left empty.
    """
    def __new__(cls, repo, originalctx, *args, **kwargs):
        # drop the extra constructor arguments before delegating to the
        # base class __new__
        return super(metadataonlyctx, cls).__new__(cls, repo)

    def __init__(self, repo, originalctx, parents, text, user=None, date=None,
                 extra=None, editor=False):
        super(metadataonlyctx, self).__init__(repo, text, user, date, extra)
        self._rev = None
        self._node = None
        self._originalctx = originalctx
        self._manifestnode = originalctx.manifestnode()
        # normalize missing parents (None) to the null revision
        parents = [(p or nullid) for p in parents]
        p1, p2 = self._parents = [changectx(self._repo, p) for p in parents]

        # sanity check to ensure that the reused manifest parents are
        # manifests of our commit parents
        mp1, mp2 = self.manifestctx().parents
        # NOTE(review): p1/p2 are changectx objects compared against the
        # nullid node; unless changectx equality special-cases node values,
        # "p1 != nullid" is always true and the guard reduces to the
        # manifestnode comparison alone -- confirm intent.
        if p1 != nullid and p1.manifestnode() != mp1:
            raise RuntimeError('can\'t reuse the manifest: '
                               'its p1 doesn\'t match the new ctx p1')
        if p2 != nullid and p2.manifestnode() != mp2:
            raise RuntimeError('can\'t reuse the manifest: '
                               'its p2 doesn\'t match the new ctx p2')

        self._files = originalctx.files()
        self.substate = {}

        if editor:
            self._text = editor(self._repo, self, [])
            self._repo.savecommitmessage(self._text)

    def manifestnode(self):
        # node of the manifest being reused from originalctx
        return self._manifestnode

    @property
    def _manifestctx(self):
        return self._repo.manifestlog[self._manifestnode]

    def filectx(self, path, filelog=None):
        # file data is unchanged; delegate to the original revision
        return self._originalctx.filectx(path, filelog=filelog)

    def commit(self):
        """commit context to the repo"""
        return self._repo.commitctx(self)

    @property
    def _manifest(self):
        return self._originalctx.manifest()

    @propertycache
    def _status(self):
        """Calculate exact status from ``files`` specified in the ``origctx``
        and parents manifests.
        """
        man1 = self.p1().manifest()
        p2 = self._parents[1]
        # "1 < len(self._parents)" can't be used for checking
        # existence of the 2nd parent, because "metadataonlyctx._parents" is
        # explicitly initialized by the list, of which length is 2.
        if p2.node() != nullid:
            man2 = p2.manifest()
            managing = lambda f: f in man1 or f in man2
        else:
            managing = lambda f: f in man1

        modified, added, removed = [], [], []
        for f in self._files:
            if not managing(f):
                added.append(f)
            elif self[f]:
                modified.append(f)
            else:
                # a falsy filectx means the file is being removed
                removed.append(f)

        return scmutil.status(modified, added, removed, [], [], [], [])
General Comments 0
You need to be logged in to leave comments. Login now