# Source: Mercurial `mercurial/context.py`, changeset r32752:dc7efa28 (default
# branch) — "context: avoid writing outdated dirstate out (issue5584)",
# authored by FUJIWARA Katsunori.
# Extracted from a side-by-side diff view covering lines 1-2354 (old) /
# 1-2365 (new).
1 # context.py - changeset and file context objects for mercurial
1 # context.py - changeset and file context objects for mercurial
2 #
2 #
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import errno
10 import errno
11 import os
11 import os
12 import re
12 import re
13 import stat
13 import stat
14
14
15 from .i18n import _
15 from .i18n import _
16 from .node import (
16 from .node import (
17 addednodeid,
17 addednodeid,
18 bin,
18 bin,
19 hex,
19 hex,
20 modifiednodeid,
20 modifiednodeid,
21 nullid,
21 nullid,
22 nullrev,
22 nullrev,
23 short,
23 short,
24 wdirid,
24 wdirid,
25 wdirnodes,
25 wdirnodes,
26 wdirrev,
26 wdirrev,
27 )
27 )
28 from . import (
28 from . import (
29 encoding,
29 encoding,
30 error,
30 error,
31 fileset,
31 fileset,
32 match as matchmod,
32 match as matchmod,
33 mdiff,
33 mdiff,
34 obsolete as obsmod,
34 obsolete as obsmod,
35 patch,
35 patch,
36 phases,
36 phases,
37 pycompat,
37 pycompat,
38 repoview,
38 repoview,
39 revlog,
39 revlog,
40 scmutil,
40 scmutil,
41 subrepo,
41 subrepo,
42 util,
42 util,
43 )
43 )
44
44
# Shorthand for the cached-property decorator used on lazily computed
# context attributes throughout this module.
propertycache = util.propertycache

# Predicate that fires on any byte outside the printable ASCII range;
# changectx.__init__ uses it to detect binary nodeids so they can be
# hex-encoded before being shown in error messages.
nonascii = re.compile(r'[^\x21-\x7f]').search
class basectx(object):
    """A basectx object represents the common logic for its children:
    changectx: read-only context that is already present in the repo,
    workingctx: a context that represents the working directory and can
    be committed,
    memctx: a context that represents changes in-memory and can also
    be committed."""

    def __new__(cls, repo, changeid='', *args, **kwargs):
        # Passing an existing context through returns it unchanged instead
        # of wrapping it a second time.
        if isinstance(changeid, basectx):
            return changeid

        o = super(basectx, cls).__new__(cls)
        o._repo = repo
        o._rev = nullrev
        o._node = nullid
        return o

    def __str__(self):
        r = short(self.node())
        if pycompat.ispy3:
            # short() yields bytes; native str on Python 3 needs a decode.
            return r.decode('ascii')
        return r

    def __bytes__(self):
        return short(self.node())

    def __int__(self):
        return self.rev()

    def __repr__(self):
        return r"<%s %s>" % (type(self).__name__, str(self))

    def __eq__(self, other):
        try:
            return type(self) == type(other) and self._rev == other._rev
        except AttributeError:
            return False

    def __ne__(self, other):
        return not (self == other)

    def __contains__(self, key):
        return key in self._manifest

    def __getitem__(self, key):
        return self.filectx(key)

    def __iter__(self):
        return iter(self._manifest)

    def _buildstatusmanifest(self, status):
        """Builds a manifest that includes the given status results, if this
        is a working copy context. For non-working copy contexts, it just
        returns the normal manifest."""
        return self.manifest()

    def _matchstatus(self, other, match):
        """return match.always if match is none

        This internal method provides a way for child objects to override
        the match operator.
        """
        return match or matchmod.always(self._repo.root, self._repo.getcwd())

    def _buildstatus(self, other, s, match, listignored, listclean,
                     listunknown):
        """build a status with respect to another context"""
        # Load earliest manifest first for caching reasons. More
        # specifically, if you have revisions 1000 and 1001, 1001 is
        # probably stored as a delta against 1000. Thus, if you read 1000
        # first, we'll reconstruct 1000 and cache it so that when you read
        # 1001, we just need to apply a delta to what's in the cache. So
        # that's one full reconstruction + one delta application.
        mf2 = None
        if self.rev() is not None and self.rev() < other.rev():
            mf2 = self._buildstatusmanifest(s)
        mf1 = other._buildstatusmanifest(s)
        if mf2 is None:
            mf2 = self._buildstatusmanifest(s)

        modified, added, removed, clean = [], [], [], []
        deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
        deletedset = set(deleted)
        diffentries = mf1.diff(mf2, match=match, clean=listclean)
        for fn, entry in diffentries.iteritems():
            if fn in deletedset:
                continue
            if entry is None:
                clean.append(fn)
                continue
            (node1, flag1), (node2, flag2) = entry
            if node1 is None:
                added.append(fn)
            elif node2 is None:
                removed.append(fn)
            elif flag1 != flag2:
                modified.append(fn)
            elif node2 not in wdirnodes:
                # When comparing files between two commits, we save time by
                # not comparing the file contents when the nodeids differ.
                # Note that this means we incorrectly report a reverted
                # change to a file as a modification.
                modified.append(fn)
            elif self[fn].cmp(other[fn]):
                modified.append(fn)
            else:
                clean.append(fn)

        if removed:
            # need to filter files if they are already reported as removed
            unknown = [fn for fn in unknown
                       if fn not in mf1 and (not match or match(fn))]
            ignored = [fn for fn in ignored
                       if fn not in mf1 and (not match or match(fn))]
            # if they're deleted, don't report them as removed
            removed = [fn for fn in removed if fn not in deletedset]

        return scmutil.status(modified, added, removed, deleted, unknown,
                              ignored, clean)

    @propertycache
    def substate(self):
        # Subrepository state for this context, parsed once and cached.
        return subrepo.state(self, self._repo.ui)

    def subrev(self, subpath):
        return self.substate[subpath][1]

    def rev(self):
        return self._rev

    def node(self):
        return self._node

    def hex(self):
        return hex(self.node())

    def manifest(self):
        return self._manifest

    def manifestctx(self):
        return self._manifestctx

    def repo(self):
        return self._repo

    def phasestr(self):
        return phases.phasenames[self.phase()]

    def mutable(self):
        return self.phase() > phases.public

    def getfileset(self, expr):
        return fileset.getfileset(self, expr)

    def obsolete(self):
        """True if the changeset is obsolete"""
        return self.rev() in obsmod.getrevs(self._repo, 'obsolete')

    def extinct(self):
        """True if the changeset is extinct"""
        return self.rev() in obsmod.getrevs(self._repo, 'extinct')

    def unstable(self):
        """True if the changeset is not obsolete but it's ancestor are"""
        return self.rev() in obsmod.getrevs(self._repo, 'unstable')

    def bumped(self):
        """True if the changeset try to be a successor of a public changeset

        Only non-public and non-obsolete changesets may be bumped.
        """
        return self.rev() in obsmod.getrevs(self._repo, 'bumped')

    def divergent(self):
        """Is a successors of a changeset with multiple possible successors set

        Only non-public and non-obsolete changesets may be divergent.
        """
        return self.rev() in obsmod.getrevs(self._repo, 'divergent')

    def troubled(self):
        """True if the changeset is either unstable, bumped or divergent"""
        return self.unstable() or self.bumped() or self.divergent()

    def troubles(self):
        """return the list of troubles affecting this changesets.

        Troubles are returned as strings. possible values are:
        - unstable,
        - bumped,
        - divergent.
        """
        troubles = []
        if self.unstable():
            troubles.append('unstable')
        if self.bumped():
            troubles.append('bumped')
        if self.divergent():
            troubles.append('divergent')
        return troubles

    def parents(self):
        """return contexts for each parent changeset"""
        return self._parents

    def p1(self):
        return self._parents[0]

    def p2(self):
        # Merge contexts carry two parents; otherwise synthesize the null
        # changeset so callers always get a context back.
        parents = self._parents
        if len(parents) == 2:
            return parents[1]
        return changectx(self._repo, nullrev)

    def _fileinfo(self, path):
        # Prefer whatever manifest data is already materialized: the full
        # manifest, then the manifest delta, and only then a fresh lookup
        # through the manifest log.
        if r'_manifest' in self.__dict__:
            try:
                return self._manifest[path], self._manifest.flags(path)
            except KeyError:
                raise error.ManifestLookupError(self._node, path,
                                                _('not found in manifest'))
        if r'_manifestdelta' in self.__dict__ or path in self.files():
            if path in self._manifestdelta:
                return (self._manifestdelta[path],
                        self._manifestdelta.flags(path))
        mfl = self._repo.manifestlog
        try:
            node, flag = mfl[self._changeset.manifest].find(path)
        except KeyError:
            raise error.ManifestLookupError(self._node, path,
                                            _('not found in manifest'))

        return node, flag

    def filenode(self, path):
        return self._fileinfo(path)[0]

    def flags(self, path):
        try:
            return self._fileinfo(path)[1]
        except error.LookupError:
            # Missing files simply have no flags.
            return ''

    def sub(self, path, allowcreate=True):
        '''return a subrepo for the stored revision of path, never wdir()'''
        return subrepo.subrepo(self, path, allowcreate=allowcreate)

    def nullsub(self, path, pctx):
        return subrepo.nullsubrepo(self, path, pctx)

    def workingsub(self, path):
        '''return a subrepo for the stored revision, or wdir if this is a wdir
        context.
        '''
        return subrepo.subrepo(self, path, allowwdir=True)

    def match(self, pats=None, include=None, exclude=None, default='glob',
              listsubrepos=False, badfn=None):
        r = self._repo
        return matchmod.match(r.root, r.getcwd(), pats,
                              include, exclude, default,
                              auditor=r.nofsauditor, ctx=self,
                              listsubrepos=listsubrepos, badfn=badfn)

    def diff(self, ctx2=None, match=None, **opts):
        """Returns a diff generator for the given contexts and matcher"""
        if ctx2 is None:
            ctx2 = self.p1()
        if ctx2 is not None:
            ctx2 = self._repo[ctx2]
        diffopts = patch.diffopts(self._repo.ui, opts)
        return patch.diff(self._repo, ctx2, self, match=match, opts=diffopts)

    def dirs(self):
        return self._manifest.dirs()

    def hasdir(self, dir):
        return self._manifest.hasdir(dir)

    def status(self, other=None, match=None, listignored=False,
               listclean=False, listunknown=False, listsubrepos=False):
        """return status of files between two nodes or node and working
        directory.

        If other is None, compare this node with working directory.

        returns (modified, added, removed, deleted, unknown, ignored, clean)
        """

        ctx1 = self
        ctx2 = self._repo[other]

        # This next code block is, admittedly, fragile logic that tests for
        # reversing the contexts and wouldn't need to exist if it weren't for
        # the fast (and common) code path of comparing the working directory
        # with its first parent.
        #
        # What we're aiming for here is the ability to call:
        #
        # workingctx.status(parentctx)
        #
        # If we always built the manifest for each context and compared those,
        # then we'd be done. But the special case of the above call means we
        # just copy the manifest of the parent.
        swapped = False
        if (not isinstance(ctx1, changectx)
            and isinstance(ctx2, changectx)):
            swapped = True
            ctx1, ctx2 = ctx2, ctx1

        match = ctx2._matchstatus(ctx1, match)
        r = scmutil.status([], [], [], [], [], [], [])
        r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
                              listunknown)

        if swapped:
            # Reverse added and removed. Clear deleted, unknown and ignored
            # as these make no sense to reverse.
            r = scmutil.status(r.modified, r.removed, r.added, [], [], [],
                               r.clean)

        if listsubrepos:
            for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
                try:
                    rev2 = ctx2.subrev(subpath)
                except KeyError:
                    # A subrepo that existed in node1 was deleted between
                    # node1 and node2 (inclusive). Thus, ctx2's substate
                    # won't contain that subpath. The best we can do is
                    # ignore it.
                    rev2 = None
                submatch = matchmod.subdirmatcher(subpath, match)
                s = sub.status(rev2, match=submatch, ignored=listignored,
                               clean=listclean, unknown=listunknown,
                               listsubrepos=True)
                for rfiles, sfiles in zip(r, s):
                    rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)

        for l in r:
            l.sort()

        return r
387
387
388
388
def makememctx(repo, parents, text, user, date, branch, files, store,
               editor=None, extra=None):
    """Build an in-memory changeset context whose file data comes from
    *store* (an object exposing getfile(path) -> (data, mode, copied))."""
    def getfilectx(repo, memctx, path):
        # A None payload signals that the file is absent from the store.
        data, mode, copied = store.getfile(path)
        if data is None:
            return None
        islink, isexec = mode
        return memfilectx(repo, path, data, islink=islink, isexec=isexec,
                          copied=copied, memctx=memctx)

    if extra is None:
        extra = {}
    if branch:
        extra['branch'] = encoding.fromlocal(branch)
    return memctx(repo, parents, text, files, getfilectx, user,
                  date, extra, editor)
405
405
def _filterederror(repo, changeid):
    """build an exception to be raised about a filtered changeid

    This is extracted in a function to help extensions (eg: evolve) to
    experiment with various message variants."""
    # The 'visible*' filters hide obsolete changesets, for which a more
    # actionable hint (--hidden) exists.
    if repo.filtername.startswith('visible'):
        hint = _('use --hidden to access hidden revisions')
        return error.FilteredRepoLookupError(
            _("hidden revision '%s'") % changeid, hint=hint)
    msg = _("filtered revision '%s' (not in '%s' subset)")
    return error.FilteredRepoLookupError(msg % (changeid, repo.filtername))
418
418
419 class changectx(basectx):
419 class changectx(basectx):
420 """A changecontext object makes access to data related to a particular
420 """A changecontext object makes access to data related to a particular
421 changeset convenient. It represents a read-only context already present in
421 changeset convenient. It represents a read-only context already present in
422 the repo."""
422 the repo."""
423 def __init__(self, repo, changeid=''):
423 def __init__(self, repo, changeid=''):
424 """changeid is a revision number, node, or tag"""
424 """changeid is a revision number, node, or tag"""
425
425
426 # since basectx.__new__ already took care of copying the object, we
426 # since basectx.__new__ already took care of copying the object, we
427 # don't need to do anything in __init__, so we just exit here
427 # don't need to do anything in __init__, so we just exit here
428 if isinstance(changeid, basectx):
428 if isinstance(changeid, basectx):
429 return
429 return
430
430
431 if changeid == '':
431 if changeid == '':
432 changeid = '.'
432 changeid = '.'
433 self._repo = repo
433 self._repo = repo
434
434
435 try:
435 try:
436 if isinstance(changeid, int):
436 if isinstance(changeid, int):
437 self._node = repo.changelog.node(changeid)
437 self._node = repo.changelog.node(changeid)
438 self._rev = changeid
438 self._rev = changeid
439 return
439 return
440 if not pycompat.ispy3 and isinstance(changeid, long):
440 if not pycompat.ispy3 and isinstance(changeid, long):
441 changeid = str(changeid)
441 changeid = str(changeid)
442 if changeid == 'null':
442 if changeid == 'null':
443 self._node = nullid
443 self._node = nullid
444 self._rev = nullrev
444 self._rev = nullrev
445 return
445 return
446 if changeid == 'tip':
446 if changeid == 'tip':
447 self._node = repo.changelog.tip()
447 self._node = repo.changelog.tip()
448 self._rev = repo.changelog.rev(self._node)
448 self._rev = repo.changelog.rev(self._node)
449 return
449 return
450 if changeid == '.' or changeid == repo.dirstate.p1():
450 if changeid == '.' or changeid == repo.dirstate.p1():
451 # this is a hack to delay/avoid loading obsmarkers
451 # this is a hack to delay/avoid loading obsmarkers
452 # when we know that '.' won't be hidden
452 # when we know that '.' won't be hidden
453 self._node = repo.dirstate.p1()
453 self._node = repo.dirstate.p1()
454 self._rev = repo.unfiltered().changelog.rev(self._node)
454 self._rev = repo.unfiltered().changelog.rev(self._node)
455 return
455 return
456 if len(changeid) == 20:
456 if len(changeid) == 20:
457 try:
457 try:
458 self._node = changeid
458 self._node = changeid
459 self._rev = repo.changelog.rev(changeid)
459 self._rev = repo.changelog.rev(changeid)
460 return
460 return
461 except error.FilteredRepoLookupError:
461 except error.FilteredRepoLookupError:
462 raise
462 raise
463 except LookupError:
463 except LookupError:
464 pass
464 pass
465
465
466 try:
466 try:
467 r = int(changeid)
467 r = int(changeid)
468 if '%d' % r != changeid:
468 if '%d' % r != changeid:
469 raise ValueError
469 raise ValueError
470 l = len(repo.changelog)
470 l = len(repo.changelog)
471 if r < 0:
471 if r < 0:
472 r += l
472 r += l
473 if r < 0 or r >= l and r != wdirrev:
473 if r < 0 or r >= l and r != wdirrev:
474 raise ValueError
474 raise ValueError
475 self._rev = r
475 self._rev = r
476 self._node = repo.changelog.node(r)
476 self._node = repo.changelog.node(r)
477 return
477 return
478 except error.FilteredIndexError:
478 except error.FilteredIndexError:
479 raise
479 raise
480 except (ValueError, OverflowError, IndexError):
480 except (ValueError, OverflowError, IndexError):
481 pass
481 pass
482
482
483 if len(changeid) == 40:
483 if len(changeid) == 40:
484 try:
484 try:
485 self._node = bin(changeid)
485 self._node = bin(changeid)
486 self._rev = repo.changelog.rev(self._node)
486 self._rev = repo.changelog.rev(self._node)
487 return
487 return
488 except error.FilteredLookupError:
488 except error.FilteredLookupError:
489 raise
489 raise
490 except (TypeError, LookupError):
490 except (TypeError, LookupError):
491 pass
491 pass
492
492
493 # lookup bookmarks through the name interface
493 # lookup bookmarks through the name interface
494 try:
494 try:
495 self._node = repo.names.singlenode(repo, changeid)
495 self._node = repo.names.singlenode(repo, changeid)
496 self._rev = repo.changelog.rev(self._node)
496 self._rev = repo.changelog.rev(self._node)
497 return
497 return
498 except KeyError:
498 except KeyError:
499 pass
499 pass
500 except error.FilteredRepoLookupError:
500 except error.FilteredRepoLookupError:
501 raise
501 raise
502 except error.RepoLookupError:
502 except error.RepoLookupError:
503 pass
503 pass
504
504
505 self._node = repo.unfiltered().changelog._partialmatch(changeid)
505 self._node = repo.unfiltered().changelog._partialmatch(changeid)
506 if self._node is not None:
506 if self._node is not None:
507 self._rev = repo.changelog.rev(self._node)
507 self._rev = repo.changelog.rev(self._node)
508 return
508 return
509
509
510 # lookup failed
510 # lookup failed
511 # check if it might have come from damaged dirstate
511 # check if it might have come from damaged dirstate
512 #
512 #
513 # XXX we could avoid the unfiltered if we had a recognizable
513 # XXX we could avoid the unfiltered if we had a recognizable
514 # exception for filtered changeset access
514 # exception for filtered changeset access
515 if changeid in repo.unfiltered().dirstate.parents():
515 if changeid in repo.unfiltered().dirstate.parents():
516 msg = _("working directory has unknown parent '%s'!")
516 msg = _("working directory has unknown parent '%s'!")
517 raise error.Abort(msg % short(changeid))
517 raise error.Abort(msg % short(changeid))
518 try:
518 try:
519 if len(changeid) == 20 and nonascii(changeid):
519 if len(changeid) == 20 and nonascii(changeid):
520 changeid = hex(changeid)
520 changeid = hex(changeid)
521 except TypeError:
521 except TypeError:
522 pass
522 pass
523 except (error.FilteredIndexError, error.FilteredLookupError,
523 except (error.FilteredIndexError, error.FilteredLookupError,
524 error.FilteredRepoLookupError):
524 error.FilteredRepoLookupError):
525 raise _filterederror(repo, changeid)
525 raise _filterederror(repo, changeid)
526 except IndexError:
526 except IndexError:
527 pass
527 pass
528 raise error.RepoLookupError(
528 raise error.RepoLookupError(
529 _("unknown revision '%s'") % changeid)
529 _("unknown revision '%s'") % changeid)
530
530
def __hash__(self):
    """Hash on the revision number; unrevisioned contexts hash by identity."""
    try:
        rev = self._rev
    except AttributeError:
        # no revision bound yet (e.g. partially constructed context)
        return id(self)
    return hash(rev)

def __nonzero__(self):
    # a context is falsy only for the null revision
    return self._rev != nullrev

__bool__ = __nonzero__
541
541
@propertycache
def _changeset(self):
    """lazily-loaded changelogrevision object for this changeset"""
    return self._repo.changelog.changelogrevision(self.rev())

@propertycache
def _manifest(self):
    """lazily-loaded, parsed manifest for this changeset"""
    return self._manifestctx.read()

@property
def _manifestctx(self):
    """manifestctx addressing this changeset's manifest node"""
    return self._repo.manifestlog[self._changeset.manifest]

@propertycache
def _manifestdelta(self):
    """lazily-loaded manifest delta against the linkrev parent"""
    return self._manifestctx.readdelta()

@propertycache
def _parents(self):
    """parent changectxs; a null second parent is omitted"""
    repo = self._repo
    p1, p2 = repo.changelog.parentrevs(self._rev)
    parents = [changectx(repo, p1)]
    if p2 != nullrev:
        parents.append(changectx(repo, p2))
    return parents
565
565
def changeset(self):
    """return the raw changelog entry as a tuple:
    (manifest, user, date, files, description, extra)
    """
    c = self._changeset
    return (c.manifest, c.user, c.date, c.files, c.description, c.extra)

def manifestnode(self):
    return self._changeset.manifest

# thin accessors over the parsed changelog entry / repo caches
def user(self):
    return self._changeset.user
def date(self):
    return self._changeset.date
def files(self):
    return self._changeset.files
def description(self):
    return self._changeset.description
def branch(self):
    # branch names live in 'extra' and are stored UTF-8; convert to local
    return encoding.tolocal(self._changeset.extra.get("branch"))
def closesbranch(self):
    return 'close' in self._changeset.extra
def extra(self):
    return self._changeset.extra
def tags(self):
    return self._repo.nodetags(self._node)
def bookmarks(self):
    return self._repo.nodebookmarks(self._node)
def phase(self):
    return self._repo._phasecache.phase(self._repo, self._rev)
def hidden(self):
    return self._rev in repoview.filterrevs(self._repo, 'visible')
601
601
def children(self):
    """return contexts for each child changeset"""
    return [changectx(self._repo, node)
            for node in self._repo.changelog.children(self._node)]

def ancestors(self):
    # walk the changelog from this revision toward the root
    for rev in self._repo.changelog.ancestors([self._rev]):
        yield changectx(self._repo, rev)

def descendants(self):
    # walk the changelog from this revision toward the heads
    for rev in self._repo.changelog.descendants([self._rev]):
        yield changectx(self._repo, rev)

def filectx(self, path, fileid=None, filelog=None):
    """get a file context from this changeset"""
    if fileid is None:
        fileid = self.filenode(path)
    return filectx(self._repo, path, fileid=fileid,
                   changectx=self, filelog=filelog)
621
621
def ancestor(self, c2, warn=False):
    """return the "best" ancestor context of self and c2

    If there are multiple candidates, it will show a message and check
    merge.preferancestor configuration before falling back to the
    revlog ancestor."""
    # deal with workingctxs, which carry no node of their own
    onode = c2._node
    if onode is None:
        onode = c2._parents[0]._node
    heads = self._repo.changelog.commonancestorsheads(self._node, onode)
    if not heads:
        anc = nullid
    elif len(heads) == 1:
        anc = heads[0]
    else:
        # experimental config: merge.preferancestor
        for r in self._repo.ui.configlist('merge', 'preferancestor', ['*']):
            try:
                ctx = changectx(self._repo, r)
            except error.RepoLookupError:
                continue
            anc = ctx.node()
            if anc in heads:
                break
        else:
            # no preferred candidate matched: use the revlog ancestor
            anc = self._repo.changelog.ancestor(self._node, onode)
        if warn:
            self._repo.ui.status(
                (_("note: using %s as ancestor of %s and %s\n") %
                 (short(anc), short(self._node), short(onode))) +
                ''.join(_(" alternatively, use --config "
                          "merge.preferancestor=%s\n") %
                        short(n) for n in sorted(heads) if n != anc))
    return changectx(self._repo, anc)

def descendant(self, other):
    """True if other is descendant of this changeset"""
    return self._repo.changelog.descendant(self._rev, other._rev)
661
661
def walk(self, match):
    '''Generates matching file names.'''

    # wrap match.bad so the complaint carries the node id, and so that
    # paths into valid subrepos (unknown to the manifest) are tolerated
    def bad(fn, msg):
        # The manifest doesn't know about subrepos, so don't complain about
        # paths into valid subrepos.
        if any(fn == s or fn.startswith(s + '/') for s in self.substate):
            return
        match.bad(fn, _('no such file in rev %s') % self)

    return self._manifest.walk(matchmod.badmatch(match, bad))

def matches(self, match):
    return self.walk(match)
679
679
680 class basefilectx(object):
680 class basefilectx(object):
681 """A filecontext object represents the common logic for its children:
681 """A filecontext object represents the common logic for its children:
682 filectx: read-only access to a filerevision that is already present
682 filectx: read-only access to a filerevision that is already present
683 in the repo,
683 in the repo,
684 workingfilectx: a filecontext that represents files from the working
684 workingfilectx: a filecontext that represents files from the working
685 directory,
685 directory,
686 memfilectx: a filecontext that represents files in-memory,
686 memfilectx: a filecontext that represents files in-memory,
687 overlayfilectx: duplicate another filecontext with some fields overridden.
687 overlayfilectx: duplicate another filecontext with some fields overridden.
688 """
688 """
@propertycache
def _filelog(self):
    # filelog for this file's path, opened on first use
    return self._repo.file(self._path)

@propertycache
def _changeid(self):
    """changelog revision this file context is attached to"""
    if r'_changeid' in self.__dict__:
        # explicitly provided at construction time
        return self._changeid
    elif r'_changectx' in self.__dict__:
        return self._changectx.rev()
    elif r'_descendantrev' in self.__dict__:
        # this file context was created from a revision with a known
        # descendant, we can (lazily) correct for linkrev aliases
        return self._adjustlinkrev(self._descendantrev)
    else:
        return self._filelog.linkrev(self._filerev)

@propertycache
def _filenode(self):
    if r'_fileid' in self.__dict__:
        return self._filelog.lookup(self._fileid)
    return self._changectx.filenode(self._path)

@propertycache
def _filerev(self):
    return self._filelog.rev(self._filenode)

@propertycache
def _repopath(self):
    return self._path
720
720
def __nonzero__(self):
    try:
        self._filenode
    except error.LookupError:
        # file is missing from this changeset
        return False
    return True

__bool__ = __nonzero__

def __str__(self):
    try:
        return "%s@%s" % (self.path(), self._changectx)
    except error.LookupError:
        # changeset lookup failed: show the path with unknown revision
        return "%s@???" % self.path()

def __repr__(self):
    return "<%s %s>" % (type(self).__name__, str(self))

def __hash__(self):
    try:
        return hash((self._path, self._filenode))
    except AttributeError:
        return id(self)

def __eq__(self, other):
    try:
        return (type(self) == type(other) and self._path == other._path
                and self._filenode == other._filenode)
    except AttributeError:
        return False

def __ne__(self, other):
    return not (self == other)
755
755
def filerev(self):
    # revision number within the filelog
    return self._filerev
def filenode(self):
    # node id within the filelog
    return self._filenode
@propertycache
def _flags(self):
    # flags ('l'/'x') as recorded in the owning changeset's manifest
    return self._changectx.flags(self._path)
def flags(self):
    return self._flags
# thin delegation helpers: most metadata lives on the owning changectx
def filelog(self):
    return self._filelog
def rev(self):
    return self._changeid
def linkrev(self):
    return self._filelog.linkrev(self._filerev)
def node(self):
    return self._changectx.node()
def hex(self):
    return self._changectx.hex()
def user(self):
    return self._changectx.user()
def date(self):
    return self._changectx.date()
def files(self):
    return self._changectx.files()
def description(self):
    return self._changectx.description()
def branch(self):
    return self._changectx.branch()
def extra(self):
    return self._changectx.extra()
def phase(self):
    return self._changectx.phase()
def phasestr(self):
    return self._changectx.phasestr()
def manifest(self):
    return self._changectx.manifest()
def changectx(self):
    return self._changectx
def renamed(self):
    return self._copied
def repo(self):
    return self._repo
def size(self):
    return len(self.data())

def path(self):
    return self._path
804
804
def isbinary(self):
    # unreadable file data is treated as non-binary
    try:
        return util.binary(self.data())
    except IOError:
        return False
def isexec(self):
    return 'x' in self.flags()
def islink(self):
    return 'l' in self.flags()
814
814
def isabsent(self):
    """whether this filectx represents a file not in self._changectx

    This is mainly for merge code to detect change/delete conflicts. This is
    expected to be True for all subclasses of basectx."""
    return False

_customcmp = False
def cmp(self, fctx):
    """compare with other file context

    returns True if different than fctx.
    """
    if fctx._customcmp:
        # the other side knows how to compare itself; delegate
        return fctx.cmp(self)

    if fctx._filenode is None:
        # if file data starts with '\1\n', empty metadata block is
        # prepended, which adds 4 bytes to filelog.size().
        maybeequal = (self._repo._encodefilterpats
                      or self.size() - 4 == fctx.size())
    else:
        maybeequal = False
    if maybeequal or self.size() == fctx.size():
        # sizes are compatible: fall back to content comparison
        return self._filelog.cmp(self._filenode, fctx.data())

    return True
840
840
def _adjustlinkrev(self, srcrev, inclusive=False):
    """return the first ancestor of <srcrev> introducing <fnode>

    If the linkrev of the file revision does not point to an ancestor of
    srcrev, we'll walk down the ancestors until we find one introducing
    this file revision.

    :srcrev: the changeset revision we search ancestors from
    :inclusive: if true, the src revision will also be checked
    """
    repo = self._repo
    cl = repo.unfiltered().changelog
    mfl = repo.manifestlog
    # fetch the linkrev
    lkr = self.linkrev()
    # hack to reuse ancestor computation when searching for renames
    memberanc = getattr(self, '_ancestrycontext', None)
    iteranc = None
    if srcrev is None:
        # wctx case, used by workingfilectx during mergecopy
        revs = [p.rev() for p in self._repo[None].parents()]
        inclusive = True # we skipped the real (revless) source
    else:
        revs = [srcrev]
    if memberanc is None:
        memberanc = iteranc = cl.ancestors(revs, lkr,
                                           inclusive=inclusive)
    # check if this linkrev is an ancestor of srcrev
    if lkr not in memberanc:
        if iteranc is None:
            iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
        fnode = self._filenode
        path = self._path
        for a in iteranc:
            ac = cl.read(a) # get changeset data (we avoid object creation)
            if path in ac[3]: # checking the 'files' field.
                # The file has been touched, check if the content is
                # similar to the one we search for.
                if fnode == mfl[ac[0]].readfast().get(path):
                    return a
        # In theory, we should never get out of that loop without a result.
        # But if manifest uses a buggy file revision (not children of the
        # one it replaces) we could. Such a buggy situation will likely
        # result is crash somewhere else at to some point.
    return lkr
886
886
def introrev(self):
    """return the rev of the changeset which introduced this file revision

    This method is different from linkrev because it take into account the
    changeset the filectx was created from. It ensures the returned
    revision is one of its ancestors. This prevents bugs from
    'linkrev-shadowing' when a file revision is used by multiple
    changesets.
    """
    lkr = self.linkrev()
    attrs = vars(self)
    hasctx = '_changeid' in attrs or '_changectx' in attrs
    if not hasctx or self.rev() == lkr:
        # no owning changeset, or the linkrev is already correct
        return self.linkrev()
    return self._adjustlinkrev(self.rev(), inclusive=True)

def _parentfilectx(self, path, fileid, filelog):
    """create parent filectx keeping ancestry info for _adjustlinkrev()"""
    fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
    if '_changeid' in vars(self) or '_changectx' in vars(self):
        # If self is associated with a changeset (probably explicitly
        # fed), ensure the created filectx is associated with a
        # changeset that is an ancestor of self.changectx.
        # This lets us later use _adjustlinkrev to get a correct link.
        fctx._descendantrev = self.rev()
        fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
    elif '_descendantrev' in vars(self):
        # Otherwise propagate _descendantrev if we have one associated.
        fctx._descendantrev = self._descendantrev
        fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
    return fctx
918
918
def parents(self):
    """return parent filectxs, substituting rename sources when recorded"""
    path = self._path
    flog = self._filelog
    pnodes = flog.parents(self._filenode)
    entries = [(path, n, flog) for n in pnodes if n != nullid]

    r = flog.renamed(self._filenode)
    if r:
        # - In the simple rename case, both parent are nullid, entries is
        #   empty.
        # - In case of merge, only one of the parent is null id and should
        #   be replaced with the rename information. This parent is
        #   -always- the first one.
        #
        # As null id have always been filtered out in the previous list
        # comprehension, inserting to 0 will always result in "replacing
        # first nullid parent with rename information.
        entries.insert(0, (r[0], r[1], self._repo.file(r[0])))

    return [self._parentfilectx(p, n, l) for p, n, l in entries]

def p1(self):
    """first parent filectx"""
    return self.parents()[0]

def p2(self):
    """second parent filectx, or a null filectx when there is none"""
    p = self.parents()
    if len(p) == 2:
        return p[1]
    return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
947
947
def annotate(self, follow=False, linenumber=False, skiprevs=None,
             diffopts=None):
    '''returns a list of tuples of ((ctx, number), line) for each line
    in the file, where ctx is the filectx of the node where
    that line was last changed; if linenumber parameter is true, number is
    the line number at the first appearance in the managed file, otherwise,
    number has a fixed value of False.
    '''

    def lines(text):
        # count logical lines, including a final unterminated one
        if text.endswith("\n"):
            return text.count("\n")
        return text.count("\n") + int(bool(text))

    if linenumber:
        def decorate(text, rev):
            return ([(rev, i) for i in xrange(1, lines(text) + 1)], text)
    else:
        def decorate(text, rev):
            return ([(rev, False)] * lines(text), text)

    getlog = util.lrucachefunc(lambda x: self._repo.file(x))

    def parents(f):
        # Cut _descendantrev here to mitigate the penalty of lazy linkrev
        # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
        # from the topmost introrev (= srcrev) down to p.linkrev() if it
        # isn't an ancestor of the srcrev.
        f._changeid
        pl = f.parents()

        # Don't return renamed parents if we aren't following.
        if not follow:
            pl = [p for p in pl if p.path() == f.path()]

        # renamed filectx won't have a filelog yet, so set it
        # from the cache to save time
        for p in pl:
            if '_filelog' not in p.__dict__:
                p._filelog = getlog(p.path())

        return pl

    # use linkrev to find the first changeset where self appeared
    base = self
    introrev = self.introrev()
    if self.rev() != introrev:
        base = self.filectx(self.filenode(), changeid=introrev)
    if getattr(base, '_ancestrycontext', None) is None:
        cl = self._repo.changelog
        if introrev is None:
            # wctx is not inclusive, but works because _ancestrycontext
            # is used to test filelog revisions
            ac = cl.ancestors([p.rev() for p in base.parents()],
                              inclusive=True)
        else:
            ac = cl.ancestors([introrev], inclusive=True)
        base._ancestrycontext = ac

    # This algorithm would prefer to be recursive, but Python is a
    # bit recursion-hostile. Instead we do an iterative
    # depth-first search.

    # 1st DFS pre-calculates pcache and needed
    visit = [base]
    pcache = {}
    needed = {base: 1}
    while visit:
        f = visit.pop()
        if f in pcache:
            continue
        pl = parents(f)
        pcache[f] = pl
        for p in pl:
            needed[p] = needed.get(p, 0) + 1
            if p not in pcache:
                visit.append(p)

    # 2nd DFS does the actual annotate
    visit[:] = [base]
    hist = {}
    while visit:
        f = visit[-1]
        if f in hist:
            visit.pop()
            continue

        ready = True
        pl = pcache[f]
        for p in pl:
            if p not in hist:
                ready = False
                visit.append(p)
        if ready:
            visit.pop()
            curr = decorate(f.data(), f)
            skipchild = False
            if skiprevs is not None:
                skipchild = f._changeid in skiprevs
            curr = _annotatepair([hist[p] for p in pl], f, curr, skipchild,
                                 diffopts)
            # release parent data we no longer need to bound memory use
            for p in pl:
                if needed[p] == 1:
                    del hist[p]
                    del needed[p]
                else:
                    needed[p] -= 1

            hist[f] = curr
            del pcache[f]

    return zip(hist[base][0], hist[base][1].splitlines(True))
1060
1060
    def ancestors(self, followfirst=False):
        """Yield ancestor filectxs of this file context.

        Candidates are kept in ``visit`` keyed by ``(linkrev, filenode)``
        and popped via ``max(visit)``, so contexts are produced in
        descending linkrev order.  If ``followfirst`` is true, only the
        first parent of each visited context is followed.
        """
        visit = {}
        c = self
        if followfirst:
            # slice below keeps only p1
            cut = 1
        else:
            cut = None

        while True:
            for parent in c.parents()[:cut]:
                visit[(parent.linkrev(), parent.filenode())] = parent
            if not visit:
                break
            # pop the pending context with the highest (linkrev, filenode)
            c = visit.pop(max(visit))
            yield c
1076
1076
def _annotatepair(parents, childfctx, child, skipchild, diffopts):
    r'''
    Given parent and child fctxes and annotate data for parents, for all lines
    in either parent that match the child, annotate the child with the parent's
    data.

    Additionally, if `skipchild` is True, replace all other lines with parent
    annotate data as well such that child is never blamed for any lines.

    >>> oldfctx = 'old'
    >>> p1fctx, p2fctx, childfctx = 'p1', 'p2', 'c'
    >>> olddata = 'a\nb\n'
    >>> p1data = 'a\nb\nc\n'
    >>> p2data = 'a\nc\nd\n'
    >>> childdata = 'a\nb2\nc\nc2\nd\n'
    >>> diffopts = mdiff.diffopts()

    >>> def decorate(text, rev):
    ...     return ([(rev, i) for i in xrange(1, text.count('\n') + 1)], text)

    Basic usage:

    >>> oldann = decorate(olddata, oldfctx)
    >>> p1ann = decorate(p1data, p1fctx)
    >>> p1ann = _annotatepair([oldann], p1fctx, p1ann, False, diffopts)
    >>> p1ann[0]
    [('old', 1), ('old', 2), ('p1', 3)]
    >>> p2ann = decorate(p2data, p2fctx)
    >>> p2ann = _annotatepair([oldann], p2fctx, p2ann, False, diffopts)
    >>> p2ann[0]
    [('old', 1), ('p2', 2), ('p2', 3)]

    Test with multiple parents (note the difference caused by ordering):

    >>> childann = decorate(childdata, childfctx)
    >>> childann = _annotatepair([p1ann, p2ann], childfctx, childann, False,
    ...                          diffopts)
    >>> childann[0]
    [('old', 1), ('c', 2), ('p2', 2), ('c', 4), ('p2', 3)]

    >>> childann = decorate(childdata, childfctx)
    >>> childann = _annotatepair([p2ann, p1ann], childfctx, childann, False,
    ...                          diffopts)
    >>> childann[0]
    [('old', 1), ('c', 2), ('p1', 3), ('c', 4), ('p2', 3)]

    Test with skipchild (note the difference caused by ordering):

    >>> childann = decorate(childdata, childfctx)
    >>> childann = _annotatepair([p1ann, p2ann], childfctx, childann, True,
    ...                          diffopts)
    >>> childann[0]
    [('old', 1), ('old', 2), ('p2', 2), ('p2', 2), ('p2', 3)]

    >>> childann = decorate(childdata, childfctx)
    >>> childann = _annotatepair([p2ann, p1ann], childfctx, childann, True,
    ...                          diffopts)
    >>> childann[0]
    [('old', 1), ('old', 2), ('p1', 3), ('p1', 3), ('p2', 3)]
    '''
    # one block list per parent: diff of parent text vs child text
    pblocks = [(parent, mdiff.allblocks(parent[1], child[1], opts=diffopts))
               for parent in parents]

    if skipchild:
        # Need to iterate over the blocks twice -- make it a list
        pblocks = [(p, list(blocks)) for (p, blocks) in pblocks]
    # Mercurial currently prefers p2 over p1 for annotate.
    # TODO: change this?
    for parent, blocks in pblocks:
        for (a1, a2, b1, b2), t in blocks:
            # Changed blocks ('!') or blocks made only of blank lines ('~')
            # belong to the child.  Only equal blocks ('=') copy parent
            # annotate data onto the child.
            if t == '=':
                child[0][b1:b2] = parent[0][a1:a2]

    if skipchild:
        # Now try and match up anything that couldn't be matched,
        # Reversing pblocks maintains bias towards p2, matching above
        # behavior.
        pblocks.reverse()

        # The heuristics are:
        # * Work on blocks of changed lines (effectively diff hunks with -U0).
        #   This could potentially be smarter but works well enough.
        # * For a non-matching section, do a best-effort fit. Match lines in
        #   diff hunks 1:1, dropping lines as necessary.
        # * Repeat the last line as a last resort.

        # First, replace as much as possible without repeating the last line.
        remaining = [(parent, []) for parent, _blocks in pblocks]
        for idx, (parent, blocks) in enumerate(pblocks):
            for (a1, a2, b1, b2), _t in blocks:
                if a2 - a1 >= b2 - b1:
                    for bk in xrange(b1, b2):
                        # only replace lines still blamed on the child
                        if child[0][bk][0] == childfctx:
                            ak = min(a1 + (bk - b1), a2 - 1)
                            child[0][bk] = parent[0][ak]
                else:
                    remaining[idx][1].append((a1, a2, b1, b2))

        # Then, look at anything left, which might involve repeating the last
        # line.
        for parent, blocks in remaining:
            for a1, a2, b1, b2 in blocks:
                for bk in xrange(b1, b2):
                    if child[0][bk][0] == childfctx:
                        # clamp to the parent hunk's last line if child's
                        # hunk is longer
                        ak = min(a1 + (bk - b1), a2 - 1)
                        child[0][bk] = parent[0][ak]
    return child
1186
1186
class filectx(basefilectx):
    """A filecontext object makes access to data related to a particular
    filerevision convenient."""
    def __init__(self, repo, path, changeid=None, fileid=None,
                 filelog=None, changectx=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node.

        At least one of changeid, fileid or changectx must be given;
        whichever are missing are lazily derived by propertycaches on
        the base class.
        """
        self._repo = repo
        self._path = path

        assert (changeid is not None
                or fileid is not None
                or changectx is not None), \
                ("bad args: changeid=%r, fileid=%r, changectx=%r"
                 % (changeid, fileid, changectx))

        if filelog is not None:
            self._filelog = filelog

        if changeid is not None:
            self._changeid = changeid
        if changectx is not None:
            self._changectx = changectx
        if fileid is not None:
            self._fileid = fileid

    @propertycache
    def _changectx(self):
        try:
            return changectx(self._repo, self._changeid)
        except error.FilteredRepoLookupError:
            # Linkrev may point to any revision in the repository. When the
            # repository is filtered this may lead to `filectx` trying to build
            # `changectx` for filtered revision. In such case we fallback to
            # creating `changectx` on the unfiltered version of the reposition.
            # This fallback should not be an issue because `changectx` from
            # `filectx` are not used in complex operations that care about
            # filtering.
            #
            # This fallback is a cheap and dirty fix that prevent several
            # crashes. It does not ensure the behavior is correct. However the
            # behavior was not correct before filtering either and "incorrect
            # behavior" is seen as better as "crash"
            #
            # Linkrevs have several serious troubles with filtering that are
            # complicated to solve. Proper handling of the issue here should be
            # considered when solving linkrev issue are on the table.
            return changectx(self._repo.unfiltered(), self._changeid)

    def filectx(self, fileid, changeid=None):
        '''opens an arbitrary revision of the file without
        opening a new filelog'''
        return filectx(self._repo, self._path, fileid=fileid,
                       filelog=self._filelog, changeid=changeid)

    def rawdata(self):
        """Return revision data without filelog flag processing applied."""
        return self._filelog.revision(self._filenode, raw=True)

    def rawflags(self):
        """low-level revlog flags"""
        return self._filelog.flags(self._filerev)

    def data(self):
        """Return this file revision's content, honoring censor policy."""
        try:
            return self._filelog.read(self._filenode)
        except error.CensoredNodeError:
            # censored content is replaced by the empty string when the
            # configured policy is "ignore"; otherwise reading aborts
            if self._repo.ui.config("censor", "policy", "abort") == "ignore":
                return ""
            raise error.Abort(_("censored node: %s") % short(self._filenode),
                              hint=_("set censor.policy to ignore errors"))

    def size(self):
        """Return the size of this file revision as stored in the filelog."""
        return self._filelog.size(self._filerev)

    @propertycache
    def _copied(self):
        """check if file was actually renamed in this changeset revision

        If rename logged in file revision, we report copy for changeset only
        if file revisions linkrev points back to the changeset in question
        or both changeset parents contain different file revisions.
        """

        renamed = self._filelog.renamed(self._filenode)
        if not renamed:
            return renamed

        if self.rev() == self.linkrev():
            return renamed

        name = self.path()
        fnode = self._filenode
        for p in self._changectx.parents():
            try:
                # if a parent already has this exact file revision, the
                # "rename" is inherited, not introduced here
                if fnode == p.filenode(name):
                    return None
            except error.LookupError:
                pass
        return renamed

    def children(self):
        # hard for renames
        c = self._filelog.children(self._filenode)
        return [filectx(self._repo, self._path, fileid=x,
                        filelog=self._filelog) for x in c]
1292
1292
def _changesrange(fctx1, fctx2, linerange2, diffopts):
    """Return `(diffinrange, linerange1)` where `diffinrange` is True
    if diff from fctx2 to fctx1 has changes in linerange2 and
    `linerange1` is the new line range for fctx1.
    """
    allblocks = mdiff.allblocks(fctx1.data(), fctx2.data(), diffopts)
    inrangeblocks, linerange1 = mdiff.blocksinrange(allblocks, linerange2)
    # a '!' block type marks an actual change within the filtered range
    haschanges = False
    for _block, blocktype in inrangeblocks:
        if blocktype == '!':
            haschanges = True
            break
    return haschanges, linerange1
1302
1302
def blockancestors(fctx, fromline, toline, followfirst=False):
    """Yield ancestors of `fctx` with respect to the block of lines within
    `fromline`-`toline` range.

    Each generated item is a ``(fctx, (fromline, toline))`` pair giving
    the ancestor context and the line range the block maps to there.
    """
    diffopts = patch.diffopts(fctx._repo.ui)
    introrev = fctx.introrev()
    if fctx.rev() != introrev:
        # rebase fctx onto the changeset that actually introduced it
        fctx = fctx.filectx(fctx.filenode(), changeid=introrev)
    # pending contexts, keyed so that max(visit) pops highest linkrev first
    visit = {(fctx.linkrev(), fctx.filenode()): (fctx, (fromline, toline))}
    while visit:
        c, linerange2 = visit.pop(max(visit))
        pl = c.parents()
        if followfirst:
            pl = pl[:1]
        if not pl:
            # The block originates from the initial revision.
            yield c, linerange2
            continue
        inrange = False
        for p in pl:
            inrangep, linerange1 = _changesrange(p, c, linerange2, diffopts)
            inrange = inrange or inrangep
            if linerange1[0] == linerange1[1]:
                # Parent's linerange is empty, meaning that the block got
                # introduced in this revision; no need to go further in this
                # branch.
                continue
            # Set _descendantrev with 'c' (a known descendant) so that, when
            # _adjustlinkrev is called for 'p', it receives this descendant
            # (as srcrev) instead possibly topmost introrev.
            p._descendantrev = c.rev()
            visit[p.linkrev(), p.filenode()] = p, linerange1
        if inrange:
            yield c, linerange2
1337
1337
def blockdescendants(fctx, fromline, toline):
    """Yield descendants of `fctx` with respect to the block of lines within
    `fromline`-`toline` range.

    Items are ``(fctx, (fromline, toline))`` pairs, produced in filelog
    revision order.
    """
    # First possibly yield 'fctx' if it has changes in range with respect to
    # its parents.
    try:
        c, linerange1 = next(blockancestors(fctx, fromline, toline))
    except StopIteration:
        pass
    else:
        if c == fctx:
            yield c, linerange1

    diffopts = patch.diffopts(fctx._repo.ui)
    fl = fctx.filelog()
    # filerev -> (fctx, linerange) for already-processed revisions
    seen = {fctx.filerev(): (fctx, (fromline, toline))}
    for i in fl.descendants([fctx.filerev()]):
        c = fctx.filectx(i)
        inrange = False
        for x in fl.parentrevs(i):
            try:
                p, linerange2 = seen[x]
            except KeyError:
                # nullrev or other branch
                continue
            inrangep, linerange1 = _changesrange(c, p, linerange2, diffopts)
            inrange = inrange or inrangep
            # If revision 'i' has been seen (it's a merge), we assume that its
            # line range is the same independently of which parents was used
            # to compute it.
            assert i not in seen or seen[i][1] == linerange1, (
                'computed line range for %s is not consistent between '
                'ancestor branches' % c)
            seen[i] = c, linerange1
        if inrange:
            yield c, linerange1
1375
1375
1376 class committablectx(basectx):
1376 class committablectx(basectx):
1377 """A committablectx object provides common functionality for a context that
1377 """A committablectx object provides common functionality for a context that
1378 wants the ability to commit, e.g. workingctx or memctx."""
1378 wants the ability to commit, e.g. workingctx or memctx."""
    def __init__(self, repo, text="", user=None, date=None, extra=None,
                 changes=None):
        """Initialize a not-yet-committed context.

        Optional values left unset here are computed lazily by the
        corresponding propertycaches (_date, _user, _status).  `extra`
        is copied, and a 'branch' entry is filled in from the dirstate
        when absent.
        """
        self._repo = repo
        self._rev = None
        self._node = None
        self._text = text
        if date:
            self._date = util.parsedate(date)
        if user:
            self._user = user
        if changes:
            self._status = changes

        self._extra = {}
        if extra:
            self._extra = extra.copy()
        if 'branch' not in self._extra:
            try:
                branch = encoding.fromlocal(self._repo.dirstate.branch())
            except UnicodeDecodeError:
                raise error.Abort(_('branch name not in UTF-8!'))
            self._extra['branch'] = branch
            if self._extra['branch'] == '':
                # an empty branch name means the default branch
                self._extra['branch'] = 'default'
1403
1403
    def __str__(self):
        # first parent plus a '+' marker denoting uncommitted changes
        return str(self._parents[0]) + r"+"
1406
1406
    def __bytes__(self):
        # bytes form of __str__: first parent plus a '+' marker
        return bytes(self._parents[0]) + "+"
1409
1409
    def __nonzero__(self):
        # a committable context is always truthy
        return True

    __bool__ = __nonzero__
1414
1414
    def _buildflagfunc(self):
        # Create a fallback function for getting file flags when the
        # filesystem doesn't support them

        copiesget = self._repo.dirstate.copies().get
        parents = self.parents()
        if len(parents) < 2:
            # when we have one parent, it's easy: copy from parent
            man = parents[0].manifest()
            def func(f):
                # resolve copy source first so renamed files inherit flags
                f = copiesget(f, f)
                return man.flags(f)
        else:
            # merges are tricky: we try to reconstruct the unstored
            # result from the merge (issue1802)
            p1, p2 = parents
            pa = p1.ancestor(p2)
            m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()

            def func(f):
                f = copiesget(f, f) # may be wrong for merges with copies
                fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
                if fl1 == fl2:
                    return fl1
                # one side changed the flag, the other kept the ancestor's:
                # the changed side wins
                if fl1 == fla:
                    return fl2
                if fl2 == fla:
                    return fl1
                return '' # punt for conflicts

        return func
1446
1446
    @propertycache
    def _flagfunc(self):
        # filesystem flags with the manifest-based reconstruction above
        # as fallback
        return self._repo.dirstate.flagfunc(self._buildflagfunc)
1450
1450
    @propertycache
    def _status(self):
        # working directory status, computed lazily on first access
        return self._repo.status()
1454
1454
    @propertycache
    def _user(self):
        # default committer identity from the configuration
        return self._repo.ui.username()
1458
1458
    @propertycache
    def _date(self):
        # commit date: the 'devel.default-date' config value if set
        # (useful for reproducible tests), otherwise the current time
        ui = self._repo.ui
        date = ui.configdate('devel', 'default-date')
        if date is None:
            date = util.makedate()
        return date
1466
1466
    def subrev(self, subpath):
        # an uncommitted context has no recorded subrepo revision
        return None
1469
1469
    def manifestnode(self):
        # no manifest node exists until the context is committed
        return None
    def user(self):
        # explicit user if one was given, else the configured username
        return self._user or self._repo.ui.username()
    def date(self):
        # parsed (timestamp, tzoffset) pair from _date
        return self._date
    def description(self):
        # the commit message text
        return self._text
1478 def files(self):
1478 def files(self):
1479 return sorted(self._status.modified + self._status.added +
1479 return sorted(self._status.modified + self._status.added +
1480 self._status.removed)
1480 self._status.removed)
1481
1481
    def modified(self):
        # files modified relative to the parent(s)
        return self._status.modified
    def added(self):
        # files added relative to the parent(s)
        return self._status.added
    def removed(self):
        # files removed relative to the parent(s)
        return self._status.removed
    def deleted(self):
        # files tracked but missing from the working directory
        return self._status.deleted
    def branch(self):
        # branch name converted to the local encoding
        return encoding.tolocal(self._extra['branch'])
    def closesbranch(self):
        # the presence of a 'close' extra marks a branch-closing commit
        return 'close' in self._extra
    def extra(self):
        # the extra metadata dict (always holds at least 'branch')
        return self._extra
1496
1496
    def tags(self):
        # an uncommitted context carries no tags
        return []
1499
1499
1500 def bookmarks(self):
1500 def bookmarks(self):
1501 b = []
1501 b = []
1502 for p in self.parents():
1502 for p in self.parents():
1503 b.extend(p.bookmarks())
1503 b.extend(p.bookmarks())
1504 return b
1504 return b
1505
1505
1506 def phase(self):
1506 def phase(self):
1507 phase = phases.draft # default phase to draft
1507 phase = phases.draft # default phase to draft
1508 for p in self.parents():
1508 for p in self.parents():
1509 phase = max(phase, p.phase())
1509 phase = max(phase, p.phase())
1510 return phase
1510 return phase
1511
1511
    def hidden(self):
        # an uncommitted context is never hidden
        return False
1514
1514
    def children(self):
        # an uncommitted context has no children
        return []
1517
1517
    def flags(self, path):
        """Return the flags string for `path` ('' when unknown/absent)."""
        if r'_manifest' in self.__dict__:
            # a manifest has already been materialized for this context:
            # trust it over the filesystem
            try:
                return self._manifest.flags(path)
            except KeyError:
                return ''

        try:
            return self._flagfunc(path)
        except OSError:
            # e.g. the path no longer exists in the working directory
            return ''
1529
1529
    def ancestor(self, c2):
        """return the "best" ancestor context of self and c2"""
        return self._parents[0].ancestor(c2) # punt on two parents for now
1533
1533
    def walk(self, match):
        '''Generates matching file names.

        Walks the dirstate (including subrepo paths) and returns a
        sorted list of files matching `match`.
        '''
        return sorted(self._repo.dirstate.walk(match, sorted(self.substate),
                                               True, False))
1538
1538
def matches(self, match):
    """Return the sorted list of tracked files matching ``match``."""
    return sorted(self._repo.dirstate.matches(match))
1541
1541
def ancestors(self):
    """Iterate over the parents, then all their changelog ancestors."""
    for p in self._parents:
        yield p
    for a in self._repo.changelog.ancestors(
        [p.rev() for p in self._parents]):
        yield changectx(self._repo, a)
1548
1548
def markcommitted(self, node):
    """Perform post-commit cleanup necessary after committing this ctx

    Specifically, this updates backing stores this working context
    wraps to reflect the fact that the changes reflected by this
    workingctx have been committed. For example, it marks
    modified and added files as normal in the dirstate.

    """

    with self._repo.dirstate.parentchange():
        for f in self.modified() + self.added():
            self._repo.dirstate.normal(f)
        for f in self.removed():
            self._repo.dirstate.drop(f)
        self._repo.dirstate.setparents(node)

    # write changes out explicitly, because nesting wlock at
    # runtime may prevent 'wlock.release()' in 'repo.commit()'
    # from immediately doing so for subsequent changing files
    self._repo.dirstate.write(self._repo.currenttransaction())
1570
1570
def dirty(self, missing=False, merge=True, branch=True):
    """A committable (not-yet-materialized) context is never dirty."""
    return False
1573
1573
class workingctx(committablectx):
    """A workingctx object makes access to data related to
    the current working directory convenient.
    date - any valid date string or (unixtime, offset), or None.
    user - username string, or None.
    extra - a dictionary of extra values, or None.
    changes - a list of file lists as returned by localrepo.status()
               or None to use the repository status.
    """
    def __init__(self, repo, text="", user=None, date=None, extra=None,
                 changes=None):
        super(workingctx, self).__init__(repo, text, user, date, extra, changes)

    def __iter__(self):
        # iterate tracked files, skipping those marked removed ('r')
        d = self._repo.dirstate
        for f in d:
            if d[f] != 'r':
                yield f

    def __contains__(self, key):
        # a file is "in" the working context unless untracked or removed
        return self._repo.dirstate[key] not in "?r"

    def hex(self):
        return hex(wdirid)

    @propertycache
    def _parents(self):
        p = self._repo.dirstate.parents()
        if p[1] == nullid:
            p = p[:-1]
        return [changectx(self._repo, x) for x in p]

    def filectx(self, path, filelog=None):
        """get a file context from the working directory"""
        return workingfilectx(self._repo, path, workingctx=self,
                              filelog=filelog)

    def dirty(self, missing=False, merge=True, branch=True):
        "check whether a working directory is modified"
        # check subrepos first
        for s in sorted(self.substate):
            if self.sub(s).dirty():
                return True
        # check current working dir
        return ((merge and self.p2()) or
                (branch and self.branch() != self.p1().branch()) or
                self.modified() or self.added() or self.removed() or
                (missing and self.deleted()))

    def add(self, list, prefix=""):
        """Schedule the given files for tracking; return rejected names."""
        join = lambda f: os.path.join(prefix, f)
        with self._repo.wlock():
            ui, ds = self._repo.ui, self._repo.dirstate
            rejected = []
            lstat = self._repo.wvfs.lstat
            for f in list:
                scmutil.checkportable(ui, join(f))
                try:
                    st = lstat(f)
                except OSError:
                    ui.warn(_("%s does not exist!\n") % join(f))
                    rejected.append(f)
                    continue
                if st.st_size > 10000000:
                    ui.warn(_("%s: up to %d MB of RAM may be required "
                              "to manage this file\n"
                              "(use 'hg revert %s' to cancel the "
                              "pending addition)\n")
                            % (f, 3 * st.st_size // 1000000, join(f)))
                if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
                    ui.warn(_("%s not added: only files and symlinks "
                              "supported currently\n") % join(f))
                    rejected.append(f)
                elif ds[f] in 'amn':
                    ui.warn(_("%s already tracked!\n") % join(f))
                elif ds[f] == 'r':
                    ds.normallookup(f)
                else:
                    ds.add(f)
            return rejected

    def forget(self, files, prefix=""):
        """Stop tracking the given files; return rejected names."""
        join = lambda f: os.path.join(prefix, f)
        with self._repo.wlock():
            rejected = []
            for f in files:
                if f not in self._repo.dirstate:
                    self._repo.ui.warn(_("%s not tracked!\n") % join(f))
                    rejected.append(f)
                elif self._repo.dirstate[f] != 'a':
                    self._repo.dirstate.remove(f)
                else:
                    self._repo.dirstate.drop(f)
            return rejected

    def undelete(self, list):
        """Restore removed files from a parent into the working directory."""
        pctxs = self.parents()
        with self._repo.wlock():
            for f in list:
                if self._repo.dirstate[f] != 'r':
                    self._repo.ui.warn(_("%s not removed!\n") % f)
                else:
                    fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f]
                    t = fctx.data()
                    self._repo.wwrite(f, t, fctx.flags())
                    self._repo.dirstate.normal(f)

    def copy(self, source, dest):
        """Record that ``dest`` was copied from ``source`` in the dirstate."""
        try:
            st = self._repo.wvfs.lstat(dest)
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
            self._repo.ui.warn(_("%s does not exist!\n") % dest)
            return
        if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
            self._repo.ui.warn(_("copy failed: %s is not a file or a "
                                 "symbolic link\n") % dest)
        else:
            with self._repo.wlock():
                if self._repo.dirstate[dest] in '?':
                    self._repo.dirstate.add(dest)
                elif self._repo.dirstate[dest] in 'r':
                    self._repo.dirstate.normallookup(dest)
                self._repo.dirstate.copy(source, dest)

    def match(self, pats=None, include=None, exclude=None, default='glob',
              listsubrepos=False, badfn=None):
        r = self._repo

        # Only a case insensitive filesystem needs magic to translate user input
        # to actual case in the filesystem.
        icasefs = not util.fscasesensitive(r.root)
        return matchmod.match(r.root, r.getcwd(), pats, include, exclude,
                              default, auditor=r.auditor, ctx=self,
                              listsubrepos=listsubrepos, badfn=badfn,
                              icasefs=icasefs)

    def _filtersuspectsymlink(self, files):
        if not files or self._repo.dirstate._checklink:
            return files

        # Symlink placeholders may get non-symlink-like contents
        # via user error or dereferencing by NFS or Samba servers,
        # so we filter out any placeholders that don't look like a
        # symlink
        sane = []
        for f in files:
            if self.flags(f) == 'l':
                d = self[f].data()
                if d == '' or len(d) >= 1024 or '\n' in d or util.binary(d):
                    self._repo.ui.debug('ignoring suspect symlink placeholder'
                                        ' "%s"\n' % f)
                    continue
            sane.append(f)
        return sane

    def _checklookup(self, files):
        # check for any possibly clean files
        if not files:
            return [], [], []

        modified = []
        deleted = []
        fixup = []
        pctx = self._parents[0]
        # do a full compare of any files that might have changed
        for f in sorted(files):
            try:
                # This will return True for a file that got replaced by a
                # directory in the interim, but fixing that is pretty hard.
                if (f not in pctx or self.flags(f) != pctx.flags(f)
                    or pctx[f].cmp(self[f])):
                    modified.append(f)
                else:
                    fixup.append(f)
            except (IOError, OSError):
                # A file become inaccessible in between? Mark it as deleted,
                # matching dirstate behavior (issue5584).
                # The dirstate has more complex behavior around whether a
                # missing file matches a directory, etc, but we don't need to
                # bother with that: if f has made it to this point, we're sure
                # it's in the dirstate.
                deleted.append(f)

        # update dirstate for files that are actually clean
        if fixup:
            try:
                oldid = self._repo.dirstate.identity()

                # updating the dirstate is optional
                # so we don't wait on the lock
                # wlock can invalidate the dirstate, so cache normal _after_
                # taking the lock
                with self._repo.wlock(False):
                    if self._repo.dirstate.identity() == oldid:
                        normal = self._repo.dirstate.normal
                        for f in fixup:
                            normal(f)
                        # write changes out explicitly, because nesting
                        # wlock at runtime may prevent 'wlock.release()'
                        # after this block from doing so for subsequent
                        # changing files
                        tr = self._repo.currenttransaction()
                        self._repo.dirstate.write(tr)
                    else:
                        # in this case, writing changes out breaks
                        # consistency, because .hg/dirstate was
                        # already changed simultaneously after last
                        # caching (see also issue5584 for detail)
                        self._repo.ui.debug('skip updating dirstate: '
                                            'identity mismatch\n')
            except error.LockError:
                pass
        return modified, deleted, fixup

    def _dirstatestatus(self, match=None, ignored=False, clean=False,
                        unknown=False):
        '''Gets the status from the dirstate -- internal use only.'''
        listignored, listclean, listunknown = ignored, clean, unknown
        match = match or matchmod.always(self._repo.root, self._repo.getcwd())
        subrepos = []
        if '.hgsub' in self:
            subrepos = sorted(self.substate)
        cmp, s = self._repo.dirstate.status(match, subrepos, listignored,
                                            listclean, listunknown)

        # check for any possibly clean files
        if cmp:
            modified2, deleted2, fixup = self._checklookup(cmp)
            s.modified.extend(modified2)
            s.deleted.extend(deleted2)

            # update dirstate for files that are actually clean
            if fixup and listclean:
                s.clean.extend(fixup)

        if match.always():
            # cache for performance
            if s.unknown or s.ignored or s.clean:
                # "_status" is cached with list*=False in the normal route
                self._status = scmutil.status(s.modified, s.added, s.removed,
                                              s.deleted, [], [], [])
            else:
                self._status = s

        return s

    @propertycache
    def _manifest(self):
        """generate a manifest corresponding to the values in self._status

        This reuse the file nodeid from parent, but we use special node
        identifiers for added and modified files. This is used by manifests
        merge to see that files are different and by update logic to avoid
        deleting newly added files.
        """
        return self._buildstatusmanifest(self._status)

    def _buildstatusmanifest(self, status):
        """Builds a manifest that includes the given status results."""
        parents = self.parents()

        man = parents[0].manifest().copy()

        ff = self._flagfunc
        for i, l in ((addednodeid, status.added),
                     (modifiednodeid, status.modified)):
            for f in l:
                man[f] = i
                try:
                    man.setflag(f, ff(f))
                except OSError:
                    pass

        for f in status.deleted + status.removed:
            if f in man:
                del man[f]

        return man

    def _buildstatus(self, other, s, match, listignored, listclean,
                     listunknown):
        """build a status with respect to another context

        This includes logic for maintaining the fast path of status when
        comparing the working directory against its parent, which is to skip
        building a new manifest if self (working directory) is not comparing
        against its parent (repo['.']).
        """
        s = self._dirstatestatus(match, listignored, listclean, listunknown)
        # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
        # might have accidentally ended up with the entire contents of the file
        # they are supposed to be linking to.
        s.modified[:] = self._filtersuspectsymlink(s.modified)
        if other != self._repo['.']:
            s = super(workingctx, self)._buildstatus(other, s, match,
                                                     listignored, listclean,
                                                     listunknown)
        return s

    def _matchstatus(self, other, match):
        """override the match method with a filter for directory patterns

        We use inheritance to customize the match.bad method only in cases of
        workingctx since it belongs only to the working directory when
        comparing against the parent changeset.

        If we aren't comparing against the working directory's parent, then we
        just use the default match object sent to us.
        """
        superself = super(workingctx, self)
        match = superself._matchstatus(other, match)
        if other != self._repo['.']:
            def bad(f, msg):
                # 'f' may be a directory pattern from 'match.files()',
                # so 'f not in ctx1' is not enough
                if f not in other and not other.hasdir(f):
                    self._repo.ui.warn('%s: %s\n' %
                                       (self._repo.dirstate.pathto(f), msg))
            match.bad = bad
        return match
1885
1896
class committablefilectx(basefilectx):
    """A committablefilectx provides common functionality for a file context
    that wants the ability to commit, e.g. workingfilectx or memfilectx."""
    def __init__(self, repo, path, filelog=None, ctx=None):
        self._repo = repo
        self._path = path
        # not yet committed: no changeset id or filelog revision
        self._changeid = None
        self._filerev = self._filenode = None

        if filelog is not None:
            self._filelog = filelog
        if ctx:
            self._changectx = ctx

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def linkrev(self):
        # linked to self._changectx no matter if file is modified or not
        return self.rev()

    def parents(self):
        '''return parent filectxs, following copies if necessary'''
        def filenode(ctx, path):
            return ctx._manifest.get(path, nullid)

        path = self._path
        fl = self._filelog
        pcl = self._changectx._parents
        renamed = self.renamed()

        if renamed:
            pl = [renamed + (None,)]
        else:
            pl = [(path, filenode(pcl[0], path), fl)]

        for pc in pcl[1:]:
            pl.append((path, filenode(pc, path), fl))

        return [self._parentfilectx(p, fileid=n, filelog=l)
                for p, n, l in pl if n != nullid]

    def children(self):
        return []
1932
1943
class workingfilectx(committablefilectx):
    """A workingfilectx object makes access to data related to a particular
    file in the working directory convenient."""
    def __init__(self, repo, path, filelog=None, workingctx=None):
        super(workingfilectx, self).__init__(repo, path, filelog, workingctx)

    @propertycache
    def _changectx(self):
        return workingctx(self._repo)

    def data(self):
        return self._repo.wread(self._path)

    def renamed(self):
        # (source path, source filenode) if this file was copied/renamed
        rp = self._repo.dirstate.copied(self._path)
        if not rp:
            return None
        return rp, self._changectx._parents[0]._manifest.get(rp, nullid)

    def size(self):
        return self._repo.wvfs.lstat(self._path).st_size

    def date(self):
        t, tz = self._changectx.date()
        try:
            return (self._repo.wvfs.lstat(self._path).st_mtime, tz)
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
            # file vanished: fall back to the changectx date
            return (t, tz)

    def cmp(self, fctx):
        """compare with other file context

        returns True if different than fctx.
        """
        # fctx should be a filectx (not a workingfilectx)
        # invert comparison to reuse the same code path
        return fctx.cmp(self)

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        self._repo.wvfs.unlinkpath(self._path, ignoremissing=ignoremissing)

    def write(self, data, flags):
        """wraps repo.wwrite"""
        self._repo.wwrite(self._path, data, flags)
1978
1989
class workingcommitctx(workingctx):
    """A workingcommitctx object makes access to data related to
    the revision being committed convenient.

    This hides changes in the working directory, if they aren't
    committed in this context.
    """
    def __init__(self, repo, changes,
                 text="", user=None, date=None, extra=None):
        super(workingctx, self).__init__(repo, text, user, date, extra,
                                         changes)

    def _dirstatestatus(self, match=None, ignored=False, clean=False,
                        unknown=False):
        """Return matched files only in ``self._status``

        Uncommitted files appear "clean" via this context, even if
        they aren't actually so in the working directory.
        """
        match = match or matchmod.always(self._repo.root, self._repo.getcwd())
        if clean:
            clean = [f for f in self._manifest if f not in self._changedset]
        else:
            clean = []
        return scmutil.status([f for f in self._status.modified if match(f)],
                              [f for f in self._status.added if match(f)],
                              [f for f in self._status.removed if match(f)],
                              [], [], [], clean)

    @propertycache
    def _changedset(self):
        """Return the set of files changed in this context
        """
        changed = set(self._status.modified)
        changed.update(self._status.added)
        changed.update(self._status.removed)
        return changed
2016
2027
2017 def makecachingfilectxfn(func):
2028 def makecachingfilectxfn(func):
2018 """Create a filectxfn that caches based on the path.
2029 """Create a filectxfn that caches based on the path.
2019
2030
2020 We can't use util.cachefunc because it uses all arguments as the cache
2031 We can't use util.cachefunc because it uses all arguments as the cache
2021 key and this creates a cycle since the arguments include the repo and
2032 key and this creates a cycle since the arguments include the repo and
2022 memctx.
2033 memctx.
2023 """
2034 """
2024 cache = {}
2035 cache = {}
2025
2036
2026 def getfilectx(repo, memctx, path):
2037 def getfilectx(repo, memctx, path):
2027 if path not in cache:
2038 if path not in cache:
2028 cache[path] = func(repo, memctx, path)
2039 cache[path] = func(repo, memctx, path)
2029 return cache[path]
2040 return cache[path]
2030
2041
2031 return getfilectx
2042 return getfilectx
2032
2043
2033 class memctx(committablectx):
2044 class memctx(committablectx):
2034 """Use memctx to perform in-memory commits via localrepo.commitctx().
2045 """Use memctx to perform in-memory commits via localrepo.commitctx().
2035
2046
2036 Revision information is supplied at initialization time while
2047 Revision information is supplied at initialization time while
2037 related files data and is made available through a callback
2048 related files data and is made available through a callback
2038 mechanism. 'repo' is the current localrepo, 'parents' is a
2049 mechanism. 'repo' is the current localrepo, 'parents' is a
2039 sequence of two parent revisions identifiers (pass None for every
2050 sequence of two parent revisions identifiers (pass None for every
2040 missing parent), 'text' is the commit message and 'files' lists
2051 missing parent), 'text' is the commit message and 'files' lists
2041 names of files touched by the revision (normalized and relative to
2052 names of files touched by the revision (normalized and relative to
2042 repository root).
2053 repository root).
2043
2054
2044 filectxfn(repo, memctx, path) is a callable receiving the
2055 filectxfn(repo, memctx, path) is a callable receiving the
2045 repository, the current memctx object and the normalized path of
2056 repository, the current memctx object and the normalized path of
2046 requested file, relative to repository root. It is fired by the
2057 requested file, relative to repository root. It is fired by the
2047 commit function for every file in 'files', but calls order is
2058 commit function for every file in 'files', but calls order is
2048 undefined. If the file is available in the revision being
2059 undefined. If the file is available in the revision being
2049 committed (updated or added), filectxfn returns a memfilectx
2060 committed (updated or added), filectxfn returns a memfilectx
2050 object. If the file was removed, filectxfn return None for recent
2061 object. If the file was removed, filectxfn return None for recent
2051 Mercurial. Moved files are represented by marking the source file
2062 Mercurial. Moved files are represented by marking the source file
2052 removed and the new file added with copy information (see
2063 removed and the new file added with copy information (see
2053 memfilectx).
2064 memfilectx).
2054
2065
2055 user receives the committer name and defaults to current
2066 user receives the committer name and defaults to current
2056 repository username, date is the commit date in any format
2067 repository username, date is the commit date in any format
2057 supported by util.parsedate() and defaults to current date, extra
2068 supported by util.parsedate() and defaults to current date, extra
2058 is a dictionary of metadata or is left empty.
2069 is a dictionary of metadata or is left empty.
2059 """
2070 """
2060
2071
2061 # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
2072 # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
2062 # Extensions that need to retain compatibility across Mercurial 3.1 can use
2073 # Extensions that need to retain compatibility across Mercurial 3.1 can use
2063 # this field to determine what to do in filectxfn.
2074 # this field to determine what to do in filectxfn.
2064 _returnnoneformissingfiles = True
2075 _returnnoneformissingfiles = True
2065
2076
2066 def __init__(self, repo, parents, text, files, filectxfn, user=None,
2077 def __init__(self, repo, parents, text, files, filectxfn, user=None,
2067 date=None, extra=None, editor=False):
2078 date=None, extra=None, editor=False):
2068 super(memctx, self).__init__(repo, text, user, date, extra)
2079 super(memctx, self).__init__(repo, text, user, date, extra)
2069 self._rev = None
2080 self._rev = None
2070 self._node = None
2081 self._node = None
2071 parents = [(p or nullid) for p in parents]
2082 parents = [(p or nullid) for p in parents]
2072 p1, p2 = parents
2083 p1, p2 = parents
2073 self._parents = [changectx(self._repo, p) for p in (p1, p2)]
2084 self._parents = [changectx(self._repo, p) for p in (p1, p2)]
2074 files = sorted(set(files))
2085 files = sorted(set(files))
2075 self._files = files
2086 self._files = files
2076 self.substate = {}
2087 self.substate = {}
2077
2088
2078 # if store is not callable, wrap it in a function
2089 # if store is not callable, wrap it in a function
2079 if not callable(filectxfn):
2090 if not callable(filectxfn):
2080 def getfilectx(repo, memctx, path):
2091 def getfilectx(repo, memctx, path):
2081 fctx = filectxfn[path]
2092 fctx = filectxfn[path]
2082 # this is weird but apparently we only keep track of one parent
2093 # this is weird but apparently we only keep track of one parent
2083 # (why not only store that instead of a tuple?)
2094 # (why not only store that instead of a tuple?)
2084 copied = fctx.renamed()
2095 copied = fctx.renamed()
2085 if copied:
2096 if copied:
2086 copied = copied[0]
2097 copied = copied[0]
2087 return memfilectx(repo, path, fctx.data(),
2098 return memfilectx(repo, path, fctx.data(),
2088 islink=fctx.islink(), isexec=fctx.isexec(),
2099 islink=fctx.islink(), isexec=fctx.isexec(),
2089 copied=copied, memctx=memctx)
2100 copied=copied, memctx=memctx)
2090 self._filectxfn = getfilectx
2101 self._filectxfn = getfilectx
2091 else:
2102 else:
2092 # memoizing increases performance for e.g. vcs convert scenarios.
2103 # memoizing increases performance for e.g. vcs convert scenarios.
2093 self._filectxfn = makecachingfilectxfn(filectxfn)
2104 self._filectxfn = makecachingfilectxfn(filectxfn)
2094
2105
2095 if editor:
2106 if editor:
2096 self._text = editor(self._repo, self, [])
2107 self._text = editor(self._repo, self, [])
2097 self._repo.savecommitmessage(self._text)
2108 self._repo.savecommitmessage(self._text)
2098
2109
2099 def filectx(self, path, filelog=None):
2110 def filectx(self, path, filelog=None):
2100 """get a file context from the working directory
2111 """get a file context from the working directory
2101
2112
2102 Returns None if file doesn't exist and should be removed."""
2113 Returns None if file doesn't exist and should be removed."""
2103 return self._filectxfn(self._repo, self, path)
2114 return self._filectxfn(self._repo, self, path)
2104
2115
2105 def commit(self):
2116 def commit(self):
2106 """commit context to the repo"""
2117 """commit context to the repo"""
2107 return self._repo.commitctx(self)
2118 return self._repo.commitctx(self)
2108
2119
2109 @propertycache
2120 @propertycache
2110 def _manifest(self):
2121 def _manifest(self):
2111 """generate a manifest based on the return values of filectxfn"""
2122 """generate a manifest based on the return values of filectxfn"""
2112
2123
2113 # keep this simple for now; just worry about p1
2124 # keep this simple for now; just worry about p1
2114 pctx = self._parents[0]
2125 pctx = self._parents[0]
2115 man = pctx.manifest().copy()
2126 man = pctx.manifest().copy()
2116
2127
2117 for f in self._status.modified:
2128 for f in self._status.modified:
2118 p1node = nullid
2129 p1node = nullid
2119 p2node = nullid
2130 p2node = nullid
2120 p = pctx[f].parents() # if file isn't in pctx, check p2?
2131 p = pctx[f].parents() # if file isn't in pctx, check p2?
2121 if len(p) > 0:
2132 if len(p) > 0:
2122 p1node = p[0].filenode()
2133 p1node = p[0].filenode()
2123 if len(p) > 1:
2134 if len(p) > 1:
2124 p2node = p[1].filenode()
2135 p2node = p[1].filenode()
2125 man[f] = revlog.hash(self[f].data(), p1node, p2node)
2136 man[f] = revlog.hash(self[f].data(), p1node, p2node)
2126
2137
2127 for f in self._status.added:
2138 for f in self._status.added:
2128 man[f] = revlog.hash(self[f].data(), nullid, nullid)
2139 man[f] = revlog.hash(self[f].data(), nullid, nullid)
2129
2140
2130 for f in self._status.removed:
2141 for f in self._status.removed:
2131 if f in man:
2142 if f in man:
2132 del man[f]
2143 del man[f]
2133
2144
2134 return man
2145 return man
2135
2146
2136 @propertycache
2147 @propertycache
2137 def _status(self):
2148 def _status(self):
2138 """Calculate exact status from ``files`` specified at construction
2149 """Calculate exact status from ``files`` specified at construction
2139 """
2150 """
2140 man1 = self.p1().manifest()
2151 man1 = self.p1().manifest()
2141 p2 = self._parents[1]
2152 p2 = self._parents[1]
2142 # "1 < len(self._parents)" can't be used for checking
2153 # "1 < len(self._parents)" can't be used for checking
2143 # existence of the 2nd parent, because "memctx._parents" is
2154 # existence of the 2nd parent, because "memctx._parents" is
2144 # explicitly initialized by the list, of which length is 2.
2155 # explicitly initialized by the list, of which length is 2.
2145 if p2.node() != nullid:
2156 if p2.node() != nullid:
2146 man2 = p2.manifest()
2157 man2 = p2.manifest()
2147 managing = lambda f: f in man1 or f in man2
2158 managing = lambda f: f in man1 or f in man2
2148 else:
2159 else:
2149 managing = lambda f: f in man1
2160 managing = lambda f: f in man1
2150
2161
2151 modified, added, removed = [], [], []
2162 modified, added, removed = [], [], []
2152 for f in self._files:
2163 for f in self._files:
2153 if not managing(f):
2164 if not managing(f):
2154 added.append(f)
2165 added.append(f)
2155 elif self[f]:
2166 elif self[f]:
2156 modified.append(f)
2167 modified.append(f)
2157 else:
2168 else:
2158 removed.append(f)
2169 removed.append(f)
2159
2170
2160 return scmutil.status(modified, added, removed, [], [], [], [])
2171 return scmutil.status(modified, added, removed, [], [], [], [])
2161
2172
2162 class memfilectx(committablefilectx):
2173 class memfilectx(committablefilectx):
2163 """memfilectx represents an in-memory file to commit.
2174 """memfilectx represents an in-memory file to commit.
2164
2175
2165 See memctx and committablefilectx for more details.
2176 See memctx and committablefilectx for more details.
2166 """
2177 """
2167 def __init__(self, repo, path, data, islink=False,
2178 def __init__(self, repo, path, data, islink=False,
2168 isexec=False, copied=None, memctx=None):
2179 isexec=False, copied=None, memctx=None):
2169 """
2180 """
2170 path is the normalized file path relative to repository root.
2181 path is the normalized file path relative to repository root.
2171 data is the file content as a string.
2182 data is the file content as a string.
2172 islink is True if the file is a symbolic link.
2183 islink is True if the file is a symbolic link.
2173 isexec is True if the file is executable.
2184 isexec is True if the file is executable.
2174 copied is the source file path if current file was copied in the
2185 copied is the source file path if current file was copied in the
2175 revision being committed, or None."""
2186 revision being committed, or None."""
2176 super(memfilectx, self).__init__(repo, path, None, memctx)
2187 super(memfilectx, self).__init__(repo, path, None, memctx)
2177 self._data = data
2188 self._data = data
2178 self._flags = (islink and 'l' or '') + (isexec and 'x' or '')
2189 self._flags = (islink and 'l' or '') + (isexec and 'x' or '')
2179 self._copied = None
2190 self._copied = None
2180 if copied:
2191 if copied:
2181 self._copied = (copied, nullid)
2192 self._copied = (copied, nullid)
2182
2193
2183 def data(self):
2194 def data(self):
2184 return self._data
2195 return self._data
2185
2196
2186 def remove(self, ignoremissing=False):
2197 def remove(self, ignoremissing=False):
2187 """wraps unlink for a repo's working directory"""
2198 """wraps unlink for a repo's working directory"""
2188 # need to figure out what to do here
2199 # need to figure out what to do here
2189 del self._changectx[self._path]
2200 del self._changectx[self._path]
2190
2201
2191 def write(self, data, flags):
2202 def write(self, data, flags):
2192 """wraps repo.wwrite"""
2203 """wraps repo.wwrite"""
2193 self._data = data
2204 self._data = data
2194
2205
2195 class overlayfilectx(committablefilectx):
2206 class overlayfilectx(committablefilectx):
2196 """Like memfilectx but take an original filectx and optional parameters to
2207 """Like memfilectx but take an original filectx and optional parameters to
2197 override parts of it. This is useful when fctx.data() is expensive (i.e.
2208 override parts of it. This is useful when fctx.data() is expensive (i.e.
2198 flag processor is expensive) and raw data, flags, and filenode could be
2209 flag processor is expensive) and raw data, flags, and filenode could be
2199 reused (ex. rebase or mode-only amend a REVIDX_EXTSTORED file).
2210 reused (ex. rebase or mode-only amend a REVIDX_EXTSTORED file).
2200 """
2211 """
2201
2212
2202 def __init__(self, originalfctx, datafunc=None, path=None, flags=None,
2213 def __init__(self, originalfctx, datafunc=None, path=None, flags=None,
2203 copied=None, ctx=None):
2214 copied=None, ctx=None):
2204 """originalfctx: filecontext to duplicate
2215 """originalfctx: filecontext to duplicate
2205
2216
2206 datafunc: None or a function to override data (file content). It is a
2217 datafunc: None or a function to override data (file content). It is a
2207 function to be lazy. path, flags, copied, ctx: None or overridden value
2218 function to be lazy. path, flags, copied, ctx: None or overridden value
2208
2219
2209 copied could be (path, rev), or False. copied could also be just path,
2220 copied could be (path, rev), or False. copied could also be just path,
2210 and will be converted to (path, nullid). This simplifies some callers.
2221 and will be converted to (path, nullid). This simplifies some callers.
2211 """
2222 """
2212
2223
2213 if path is None:
2224 if path is None:
2214 path = originalfctx.path()
2225 path = originalfctx.path()
2215 if ctx is None:
2226 if ctx is None:
2216 ctx = originalfctx.changectx()
2227 ctx = originalfctx.changectx()
2217 ctxmatch = lambda: True
2228 ctxmatch = lambda: True
2218 else:
2229 else:
2219 ctxmatch = lambda: ctx == originalfctx.changectx()
2230 ctxmatch = lambda: ctx == originalfctx.changectx()
2220
2231
2221 repo = originalfctx.repo()
2232 repo = originalfctx.repo()
2222 flog = originalfctx.filelog()
2233 flog = originalfctx.filelog()
2223 super(overlayfilectx, self).__init__(repo, path, flog, ctx)
2234 super(overlayfilectx, self).__init__(repo, path, flog, ctx)
2224
2235
2225 if copied is None:
2236 if copied is None:
2226 copied = originalfctx.renamed()
2237 copied = originalfctx.renamed()
2227 copiedmatch = lambda: True
2238 copiedmatch = lambda: True
2228 else:
2239 else:
2229 if copied and not isinstance(copied, tuple):
2240 if copied and not isinstance(copied, tuple):
2230 # repo._filecommit will recalculate copyrev so nullid is okay
2241 # repo._filecommit will recalculate copyrev so nullid is okay
2231 copied = (copied, nullid)
2242 copied = (copied, nullid)
2232 copiedmatch = lambda: copied == originalfctx.renamed()
2243 copiedmatch = lambda: copied == originalfctx.renamed()
2233
2244
2234 # When data, copied (could affect data), ctx (could affect filelog
2245 # When data, copied (could affect data), ctx (could affect filelog
2235 # parents) are not overridden, rawdata, rawflags, and filenode may be
2246 # parents) are not overridden, rawdata, rawflags, and filenode may be
2236 # reused (repo._filecommit should double check filelog parents).
2247 # reused (repo._filecommit should double check filelog parents).
2237 #
2248 #
2238 # path, flags are not hashed in filelog (but in manifestlog) so they do
2249 # path, flags are not hashed in filelog (but in manifestlog) so they do
2239 # not affect reusable here.
2250 # not affect reusable here.
2240 #
2251 #
2241 # If ctx or copied is overridden to a same value with originalfctx,
2252 # If ctx or copied is overridden to a same value with originalfctx,
2242 # still consider it's reusable. originalfctx.renamed() may be a bit
2253 # still consider it's reusable. originalfctx.renamed() may be a bit
2243 # expensive so it's not called unless necessary. Assuming datafunc is
2254 # expensive so it's not called unless necessary. Assuming datafunc is
2244 # always expensive, do not call it for this "reusable" test.
2255 # always expensive, do not call it for this "reusable" test.
2245 reusable = datafunc is None and ctxmatch() and copiedmatch()
2256 reusable = datafunc is None and ctxmatch() and copiedmatch()
2246
2257
2247 if datafunc is None:
2258 if datafunc is None:
2248 datafunc = originalfctx.data
2259 datafunc = originalfctx.data
2249 if flags is None:
2260 if flags is None:
2250 flags = originalfctx.flags()
2261 flags = originalfctx.flags()
2251
2262
2252 self._datafunc = datafunc
2263 self._datafunc = datafunc
2253 self._flags = flags
2264 self._flags = flags
2254 self._copied = copied
2265 self._copied = copied
2255
2266
2256 if reusable:
2267 if reusable:
2257 # copy extra fields from originalfctx
2268 # copy extra fields from originalfctx
2258 attrs = ['rawdata', 'rawflags', '_filenode', '_filerev']
2269 attrs = ['rawdata', 'rawflags', '_filenode', '_filerev']
2259 for attr in attrs:
2270 for attr in attrs:
2260 if util.safehasattr(originalfctx, attr):
2271 if util.safehasattr(originalfctx, attr):
2261 setattr(self, attr, getattr(originalfctx, attr))
2272 setattr(self, attr, getattr(originalfctx, attr))
2262
2273
2263 def data(self):
2274 def data(self):
2264 return self._datafunc()
2275 return self._datafunc()
2265
2276
2266 class metadataonlyctx(committablectx):
2277 class metadataonlyctx(committablectx):
2267 """Like memctx but it's reusing the manifest of different commit.
2278 """Like memctx but it's reusing the manifest of different commit.
2268 Intended to be used by lightweight operations that are creating
2279 Intended to be used by lightweight operations that are creating
2269 metadata-only changes.
2280 metadata-only changes.
2270
2281
2271 Revision information is supplied at initialization time. 'repo' is the
2282 Revision information is supplied at initialization time. 'repo' is the
2272 current localrepo, 'ctx' is original revision which manifest we're reuisng
2283 current localrepo, 'ctx' is original revision which manifest we're reuisng
2273 'parents' is a sequence of two parent revisions identifiers (pass None for
2284 'parents' is a sequence of two parent revisions identifiers (pass None for
2274 every missing parent), 'text' is the commit.
2285 every missing parent), 'text' is the commit.
2275
2286
2276 user receives the committer name and defaults to current repository
2287 user receives the committer name and defaults to current repository
2277 username, date is the commit date in any format supported by
2288 username, date is the commit date in any format supported by
2278 util.parsedate() and defaults to current date, extra is a dictionary of
2289 util.parsedate() and defaults to current date, extra is a dictionary of
2279 metadata or is left empty.
2290 metadata or is left empty.
2280 """
2291 """
2281 def __new__(cls, repo, originalctx, *args, **kwargs):
2292 def __new__(cls, repo, originalctx, *args, **kwargs):
2282 return super(metadataonlyctx, cls).__new__(cls, repo)
2293 return super(metadataonlyctx, cls).__new__(cls, repo)
2283
2294
2284 def __init__(self, repo, originalctx, parents, text, user=None, date=None,
2295 def __init__(self, repo, originalctx, parents, text, user=None, date=None,
2285 extra=None, editor=False):
2296 extra=None, editor=False):
2286 super(metadataonlyctx, self).__init__(repo, text, user, date, extra)
2297 super(metadataonlyctx, self).__init__(repo, text, user, date, extra)
2287 self._rev = None
2298 self._rev = None
2288 self._node = None
2299 self._node = None
2289 self._originalctx = originalctx
2300 self._originalctx = originalctx
2290 self._manifestnode = originalctx.manifestnode()
2301 self._manifestnode = originalctx.manifestnode()
2291 parents = [(p or nullid) for p in parents]
2302 parents = [(p or nullid) for p in parents]
2292 p1, p2 = self._parents = [changectx(self._repo, p) for p in parents]
2303 p1, p2 = self._parents = [changectx(self._repo, p) for p in parents]
2293
2304
2294 # sanity check to ensure that the reused manifest parents are
2305 # sanity check to ensure that the reused manifest parents are
2295 # manifests of our commit parents
2306 # manifests of our commit parents
2296 mp1, mp2 = self.manifestctx().parents
2307 mp1, mp2 = self.manifestctx().parents
2297 if p1 != nullid and p1.manifestnode() != mp1:
2308 if p1 != nullid and p1.manifestnode() != mp1:
2298 raise RuntimeError('can\'t reuse the manifest: '
2309 raise RuntimeError('can\'t reuse the manifest: '
2299 'its p1 doesn\'t match the new ctx p1')
2310 'its p1 doesn\'t match the new ctx p1')
2300 if p2 != nullid and p2.manifestnode() != mp2:
2311 if p2 != nullid and p2.manifestnode() != mp2:
2301 raise RuntimeError('can\'t reuse the manifest: '
2312 raise RuntimeError('can\'t reuse the manifest: '
2302 'its p2 doesn\'t match the new ctx p2')
2313 'its p2 doesn\'t match the new ctx p2')
2303
2314
2304 self._files = originalctx.files()
2315 self._files = originalctx.files()
2305 self.substate = {}
2316 self.substate = {}
2306
2317
2307 if editor:
2318 if editor:
2308 self._text = editor(self._repo, self, [])
2319 self._text = editor(self._repo, self, [])
2309 self._repo.savecommitmessage(self._text)
2320 self._repo.savecommitmessage(self._text)
2310
2321
2311 def manifestnode(self):
2322 def manifestnode(self):
2312 return self._manifestnode
2323 return self._manifestnode
2313
2324
2314 @property
2325 @property
2315 def _manifestctx(self):
2326 def _manifestctx(self):
2316 return self._repo.manifestlog[self._manifestnode]
2327 return self._repo.manifestlog[self._manifestnode]
2317
2328
2318 def filectx(self, path, filelog=None):
2329 def filectx(self, path, filelog=None):
2319 return self._originalctx.filectx(path, filelog=filelog)
2330 return self._originalctx.filectx(path, filelog=filelog)
2320
2331
2321 def commit(self):
2332 def commit(self):
2322 """commit context to the repo"""
2333 """commit context to the repo"""
2323 return self._repo.commitctx(self)
2334 return self._repo.commitctx(self)
2324
2335
2325 @property
2336 @property
2326 def _manifest(self):
2337 def _manifest(self):
2327 return self._originalctx.manifest()
2338 return self._originalctx.manifest()
2328
2339
2329 @propertycache
2340 @propertycache
2330 def _status(self):
2341 def _status(self):
2331 """Calculate exact status from ``files`` specified in the ``origctx``
2342 """Calculate exact status from ``files`` specified in the ``origctx``
2332 and parents manifests.
2343 and parents manifests.
2333 """
2344 """
2334 man1 = self.p1().manifest()
2345 man1 = self.p1().manifest()
2335 p2 = self._parents[1]
2346 p2 = self._parents[1]
2336 # "1 < len(self._parents)" can't be used for checking
2347 # "1 < len(self._parents)" can't be used for checking
2337 # existence of the 2nd parent, because "metadataonlyctx._parents" is
2348 # existence of the 2nd parent, because "metadataonlyctx._parents" is
2338 # explicitly initialized by the list, of which length is 2.
2349 # explicitly initialized by the list, of which length is 2.
2339 if p2.node() != nullid:
2350 if p2.node() != nullid:
2340 man2 = p2.manifest()
2351 man2 = p2.manifest()
2341 managing = lambda f: f in man1 or f in man2
2352 managing = lambda f: f in man1 or f in man2
2342 else:
2353 else:
2343 managing = lambda f: f in man1
2354 managing = lambda f: f in man1
2344
2355
2345 modified, added, removed = [], [], []
2356 modified, added, removed = [], [], []
2346 for f in self._files:
2357 for f in self._files:
2347 if not managing(f):
2358 if not managing(f):
2348 added.append(f)
2359 added.append(f)
2349 elif self[f]:
2360 elif self[f]:
2350 modified.append(f)
2361 modified.append(f)
2351 else:
2362 else:
2352 removed.append(f)
2363 removed.append(f)
2353
2364
2354 return scmutil.status(modified, added, removed, [], [], [], [])
2365 return scmutil.status(modified, added, removed, [], [], [], [])
@@ -1,97 +1,159
1 $ hg init repo
1 $ hg init repo
2 $ cd repo
2 $ cd repo
3 $ echo a > a
3 $ echo a > a
4 $ hg add a
4 $ hg add a
5 $ hg commit -m test
5 $ hg commit -m test
6
6
7 Do we ever miss a sub-second change?:
7 Do we ever miss a sub-second change?:
8
8
9 $ for i in 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20; do
9 $ for i in 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20; do
10 > hg co -qC 0
10 > hg co -qC 0
11 > echo b > a
11 > echo b > a
12 > hg st
12 > hg st
13 > done
13 > done
14 M a
14 M a
15 M a
15 M a
16 M a
16 M a
17 M a
17 M a
18 M a
18 M a
19 M a
19 M a
20 M a
20 M a
21 M a
21 M a
22 M a
22 M a
23 M a
23 M a
24 M a
24 M a
25 M a
25 M a
26 M a
26 M a
27 M a
27 M a
28 M a
28 M a
29 M a
29 M a
30 M a
30 M a
31 M a
31 M a
32 M a
32 M a
33 M a
33 M a
34
34
35 $ echo test > b
35 $ echo test > b
36 $ mkdir dir1
36 $ mkdir dir1
37 $ echo test > dir1/c
37 $ echo test > dir1/c
38 $ echo test > d
38 $ echo test > d
39
39
40 $ echo test > e
40 $ echo test > e
41 #if execbit
41 #if execbit
42 A directory will typically have the execute bit -- make sure it doesn't get
42 A directory will typically have the execute bit -- make sure it doesn't get
43 confused with a file with the exec bit set
43 confused with a file with the exec bit set
44 $ chmod +x e
44 $ chmod +x e
45 #endif
45 #endif
46
46
47 $ hg add b dir1 d e
47 $ hg add b dir1 d e
48 adding dir1/c (glob)
48 adding dir1/c (glob)
49 $ hg commit -m test2
49 $ hg commit -m test2
50
50
51 $ cat >> $TESTTMP/dirstaterace.py << EOF
51 $ cat >> $TESTTMP/dirstaterace.py << EOF
52 > from mercurial import (
52 > from mercurial import (
53 > context,
53 > context,
54 > extensions,
54 > extensions,
55 > )
55 > )
56 > def extsetup():
56 > def extsetup():
57 > extensions.wrapfunction(context.workingctx, '_checklookup', overridechecklookup)
57 > extensions.wrapfunction(context.workingctx, '_checklookup', overridechecklookup)
58 > def overridechecklookup(orig, self, files):
58 > def overridechecklookup(orig, self, files):
59 > # make an update that changes the dirstate from underneath
59 > # make an update that changes the dirstate from underneath
60 > self._repo.ui.system(r"sh '$TESTTMP/dirstaterace.sh'",
60 > self._repo.ui.system(r"sh '$TESTTMP/dirstaterace.sh'",
61 > cwd=self._repo.root)
61 > cwd=self._repo.root)
62 > return orig(self, files)
62 > return orig(self, files)
63 > EOF
63 > EOF
64
64
65 $ hg debugrebuilddirstate
65 $ hg debugrebuilddirstate
66 $ hg debugdirstate
66 $ hg debugdirstate
67 n 0 -1 unset a
67 n 0 -1 unset a
68 n 0 -1 unset b
68 n 0 -1 unset b
69 n 0 -1 unset d
69 n 0 -1 unset d
70 n 0 -1 unset dir1/c
70 n 0 -1 unset dir1/c
71 n 0 -1 unset e
71 n 0 -1 unset e
72
72
73 XXX Note that this returns M for files that got replaced by directories. This is
73 XXX Note that this returns M for files that got replaced by directories. This is
74 definitely a bug, but the fix for that is hard and the next status run is fine
74 definitely a bug, but the fix for that is hard and the next status run is fine
75 anyway.
75 anyway.
76
76
77 $ cat > $TESTTMP/dirstaterace.sh <<EOF
77 $ cat > $TESTTMP/dirstaterace.sh <<EOF
78 > rm b && rm -r dir1 && rm d && mkdir d && rm e && mkdir e
78 > rm b && rm -r dir1 && rm d && mkdir d && rm e && mkdir e
79 > EOF
79 > EOF
80
80
81 $ hg status --config extensions.dirstaterace=$TESTTMP/dirstaterace.py
81 $ hg status --config extensions.dirstaterace=$TESTTMP/dirstaterace.py
82 M d
82 M d
83 M e
83 M e
84 ! b
84 ! b
85 ! dir1/c
85 ! dir1/c
86 $ hg debugdirstate
86 $ hg debugdirstate
87 n 644 2 * a (glob)
87 n 644 2 * a (glob)
88 n 0 -1 unset b
88 n 0 -1 unset b
89 n 0 -1 unset d
89 n 0 -1 unset d
90 n 0 -1 unset dir1/c
90 n 0 -1 unset dir1/c
91 n 0 -1 unset e
91 n 0 -1 unset e
92
92
93 $ hg status
93 $ hg status
94 ! b
94 ! b
95 ! d
95 ! d
96 ! dir1/c
96 ! dir1/c
97 ! e
97 ! e
98
99 $ rmdir d e
100 $ hg update -C -q .
101
102 Test that dirstate changes aren't written out at the end of "hg
103 status", if .hg/dirstate is already changed simultaneously before
104 acquisition of wlock in workingctx._checklookup().
105
106 This avoidance is important to keep consistency of dirstate in race
107 condition (see issue5584 for detail).
108
109 $ hg parents -q
110 1:* (glob)
111
112 $ hg debugrebuilddirstate
113 $ hg debugdirstate
114 n 0 -1 unset a
115 n 0 -1 unset b
116 n 0 -1 unset d
117 n 0 -1 unset dir1/c
118 n 0 -1 unset e
119
120 $ cat > $TESTTMP/dirstaterace.sh <<EOF
121 > # This script assumes timetable of typical issue5584 case below:
122 > #
123 > # 1. "hg status" loads .hg/dirstate
124 > # 2. "hg status" confirms clean-ness of FILE
125 > # 3. "hg update -C 0" updates the working directory simultaneously
126 > # (FILE is removed, and FILE is dropped from .hg/dirstate)
127 > # 4. "hg status" acquires wlock
128 > # (.hg/dirstate is re-loaded = no FILE entry in dirstate)
129 > # 5. "hg status" marks FILE in dirstate as clean
130 > # (FILE entry is added to in-memory dirstate)
131 > # 6. "hg status" writes dirstate changes into .hg/dirstate
132 > # (FILE entry is written into .hg/dirstate)
133 > #
134 > # To reproduce similar situation easily and certainly, #2 and #3
135 > # are swapped. "hg cat" below ensures #2 on "hg status" side.
136 >
137 > hg update -q -C 0
138 > hg cat -r 1 b > b
139 > EOF
140
141 "hg status" below should excludes "e", of which exec flag is set, for
142 portability of test scenario, because unsure but missing "e" is
143 treated differently in _checklookup() according to runtime platform.
144
145 - "missing(!)" on POSIX, "pctx[f].cmp(self[f])" raises ENOENT
146 - "modified(M)" on Windows, "self.flags(f) != pctx.flags(f)" is True
147
148 $ hg status --config extensions.dirstaterace=$TESTTMP/dirstaterace.py --debug -X path:e
149 skip updating dirstate: identity mismatch
150 M a
151 ! d
152 ! dir1/c
153
154 $ hg parents -q
155 0:* (glob)
156 $ hg files
157 a
158 $ hg debugdirstate
159 n * * * a (glob)
General Comments 0
You need to be logged in to leave comments. Login now