##// END OF EJS Templates
workingctx: also pass status tuple into poststatusfixup...
Siddharth Agarwal -
r32813:6d73b7ff default
parent child Browse files
Show More
@@ -1,2380 +1,2380
# context.py - changeset and file context objects for mercurial
#
# Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno
import os
import re
import stat

from .i18n import _
from .node import (
    addednodeid,
    bin,
    hex,
    modifiednodeid,
    nullid,
    nullrev,
    short,
    wdirid,
    wdirnodes,
    wdirrev,
)
from . import (
    encoding,
    error,
    fileset,
    match as matchmod,
    mdiff,
    obsolete as obsmod,
    patch,
    phases,
    pycompat,
    repoview,
    revlog,
    scmutil,
    subrepo,
    util,
)

propertycache = util.propertycache

# matches any byte outside the printable ASCII range; used to decide whether
# a 20-byte string is a binary nodeid rather than a symbolic name
nonascii = re.compile(r'[^\x21-\x7f]').search
class basectx(object):
    """A basectx object represents the common logic for its children:
    changectx: read-only context that is already present in the repo,
    workingctx: a context that represents the working directory and can
    be committed,
    memctx: a context that represents changes in-memory and can also
    be committed."""
    def __new__(cls, repo, changeid='', *args, **kwargs):
        # passing an existing context through returns it unchanged, so
        # callers may write repo[ctx] without creating a new object
        if isinstance(changeid, basectx):
            return changeid

        o = super(basectx, cls).__new__(cls)

        o._repo = repo
        o._rev = nullrev
        o._node = nullid

        return o

    def __str__(self):
        r = short(self.node())
        if pycompat.ispy3:
            return r.decode('ascii')
        return r

    def __bytes__(self):
        return short(self.node())

    def __int__(self):
        return self.rev()

    def __repr__(self):
        return r"<%s %s>" % (type(self).__name__, str(self))

    def __eq__(self, other):
        try:
            return type(self) == type(other) and self._rev == other._rev
        except AttributeError:
            return False

    def __ne__(self, other):
        return not (self == other)

    def __contains__(self, key):
        return key in self._manifest

    def __getitem__(self, key):
        return self.filectx(key)

    def __iter__(self):
        return iter(self._manifest)

    def _buildstatusmanifest(self, status):
        """Builds a manifest that includes the given status results, if this is
        a working copy context. For non-working copy contexts, it just returns
        the normal manifest."""
        return self.manifest()

    def _matchstatus(self, other, match):
        """return match.always if match is none

        This internal method provides a way for child objects to override the
        match operator.
        """
        return match or matchmod.always(self._repo.root, self._repo.getcwd())

    def _buildstatus(self, other, s, match, listignored, listclean,
                     listunknown):
        """build a status with respect to another context"""
        # Load earliest manifest first for caching reasons. More specifically,
        # if you have revisions 1000 and 1001, 1001 is probably stored as a
        # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
        # 1000 and cache it so that when you read 1001, we just need to apply a
        # delta to what's in the cache. So that's one full reconstruction + one
        # delta application.
        mf2 = None
        if self.rev() is not None and self.rev() < other.rev():
            mf2 = self._buildstatusmanifest(s)
        mf1 = other._buildstatusmanifest(s)
        if mf2 is None:
            mf2 = self._buildstatusmanifest(s)

        modified, added = [], []
        removed = []
        clean = []
        deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
        deletedset = set(deleted)
        d = mf1.diff(mf2, match=match, clean=listclean)
        for fn, value in d.iteritems():
            if fn in deletedset:
                continue
            if value is None:
                clean.append(fn)
                continue
            (node1, flag1), (node2, flag2) = value
            if node1 is None:
                added.append(fn)
            elif node2 is None:
                removed.append(fn)
            elif flag1 != flag2:
                modified.append(fn)
            elif node2 not in wdirnodes:
                # When comparing files between two commits, we save time by
                # not comparing the file contents when the nodeids differ.
                # Note that this means we incorrectly report a reverted change
                # to a file as a modification.
                modified.append(fn)
            elif self[fn].cmp(other[fn]):
                modified.append(fn)
            else:
                clean.append(fn)

        if removed:
            # need to filter files if they are already reported as removed
            unknown = [fn for fn in unknown if fn not in mf1 and
                       (not match or match(fn))]
            ignored = [fn for fn in ignored if fn not in mf1 and
                       (not match or match(fn))]
            # if they're deleted, don't report them as removed
            removed = [fn for fn in removed if fn not in deletedset]

        return scmutil.status(modified, added, removed, deleted, unknown,
                              ignored, clean)

    @propertycache
    def substate(self):
        return subrepo.state(self, self._repo.ui)

    def subrev(self, subpath):
        return self.substate[subpath][1]

    def rev(self):
        return self._rev
    def node(self):
        return self._node
    def hex(self):
        return hex(self.node())
    def manifest(self):
        return self._manifest
    def manifestctx(self):
        return self._manifestctx
    def repo(self):
        return self._repo
    def phasestr(self):
        return phases.phasenames[self.phase()]
    def mutable(self):
        return self.phase() > phases.public

    def getfileset(self, expr):
        return fileset.getfileset(self, expr)

    def obsolete(self):
        """True if the changeset is obsolete"""
        return self.rev() in obsmod.getrevs(self._repo, 'obsolete')

    def extinct(self):
        """True if the changeset is extinct"""
        return self.rev() in obsmod.getrevs(self._repo, 'extinct')

    def unstable(self):
        """True if the changeset is not obsolete but it's ancestor are"""
        return self.rev() in obsmod.getrevs(self._repo, 'unstable')

    def bumped(self):
        """True if the changeset try to be a successor of a public changeset

        Only non-public and non-obsolete changesets may be bumped.
        """
        return self.rev() in obsmod.getrevs(self._repo, 'bumped')

    def divergent(self):
        """Is a successors of a changeset with multiple possible successors set

        Only non-public and non-obsolete changesets may be divergent.
        """
        return self.rev() in obsmod.getrevs(self._repo, 'divergent')

    def troubled(self):
        """True if the changeset is either unstable, bumped or divergent"""
        return self.unstable() or self.bumped() or self.divergent()

    def troubles(self):
        """return the list of troubles affecting this changesets.

        Troubles are returned as strings. possible values are:
        - unstable,
        - bumped,
        - divergent.
        """
        troubles = []
        if self.unstable():
            troubles.append('unstable')
        if self.bumped():
            troubles.append('bumped')
        if self.divergent():
            troubles.append('divergent')
        return troubles

    def parents(self):
        """return contexts for each parent changeset"""
        return self._parents

    def p1(self):
        return self._parents[0]

    def p2(self):
        parents = self._parents
        if len(parents) == 2:
            return parents[1]
        return changectx(self._repo, nullrev)

    def _fileinfo(self, path):
        if r'_manifest' in self.__dict__:
            try:
                return self._manifest[path], self._manifest.flags(path)
            except KeyError:
                raise error.ManifestLookupError(self._node, path,
                                                _('not found in manifest'))
        if r'_manifestdelta' in self.__dict__ or path in self.files():
            if path in self._manifestdelta:
                return (self._manifestdelta[path],
                        self._manifestdelta.flags(path))
        mfl = self._repo.manifestlog
        try:
            node, flag = mfl[self._changeset.manifest].find(path)
        except KeyError:
            raise error.ManifestLookupError(self._node, path,
                                            _('not found in manifest'))

        return node, flag

    def filenode(self, path):
        return self._fileinfo(path)[0]

    def flags(self, path):
        try:
            return self._fileinfo(path)[1]
        except error.LookupError:
            return ''

    def sub(self, path, allowcreate=True):
        '''return a subrepo for the stored revision of path, never wdir()'''
        return subrepo.subrepo(self, path, allowcreate=allowcreate)

    def nullsub(self, path, pctx):
        return subrepo.nullsubrepo(self, path, pctx)

    def workingsub(self, path):
        '''return a subrepo for the stored revision, or wdir if this is a wdir
        context.
        '''
        return subrepo.subrepo(self, path, allowwdir=True)

    def match(self, pats=None, include=None, exclude=None, default='glob',
              listsubrepos=False, badfn=None):
        r = self._repo
        return matchmod.match(r.root, r.getcwd(), pats,
                              include, exclude, default,
                              auditor=r.nofsauditor, ctx=self,
                              listsubrepos=listsubrepos, badfn=badfn)

    def diff(self, ctx2=None, match=None, **opts):
        """Returns a diff generator for the given contexts and matcher"""
        if ctx2 is None:
            ctx2 = self.p1()
        if ctx2 is not None:
            ctx2 = self._repo[ctx2]
        diffopts = patch.diffopts(self._repo.ui, opts)
        return patch.diff(self._repo, ctx2, self, match=match, opts=diffopts)

    def dirs(self):
        return self._manifest.dirs()

    def hasdir(self, dir):
        return self._manifest.hasdir(dir)

    def status(self, other=None, match=None, listignored=False,
               listclean=False, listunknown=False, listsubrepos=False):
        """return status of files between two nodes or node and working
        directory.

        If other is None, compare this node with working directory.

        returns (modified, added, removed, deleted, unknown, ignored, clean)
        """

        ctx1 = self
        ctx2 = self._repo[other]

        # This next code block is, admittedly, fragile logic that tests for
        # reversing the contexts and wouldn't need to exist if it weren't for
        # the fast (and common) code path of comparing the working directory
        # with its first parent.
        #
        # What we're aiming for here is the ability to call:
        #
        # workingctx.status(parentctx)
        #
        # If we always built the manifest for each context and compared those,
        # then we'd be done. But the special case of the above call means we
        # just copy the manifest of the parent.
        reversed = False
        if (not isinstance(ctx1, changectx)
            and isinstance(ctx2, changectx)):
            reversed = True
            ctx1, ctx2 = ctx2, ctx1

        match = ctx2._matchstatus(ctx1, match)
        r = scmutil.status([], [], [], [], [], [], [])
        r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
                              listunknown)

        if reversed:
            # Reverse added and removed. Clear deleted, unknown and ignored as
            # these make no sense to reverse.
            r = scmutil.status(r.modified, r.removed, r.added, [], [], [],
                               r.clean)

        if listsubrepos:
            for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
                try:
                    rev2 = ctx2.subrev(subpath)
                except KeyError:
                    # A subrepo that existed in node1 was deleted between
                    # node1 and node2 (inclusive). Thus, ctx2's substate
                    # won't contain that subpath. The best we can do ignore it.
                    rev2 = None
                submatch = matchmod.subdirmatcher(subpath, match)
                s = sub.status(rev2, match=submatch, ignored=listignored,
                               clean=listclean, unknown=listunknown,
                               listsubrepos=True)
                for rfiles, sfiles in zip(r, s):
                    rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)

        for l in r:
            l.sort()

        return r
387
387
def _filterederror(repo, changeid):
    """build an exception to be raised about a filtered changeid

    This is extracted in a function to help extensions (eg: evolve) to
    experiment with various message variants."""
    if repo.filtername.startswith('visible'):
        msg = _("hidden revision '%s'") % changeid
        hint = _('use --hidden to access hidden revisions')
        return error.FilteredRepoLookupError(msg, hint=hint)
    msg = _("filtered revision '%s' (not in '%s' subset)")
    msg %= (changeid, repo.filtername)
    return error.FilteredRepoLookupError(msg)
400
400
401 class changectx(basectx):
401 class changectx(basectx):
402 """A changecontext object makes access to data related to a particular
402 """A changecontext object makes access to data related to a particular
403 changeset convenient. It represents a read-only context already present in
403 changeset convenient. It represents a read-only context already present in
404 the repo."""
404 the repo."""
405 def __init__(self, repo, changeid=''):
405 def __init__(self, repo, changeid=''):
406 """changeid is a revision number, node, or tag"""
406 """changeid is a revision number, node, or tag"""
407
407
408 # since basectx.__new__ already took care of copying the object, we
408 # since basectx.__new__ already took care of copying the object, we
409 # don't need to do anything in __init__, so we just exit here
409 # don't need to do anything in __init__, so we just exit here
410 if isinstance(changeid, basectx):
410 if isinstance(changeid, basectx):
411 return
411 return
412
412
413 if changeid == '':
413 if changeid == '':
414 changeid = '.'
414 changeid = '.'
415 self._repo = repo
415 self._repo = repo
416
416
417 try:
417 try:
418 if isinstance(changeid, int):
418 if isinstance(changeid, int):
419 self._node = repo.changelog.node(changeid)
419 self._node = repo.changelog.node(changeid)
420 self._rev = changeid
420 self._rev = changeid
421 return
421 return
422 if not pycompat.ispy3 and isinstance(changeid, long):
422 if not pycompat.ispy3 and isinstance(changeid, long):
423 changeid = str(changeid)
423 changeid = str(changeid)
424 if changeid == 'null':
424 if changeid == 'null':
425 self._node = nullid
425 self._node = nullid
426 self._rev = nullrev
426 self._rev = nullrev
427 return
427 return
428 if changeid == 'tip':
428 if changeid == 'tip':
429 self._node = repo.changelog.tip()
429 self._node = repo.changelog.tip()
430 self._rev = repo.changelog.rev(self._node)
430 self._rev = repo.changelog.rev(self._node)
431 return
431 return
432 if changeid == '.' or changeid == repo.dirstate.p1():
432 if changeid == '.' or changeid == repo.dirstate.p1():
433 # this is a hack to delay/avoid loading obsmarkers
433 # this is a hack to delay/avoid loading obsmarkers
434 # when we know that '.' won't be hidden
434 # when we know that '.' won't be hidden
435 self._node = repo.dirstate.p1()
435 self._node = repo.dirstate.p1()
436 self._rev = repo.unfiltered().changelog.rev(self._node)
436 self._rev = repo.unfiltered().changelog.rev(self._node)
437 return
437 return
438 if len(changeid) == 20:
438 if len(changeid) == 20:
439 try:
439 try:
440 self._node = changeid
440 self._node = changeid
441 self._rev = repo.changelog.rev(changeid)
441 self._rev = repo.changelog.rev(changeid)
442 return
442 return
443 except error.FilteredRepoLookupError:
443 except error.FilteredRepoLookupError:
444 raise
444 raise
445 except LookupError:
445 except LookupError:
446 pass
446 pass
447
447
448 try:
448 try:
449 r = int(changeid)
449 r = int(changeid)
450 if '%d' % r != changeid:
450 if '%d' % r != changeid:
451 raise ValueError
451 raise ValueError
452 l = len(repo.changelog)
452 l = len(repo.changelog)
453 if r < 0:
453 if r < 0:
454 r += l
454 r += l
455 if r < 0 or r >= l and r != wdirrev:
455 if r < 0 or r >= l and r != wdirrev:
456 raise ValueError
456 raise ValueError
457 self._rev = r
457 self._rev = r
458 self._node = repo.changelog.node(r)
458 self._node = repo.changelog.node(r)
459 return
459 return
460 except error.FilteredIndexError:
460 except error.FilteredIndexError:
461 raise
461 raise
462 except (ValueError, OverflowError, IndexError):
462 except (ValueError, OverflowError, IndexError):
463 pass
463 pass
464
464
465 if len(changeid) == 40:
465 if len(changeid) == 40:
466 try:
466 try:
467 self._node = bin(changeid)
467 self._node = bin(changeid)
468 self._rev = repo.changelog.rev(self._node)
468 self._rev = repo.changelog.rev(self._node)
469 return
469 return
470 except error.FilteredLookupError:
470 except error.FilteredLookupError:
471 raise
471 raise
472 except (TypeError, LookupError):
472 except (TypeError, LookupError):
473 pass
473 pass
474
474
475 # lookup bookmarks through the name interface
475 # lookup bookmarks through the name interface
476 try:
476 try:
477 self._node = repo.names.singlenode(repo, changeid)
477 self._node = repo.names.singlenode(repo, changeid)
478 self._rev = repo.changelog.rev(self._node)
478 self._rev = repo.changelog.rev(self._node)
479 return
479 return
480 except KeyError:
480 except KeyError:
481 pass
481 pass
482 except error.FilteredRepoLookupError:
482 except error.FilteredRepoLookupError:
483 raise
483 raise
484 except error.RepoLookupError:
484 except error.RepoLookupError:
485 pass
485 pass
486
486
487 self._node = repo.unfiltered().changelog._partialmatch(changeid)
487 self._node = repo.unfiltered().changelog._partialmatch(changeid)
488 if self._node is not None:
488 if self._node is not None:
489 self._rev = repo.changelog.rev(self._node)
489 self._rev = repo.changelog.rev(self._node)
490 return
490 return
491
491
492 # lookup failed
492 # lookup failed
493 # check if it might have come from damaged dirstate
493 # check if it might have come from damaged dirstate
494 #
494 #
495 # XXX we could avoid the unfiltered if we had a recognizable
495 # XXX we could avoid the unfiltered if we had a recognizable
496 # exception for filtered changeset access
496 # exception for filtered changeset access
497 if changeid in repo.unfiltered().dirstate.parents():
497 if changeid in repo.unfiltered().dirstate.parents():
498 msg = _("working directory has unknown parent '%s'!")
498 msg = _("working directory has unknown parent '%s'!")
499 raise error.Abort(msg % short(changeid))
499 raise error.Abort(msg % short(changeid))
500 try:
500 try:
501 if len(changeid) == 20 and nonascii(changeid):
501 if len(changeid) == 20 and nonascii(changeid):
502 changeid = hex(changeid)
502 changeid = hex(changeid)
503 except TypeError:
503 except TypeError:
504 pass
504 pass
505 except (error.FilteredIndexError, error.FilteredLookupError,
505 except (error.FilteredIndexError, error.FilteredLookupError,
506 error.FilteredRepoLookupError):
506 error.FilteredRepoLookupError):
507 raise _filterederror(repo, changeid)
507 raise _filterederror(repo, changeid)
508 except IndexError:
508 except IndexError:
509 pass
509 pass
510 raise error.RepoLookupError(
510 raise error.RepoLookupError(
511 _("unknown revision '%s'") % changeid)
511 _("unknown revision '%s'") % changeid)
512
512
513 def __hash__(self):
513 def __hash__(self):
514 try:
514 try:
515 return hash(self._rev)
515 return hash(self._rev)
516 except AttributeError:
516 except AttributeError:
517 return id(self)
517 return id(self)
518
518
519 def __nonzero__(self):
519 def __nonzero__(self):
520 return self._rev != nullrev
520 return self._rev != nullrev
521
521
522 __bool__ = __nonzero__
522 __bool__ = __nonzero__
523
523
524 @propertycache
524 @propertycache
525 def _changeset(self):
525 def _changeset(self):
526 return self._repo.changelog.changelogrevision(self.rev())
526 return self._repo.changelog.changelogrevision(self.rev())
527
527
528 @propertycache
528 @propertycache
529 def _manifest(self):
529 def _manifest(self):
530 return self._manifestctx.read()
530 return self._manifestctx.read()
531
531
532 @property
532 @property
533 def _manifestctx(self):
533 def _manifestctx(self):
534 return self._repo.manifestlog[self._changeset.manifest]
534 return self._repo.manifestlog[self._changeset.manifest]
535
535
536 @propertycache
536 @propertycache
537 def _manifestdelta(self):
537 def _manifestdelta(self):
538 return self._manifestctx.readdelta()
538 return self._manifestctx.readdelta()
539
539
540 @propertycache
540 @propertycache
541 def _parents(self):
541 def _parents(self):
542 repo = self._repo
542 repo = self._repo
543 p1, p2 = repo.changelog.parentrevs(self._rev)
543 p1, p2 = repo.changelog.parentrevs(self._rev)
544 if p2 == nullrev:
544 if p2 == nullrev:
545 return [changectx(repo, p1)]
545 return [changectx(repo, p1)]
546 return [changectx(repo, p1), changectx(repo, p2)]
546 return [changectx(repo, p1), changectx(repo, p2)]
547
547
548 def changeset(self):
548 def changeset(self):
549 c = self._changeset
549 c = self._changeset
550 return (
550 return (
551 c.manifest,
551 c.manifest,
552 c.user,
552 c.user,
553 c.date,
553 c.date,
554 c.files,
554 c.files,
555 c.description,
555 c.description,
556 c.extra,
556 c.extra,
557 )
557 )
558 def manifestnode(self):
558 def manifestnode(self):
559 return self._changeset.manifest
559 return self._changeset.manifest
560
560
561 def user(self):
561 def user(self):
562 return self._changeset.user
562 return self._changeset.user
563 def date(self):
563 def date(self):
564 return self._changeset.date
564 return self._changeset.date
565 def files(self):
565 def files(self):
566 return self._changeset.files
566 return self._changeset.files
567 def description(self):
567 def description(self):
568 return self._changeset.description
568 return self._changeset.description
569 def branch(self):
569 def branch(self):
570 return encoding.tolocal(self._changeset.extra.get("branch"))
570 return encoding.tolocal(self._changeset.extra.get("branch"))
571 def closesbranch(self):
571 def closesbranch(self):
572 return 'close' in self._changeset.extra
572 return 'close' in self._changeset.extra
573 def extra(self):
573 def extra(self):
574 return self._changeset.extra
574 return self._changeset.extra
575 def tags(self):
575 def tags(self):
576 return self._repo.nodetags(self._node)
576 return self._repo.nodetags(self._node)
577 def bookmarks(self):
577 def bookmarks(self):
578 return self._repo.nodebookmarks(self._node)
578 return self._repo.nodebookmarks(self._node)
579 def phase(self):
579 def phase(self):
580 return self._repo._phasecache.phase(self._repo, self._rev)
580 return self._repo._phasecache.phase(self._repo, self._rev)
581 def hidden(self):
581 def hidden(self):
582 return self._rev in repoview.filterrevs(self._repo, 'visible')
582 return self._rev in repoview.filterrevs(self._repo, 'visible')
583
583
584 def children(self):
584 def children(self):
585 """return contexts for each child changeset"""
585 """return contexts for each child changeset"""
586 c = self._repo.changelog.children(self._node)
586 c = self._repo.changelog.children(self._node)
587 return [changectx(self._repo, x) for x in c]
587 return [changectx(self._repo, x) for x in c]
588
588
589 def ancestors(self):
589 def ancestors(self):
590 for a in self._repo.changelog.ancestors([self._rev]):
590 for a in self._repo.changelog.ancestors([self._rev]):
591 yield changectx(self._repo, a)
591 yield changectx(self._repo, a)
592
592
593 def descendants(self):
593 def descendants(self):
594 for d in self._repo.changelog.descendants([self._rev]):
594 for d in self._repo.changelog.descendants([self._rev]):
595 yield changectx(self._repo, d)
595 yield changectx(self._repo, d)
596
596
597 def filectx(self, path, fileid=None, filelog=None):
597 def filectx(self, path, fileid=None, filelog=None):
598 """get a file context from this changeset"""
598 """get a file context from this changeset"""
599 if fileid is None:
599 if fileid is None:
600 fileid = self.filenode(path)
600 fileid = self.filenode(path)
601 return filectx(self._repo, path, fileid=fileid,
601 return filectx(self._repo, path, fileid=fileid,
602 changectx=self, filelog=filelog)
602 changectx=self, filelog=filelog)
603
603
604 def ancestor(self, c2, warn=False):
604 def ancestor(self, c2, warn=False):
605 """return the "best" ancestor context of self and c2
605 """return the "best" ancestor context of self and c2
606
606
607 If there are multiple candidates, it will show a message and check
607 If there are multiple candidates, it will show a message and check
608 merge.preferancestor configuration before falling back to the
608 merge.preferancestor configuration before falling back to the
609 revlog ancestor."""
609 revlog ancestor."""
610 # deal with workingctxs
610 # deal with workingctxs
611 n2 = c2._node
611 n2 = c2._node
612 if n2 is None:
612 if n2 is None:
613 n2 = c2._parents[0]._node
613 n2 = c2._parents[0]._node
614 cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
614 cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
615 if not cahs:
615 if not cahs:
616 anc = nullid
616 anc = nullid
617 elif len(cahs) == 1:
617 elif len(cahs) == 1:
618 anc = cahs[0]
618 anc = cahs[0]
619 else:
619 else:
620 # experimental config: merge.preferancestor
620 # experimental config: merge.preferancestor
621 for r in self._repo.ui.configlist('merge', 'preferancestor', ['*']):
621 for r in self._repo.ui.configlist('merge', 'preferancestor', ['*']):
622 try:
622 try:
623 ctx = changectx(self._repo, r)
623 ctx = changectx(self._repo, r)
624 except error.RepoLookupError:
624 except error.RepoLookupError:
625 continue
625 continue
626 anc = ctx.node()
626 anc = ctx.node()
627 if anc in cahs:
627 if anc in cahs:
628 break
628 break
629 else:
629 else:
630 anc = self._repo.changelog.ancestor(self._node, n2)
630 anc = self._repo.changelog.ancestor(self._node, n2)
631 if warn:
631 if warn:
632 self._repo.ui.status(
632 self._repo.ui.status(
633 (_("note: using %s as ancestor of %s and %s\n") %
633 (_("note: using %s as ancestor of %s and %s\n") %
634 (short(anc), short(self._node), short(n2))) +
634 (short(anc), short(self._node), short(n2))) +
635 ''.join(_(" alternatively, use --config "
635 ''.join(_(" alternatively, use --config "
636 "merge.preferancestor=%s\n") %
636 "merge.preferancestor=%s\n") %
637 short(n) for n in sorted(cahs) if n != anc))
637 short(n) for n in sorted(cahs) if n != anc))
638 return changectx(self._repo, anc)
638 return changectx(self._repo, anc)
639
639
640 def descendant(self, other):
640 def descendant(self, other):
641 """True if other is descendant of this changeset"""
641 """True if other is descendant of this changeset"""
642 return self._repo.changelog.descendant(self._rev, other._rev)
642 return self._repo.changelog.descendant(self._rev, other._rev)
643
643
644 def walk(self, match):
644 def walk(self, match):
645 '''Generates matching file names.'''
645 '''Generates matching file names.'''
646
646
647 # Wrap match.bad method to have message with nodeid
647 # Wrap match.bad method to have message with nodeid
648 def bad(fn, msg):
648 def bad(fn, msg):
649 # The manifest doesn't know about subrepos, so don't complain about
649 # The manifest doesn't know about subrepos, so don't complain about
650 # paths into valid subrepos.
650 # paths into valid subrepos.
651 if any(fn == s or fn.startswith(s + '/')
651 if any(fn == s or fn.startswith(s + '/')
652 for s in self.substate):
652 for s in self.substate):
653 return
653 return
654 match.bad(fn, _('no such file in rev %s') % self)
654 match.bad(fn, _('no such file in rev %s') % self)
655
655
656 m = matchmod.badmatch(match, bad)
656 m = matchmod.badmatch(match, bad)
657 return self._manifest.walk(m)
657 return self._manifest.walk(m)
658
658
659 def matches(self, match):
659 def matches(self, match):
660 return self.walk(match)
660 return self.walk(match)
661
661
class basefilectx(object):
    """A filecontext object represents the common logic for its children:
    filectx: read-only access to a filerevision that is already present
             in the repo,
    workingfilectx: a filecontext that represents files from the working
                    directory,
    memfilectx: a filecontext that represents files in-memory,
    overlayfilectx: duplicate another filecontext with some fields overridden.
    """
    @propertycache
    def _filelog(self):
        # filelog (revlog) holding the history of this file's path
        return self._repo.file(self._path)
674
674
675 @propertycache
675 @propertycache
676 def _changeid(self):
676 def _changeid(self):
677 if r'_changeid' in self.__dict__:
677 if r'_changeid' in self.__dict__:
678 return self._changeid
678 return self._changeid
679 elif r'_changectx' in self.__dict__:
679 elif r'_changectx' in self.__dict__:
680 return self._changectx.rev()
680 return self._changectx.rev()
681 elif r'_descendantrev' in self.__dict__:
681 elif r'_descendantrev' in self.__dict__:
682 # this file context was created from a revision with a known
682 # this file context was created from a revision with a known
683 # descendant, we can (lazily) correct for linkrev aliases
683 # descendant, we can (lazily) correct for linkrev aliases
684 return self._adjustlinkrev(self._descendantrev)
684 return self._adjustlinkrev(self._descendantrev)
685 else:
685 else:
686 return self._filelog.linkrev(self._filerev)
686 return self._filelog.linkrev(self._filerev)
687
687
688 @propertycache
688 @propertycache
689 def _filenode(self):
689 def _filenode(self):
690 if r'_fileid' in self.__dict__:
690 if r'_fileid' in self.__dict__:
691 return self._filelog.lookup(self._fileid)
691 return self._filelog.lookup(self._fileid)
692 else:
692 else:
693 return self._changectx.filenode(self._path)
693 return self._changectx.filenode(self._path)
694
694
695 @propertycache
695 @propertycache
696 def _filerev(self):
696 def _filerev(self):
697 return self._filelog.rev(self._filenode)
697 return self._filelog.rev(self._filenode)
698
698
699 @propertycache
699 @propertycache
700 def _repopath(self):
700 def _repopath(self):
701 return self._path
701 return self._path
702
702
703 def __nonzero__(self):
703 def __nonzero__(self):
704 try:
704 try:
705 self._filenode
705 self._filenode
706 return True
706 return True
707 except error.LookupError:
707 except error.LookupError:
708 # file is missing
708 # file is missing
709 return False
709 return False
710
710
711 __bool__ = __nonzero__
711 __bool__ = __nonzero__
712
712
713 def __str__(self):
713 def __str__(self):
714 try:
714 try:
715 return "%s@%s" % (self.path(), self._changectx)
715 return "%s@%s" % (self.path(), self._changectx)
716 except error.LookupError:
716 except error.LookupError:
717 return "%s@???" % self.path()
717 return "%s@???" % self.path()
718
718
719 def __repr__(self):
719 def __repr__(self):
720 return "<%s %s>" % (type(self).__name__, str(self))
720 return "<%s %s>" % (type(self).__name__, str(self))
721
721
722 def __hash__(self):
722 def __hash__(self):
723 try:
723 try:
724 return hash((self._path, self._filenode))
724 return hash((self._path, self._filenode))
725 except AttributeError:
725 except AttributeError:
726 return id(self)
726 return id(self)
727
727
728 def __eq__(self, other):
728 def __eq__(self, other):
729 try:
729 try:
730 return (type(self) == type(other) and self._path == other._path
730 return (type(self) == type(other) and self._path == other._path
731 and self._filenode == other._filenode)
731 and self._filenode == other._filenode)
732 except AttributeError:
732 except AttributeError:
733 return False
733 return False
734
734
735 def __ne__(self, other):
735 def __ne__(self, other):
736 return not (self == other)
736 return not (self == other)
737
737
738 def filerev(self):
738 def filerev(self):
739 return self._filerev
739 return self._filerev
740 def filenode(self):
740 def filenode(self):
741 return self._filenode
741 return self._filenode
742 @propertycache
742 @propertycache
743 def _flags(self):
743 def _flags(self):
744 return self._changectx.flags(self._path)
744 return self._changectx.flags(self._path)
745 def flags(self):
745 def flags(self):
746 return self._flags
746 return self._flags
747 def filelog(self):
747 def filelog(self):
748 return self._filelog
748 return self._filelog
749 def rev(self):
749 def rev(self):
750 return self._changeid
750 return self._changeid
751 def linkrev(self):
751 def linkrev(self):
752 return self._filelog.linkrev(self._filerev)
752 return self._filelog.linkrev(self._filerev)
753 def node(self):
753 def node(self):
754 return self._changectx.node()
754 return self._changectx.node()
755 def hex(self):
755 def hex(self):
756 return self._changectx.hex()
756 return self._changectx.hex()
757 def user(self):
757 def user(self):
758 return self._changectx.user()
758 return self._changectx.user()
759 def date(self):
759 def date(self):
760 return self._changectx.date()
760 return self._changectx.date()
761 def files(self):
761 def files(self):
762 return self._changectx.files()
762 return self._changectx.files()
763 def description(self):
763 def description(self):
764 return self._changectx.description()
764 return self._changectx.description()
765 def branch(self):
765 def branch(self):
766 return self._changectx.branch()
766 return self._changectx.branch()
767 def extra(self):
767 def extra(self):
768 return self._changectx.extra()
768 return self._changectx.extra()
769 def phase(self):
769 def phase(self):
770 return self._changectx.phase()
770 return self._changectx.phase()
771 def phasestr(self):
771 def phasestr(self):
772 return self._changectx.phasestr()
772 return self._changectx.phasestr()
773 def manifest(self):
773 def manifest(self):
774 return self._changectx.manifest()
774 return self._changectx.manifest()
775 def changectx(self):
775 def changectx(self):
776 return self._changectx
776 return self._changectx
777 def renamed(self):
777 def renamed(self):
778 return self._copied
778 return self._copied
779 def repo(self):
779 def repo(self):
780 return self._repo
780 return self._repo
781 def size(self):
781 def size(self):
782 return len(self.data())
782 return len(self.data())
783
783
784 def path(self):
784 def path(self):
785 return self._path
785 return self._path
786
786
787 def isbinary(self):
787 def isbinary(self):
788 try:
788 try:
789 return util.binary(self.data())
789 return util.binary(self.data())
790 except IOError:
790 except IOError:
791 return False
791 return False
792 def isexec(self):
792 def isexec(self):
793 return 'x' in self.flags()
793 return 'x' in self.flags()
794 def islink(self):
794 def islink(self):
795 return 'l' in self.flags()
795 return 'l' in self.flags()
796
796
797 def isabsent(self):
797 def isabsent(self):
798 """whether this filectx represents a file not in self._changectx
798 """whether this filectx represents a file not in self._changectx
799
799
800 This is mainly for merge code to detect change/delete conflicts. This is
800 This is mainly for merge code to detect change/delete conflicts. This is
801 expected to be True for all subclasses of basectx."""
801 expected to be True for all subclasses of basectx."""
802 return False
802 return False
803
803
804 _customcmp = False
804 _customcmp = False
805 def cmp(self, fctx):
805 def cmp(self, fctx):
806 """compare with other file context
806 """compare with other file context
807
807
808 returns True if different than fctx.
808 returns True if different than fctx.
809 """
809 """
810 if fctx._customcmp:
810 if fctx._customcmp:
811 return fctx.cmp(self)
811 return fctx.cmp(self)
812
812
813 if (fctx._filenode is None
813 if (fctx._filenode is None
814 and (self._repo._encodefilterpats
814 and (self._repo._encodefilterpats
815 # if file data starts with '\1\n', empty metadata block is
815 # if file data starts with '\1\n', empty metadata block is
816 # prepended, which adds 4 bytes to filelog.size().
816 # prepended, which adds 4 bytes to filelog.size().
817 or self.size() - 4 == fctx.size())
817 or self.size() - 4 == fctx.size())
818 or self.size() == fctx.size()):
818 or self.size() == fctx.size()):
819 return self._filelog.cmp(self._filenode, fctx.data())
819 return self._filelog.cmp(self._filenode, fctx.data())
820
820
821 return True
821 return True
822
822
823 def _adjustlinkrev(self, srcrev, inclusive=False):
823 def _adjustlinkrev(self, srcrev, inclusive=False):
824 """return the first ancestor of <srcrev> introducing <fnode>
824 """return the first ancestor of <srcrev> introducing <fnode>
825
825
826 If the linkrev of the file revision does not point to an ancestor of
826 If the linkrev of the file revision does not point to an ancestor of
827 srcrev, we'll walk down the ancestors until we find one introducing
827 srcrev, we'll walk down the ancestors until we find one introducing
828 this file revision.
828 this file revision.
829
829
830 :srcrev: the changeset revision we search ancestors from
830 :srcrev: the changeset revision we search ancestors from
831 :inclusive: if true, the src revision will also be checked
831 :inclusive: if true, the src revision will also be checked
832 """
832 """
833 repo = self._repo
833 repo = self._repo
834 cl = repo.unfiltered().changelog
834 cl = repo.unfiltered().changelog
835 mfl = repo.manifestlog
835 mfl = repo.manifestlog
836 # fetch the linkrev
836 # fetch the linkrev
837 lkr = self.linkrev()
837 lkr = self.linkrev()
838 # hack to reuse ancestor computation when searching for renames
838 # hack to reuse ancestor computation when searching for renames
839 memberanc = getattr(self, '_ancestrycontext', None)
839 memberanc = getattr(self, '_ancestrycontext', None)
840 iteranc = None
840 iteranc = None
841 if srcrev is None:
841 if srcrev is None:
842 # wctx case, used by workingfilectx during mergecopy
842 # wctx case, used by workingfilectx during mergecopy
843 revs = [p.rev() for p in self._repo[None].parents()]
843 revs = [p.rev() for p in self._repo[None].parents()]
844 inclusive = True # we skipped the real (revless) source
844 inclusive = True # we skipped the real (revless) source
845 else:
845 else:
846 revs = [srcrev]
846 revs = [srcrev]
847 if memberanc is None:
847 if memberanc is None:
848 memberanc = iteranc = cl.ancestors(revs, lkr,
848 memberanc = iteranc = cl.ancestors(revs, lkr,
849 inclusive=inclusive)
849 inclusive=inclusive)
850 # check if this linkrev is an ancestor of srcrev
850 # check if this linkrev is an ancestor of srcrev
851 if lkr not in memberanc:
851 if lkr not in memberanc:
852 if iteranc is None:
852 if iteranc is None:
853 iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
853 iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
854 fnode = self._filenode
854 fnode = self._filenode
855 path = self._path
855 path = self._path
856 for a in iteranc:
856 for a in iteranc:
857 ac = cl.read(a) # get changeset data (we avoid object creation)
857 ac = cl.read(a) # get changeset data (we avoid object creation)
858 if path in ac[3]: # checking the 'files' field.
858 if path in ac[3]: # checking the 'files' field.
859 # The file has been touched, check if the content is
859 # The file has been touched, check if the content is
860 # similar to the one we search for.
860 # similar to the one we search for.
861 if fnode == mfl[ac[0]].readfast().get(path):
861 if fnode == mfl[ac[0]].readfast().get(path):
862 return a
862 return a
863 # In theory, we should never get out of that loop without a result.
863 # In theory, we should never get out of that loop without a result.
864 # But if manifest uses a buggy file revision (not children of the
864 # But if manifest uses a buggy file revision (not children of the
865 # one it replaces) we could. Such a buggy situation will likely
865 # one it replaces) we could. Such a buggy situation will likely
866 # result is crash somewhere else at to some point.
866 # result is crash somewhere else at to some point.
867 return lkr
867 return lkr
868
868
869 def introrev(self):
869 def introrev(self):
870 """return the rev of the changeset which introduced this file revision
870 """return the rev of the changeset which introduced this file revision
871
871
872 This method is different from linkrev because it take into account the
872 This method is different from linkrev because it take into account the
873 changeset the filectx was created from. It ensures the returned
873 changeset the filectx was created from. It ensures the returned
874 revision is one of its ancestors. This prevents bugs from
874 revision is one of its ancestors. This prevents bugs from
875 'linkrev-shadowing' when a file revision is used by multiple
875 'linkrev-shadowing' when a file revision is used by multiple
876 changesets.
876 changesets.
877 """
877 """
878 lkr = self.linkrev()
878 lkr = self.linkrev()
879 attrs = vars(self)
879 attrs = vars(self)
880 noctx = not ('_changeid' in attrs or '_changectx' in attrs)
880 noctx = not ('_changeid' in attrs or '_changectx' in attrs)
881 if noctx or self.rev() == lkr:
881 if noctx or self.rev() == lkr:
882 return self.linkrev()
882 return self.linkrev()
883 return self._adjustlinkrev(self.rev(), inclusive=True)
883 return self._adjustlinkrev(self.rev(), inclusive=True)
884
884
885 def _parentfilectx(self, path, fileid, filelog):
885 def _parentfilectx(self, path, fileid, filelog):
886 """create parent filectx keeping ancestry info for _adjustlinkrev()"""
886 """create parent filectx keeping ancestry info for _adjustlinkrev()"""
887 fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
887 fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
888 if '_changeid' in vars(self) or '_changectx' in vars(self):
888 if '_changeid' in vars(self) or '_changectx' in vars(self):
889 # If self is associated with a changeset (probably explicitly
889 # If self is associated with a changeset (probably explicitly
890 # fed), ensure the created filectx is associated with a
890 # fed), ensure the created filectx is associated with a
891 # changeset that is an ancestor of self.changectx.
891 # changeset that is an ancestor of self.changectx.
892 # This lets us later use _adjustlinkrev to get a correct link.
892 # This lets us later use _adjustlinkrev to get a correct link.
893 fctx._descendantrev = self.rev()
893 fctx._descendantrev = self.rev()
894 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
894 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
895 elif '_descendantrev' in vars(self):
895 elif '_descendantrev' in vars(self):
896 # Otherwise propagate _descendantrev if we have one associated.
896 # Otherwise propagate _descendantrev if we have one associated.
897 fctx._descendantrev = self._descendantrev
897 fctx._descendantrev = self._descendantrev
898 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
898 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
899 return fctx
899 return fctx
900
900
901 def parents(self):
901 def parents(self):
902 _path = self._path
902 _path = self._path
903 fl = self._filelog
903 fl = self._filelog
904 parents = self._filelog.parents(self._filenode)
904 parents = self._filelog.parents(self._filenode)
905 pl = [(_path, node, fl) for node in parents if node != nullid]
905 pl = [(_path, node, fl) for node in parents if node != nullid]
906
906
907 r = fl.renamed(self._filenode)
907 r = fl.renamed(self._filenode)
908 if r:
908 if r:
909 # - In the simple rename case, both parent are nullid, pl is empty.
909 # - In the simple rename case, both parent are nullid, pl is empty.
910 # - In case of merge, only one of the parent is null id and should
910 # - In case of merge, only one of the parent is null id and should
911 # be replaced with the rename information. This parent is -always-
911 # be replaced with the rename information. This parent is -always-
912 # the first one.
912 # the first one.
913 #
913 #
914 # As null id have always been filtered out in the previous list
914 # As null id have always been filtered out in the previous list
915 # comprehension, inserting to 0 will always result in "replacing
915 # comprehension, inserting to 0 will always result in "replacing
916 # first nullid parent with rename information.
916 # first nullid parent with rename information.
917 pl.insert(0, (r[0], r[1], self._repo.file(r[0])))
917 pl.insert(0, (r[0], r[1], self._repo.file(r[0])))
918
918
919 return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]
919 return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]
920
920
921 def p1(self):
921 def p1(self):
922 return self.parents()[0]
922 return self.parents()[0]
923
923
924 def p2(self):
924 def p2(self):
925 p = self.parents()
925 p = self.parents()
926 if len(p) == 2:
926 if len(p) == 2:
927 return p[1]
927 return p[1]
928 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
928 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
929
929
930 def annotate(self, follow=False, linenumber=False, skiprevs=None,
930 def annotate(self, follow=False, linenumber=False, skiprevs=None,
931 diffopts=None):
931 diffopts=None):
932 '''returns a list of tuples of ((ctx, number), line) for each line
932 '''returns a list of tuples of ((ctx, number), line) for each line
933 in the file, where ctx is the filectx of the node where
933 in the file, where ctx is the filectx of the node where
934 that line was last changed; if linenumber parameter is true, number is
934 that line was last changed; if linenumber parameter is true, number is
935 the line number at the first appearance in the managed file, otherwise,
935 the line number at the first appearance in the managed file, otherwise,
936 number has a fixed value of False.
936 number has a fixed value of False.
937 '''
937 '''
938
938
939 def lines(text):
939 def lines(text):
940 if text.endswith("\n"):
940 if text.endswith("\n"):
941 return text.count("\n")
941 return text.count("\n")
942 return text.count("\n") + int(bool(text))
942 return text.count("\n") + int(bool(text))
943
943
944 if linenumber:
944 if linenumber:
945 def decorate(text, rev):
945 def decorate(text, rev):
946 return ([(rev, i) for i in xrange(1, lines(text) + 1)], text)
946 return ([(rev, i) for i in xrange(1, lines(text) + 1)], text)
947 else:
947 else:
948 def decorate(text, rev):
948 def decorate(text, rev):
949 return ([(rev, False)] * lines(text), text)
949 return ([(rev, False)] * lines(text), text)
950
950
951 getlog = util.lrucachefunc(lambda x: self._repo.file(x))
951 getlog = util.lrucachefunc(lambda x: self._repo.file(x))
952
952
953 def parents(f):
953 def parents(f):
954 # Cut _descendantrev here to mitigate the penalty of lazy linkrev
954 # Cut _descendantrev here to mitigate the penalty of lazy linkrev
955 # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
955 # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
956 # from the topmost introrev (= srcrev) down to p.linkrev() if it
956 # from the topmost introrev (= srcrev) down to p.linkrev() if it
957 # isn't an ancestor of the srcrev.
957 # isn't an ancestor of the srcrev.
958 f._changeid
958 f._changeid
959 pl = f.parents()
959 pl = f.parents()
960
960
961 # Don't return renamed parents if we aren't following.
961 # Don't return renamed parents if we aren't following.
962 if not follow:
962 if not follow:
963 pl = [p for p in pl if p.path() == f.path()]
963 pl = [p for p in pl if p.path() == f.path()]
964
964
965 # renamed filectx won't have a filelog yet, so set it
965 # renamed filectx won't have a filelog yet, so set it
966 # from the cache to save time
966 # from the cache to save time
967 for p in pl:
967 for p in pl:
968 if not '_filelog' in p.__dict__:
968 if not '_filelog' in p.__dict__:
969 p._filelog = getlog(p.path())
969 p._filelog = getlog(p.path())
970
970
971 return pl
971 return pl
972
972
973 # use linkrev to find the first changeset where self appeared
973 # use linkrev to find the first changeset where self appeared
974 base = self
974 base = self
975 introrev = self.introrev()
975 introrev = self.introrev()
976 if self.rev() != introrev:
976 if self.rev() != introrev:
977 base = self.filectx(self.filenode(), changeid=introrev)
977 base = self.filectx(self.filenode(), changeid=introrev)
978 if getattr(base, '_ancestrycontext', None) is None:
978 if getattr(base, '_ancestrycontext', None) is None:
979 cl = self._repo.changelog
979 cl = self._repo.changelog
980 if introrev is None:
980 if introrev is None:
981 # wctx is not inclusive, but works because _ancestrycontext
981 # wctx is not inclusive, but works because _ancestrycontext
982 # is used to test filelog revisions
982 # is used to test filelog revisions
983 ac = cl.ancestors([p.rev() for p in base.parents()],
983 ac = cl.ancestors([p.rev() for p in base.parents()],
984 inclusive=True)
984 inclusive=True)
985 else:
985 else:
986 ac = cl.ancestors([introrev], inclusive=True)
986 ac = cl.ancestors([introrev], inclusive=True)
987 base._ancestrycontext = ac
987 base._ancestrycontext = ac
988
988
989 # This algorithm would prefer to be recursive, but Python is a
989 # This algorithm would prefer to be recursive, but Python is a
990 # bit recursion-hostile. Instead we do an iterative
990 # bit recursion-hostile. Instead we do an iterative
991 # depth-first search.
991 # depth-first search.
992
992
993 # 1st DFS pre-calculates pcache and needed
993 # 1st DFS pre-calculates pcache and needed
994 visit = [base]
994 visit = [base]
995 pcache = {}
995 pcache = {}
996 needed = {base: 1}
996 needed = {base: 1}
997 while visit:
997 while visit:
998 f = visit.pop()
998 f = visit.pop()
999 if f in pcache:
999 if f in pcache:
1000 continue
1000 continue
1001 pl = parents(f)
1001 pl = parents(f)
1002 pcache[f] = pl
1002 pcache[f] = pl
1003 for p in pl:
1003 for p in pl:
1004 needed[p] = needed.get(p, 0) + 1
1004 needed[p] = needed.get(p, 0) + 1
1005 if p not in pcache:
1005 if p not in pcache:
1006 visit.append(p)
1006 visit.append(p)
1007
1007
1008 # 2nd DFS does the actual annotate
1008 # 2nd DFS does the actual annotate
1009 visit[:] = [base]
1009 visit[:] = [base]
1010 hist = {}
1010 hist = {}
1011 while visit:
1011 while visit:
1012 f = visit[-1]
1012 f = visit[-1]
1013 if f in hist:
1013 if f in hist:
1014 visit.pop()
1014 visit.pop()
1015 continue
1015 continue
1016
1016
1017 ready = True
1017 ready = True
1018 pl = pcache[f]
1018 pl = pcache[f]
1019 for p in pl:
1019 for p in pl:
1020 if p not in hist:
1020 if p not in hist:
1021 ready = False
1021 ready = False
1022 visit.append(p)
1022 visit.append(p)
1023 if ready:
1023 if ready:
1024 visit.pop()
1024 visit.pop()
1025 curr = decorate(f.data(), f)
1025 curr = decorate(f.data(), f)
1026 skipchild = False
1026 skipchild = False
1027 if skiprevs is not None:
1027 if skiprevs is not None:
1028 skipchild = f._changeid in skiprevs
1028 skipchild = f._changeid in skiprevs
1029 curr = _annotatepair([hist[p] for p in pl], f, curr, skipchild,
1029 curr = _annotatepair([hist[p] for p in pl], f, curr, skipchild,
1030 diffopts)
1030 diffopts)
1031 for p in pl:
1031 for p in pl:
1032 if needed[p] == 1:
1032 if needed[p] == 1:
1033 del hist[p]
1033 del hist[p]
1034 del needed[p]
1034 del needed[p]
1035 else:
1035 else:
1036 needed[p] -= 1
1036 needed[p] -= 1
1037
1037
1038 hist[f] = curr
1038 hist[f] = curr
1039 del pcache[f]
1039 del pcache[f]
1040
1040
1041 return zip(hist[base][0], hist[base][1].splitlines(True))
1041 return zip(hist[base][0], hist[base][1].splitlines(True))
1042
1042
def ancestors(self, followfirst=False):
    """Yield the ancestor file contexts of this file context.

    Pending ancestors are kept in a dict keyed by (linkrev, filenode)
    and the one with the greatest key is always yielded next, so the
    walk proceeds in decreasing (linkrev, filenode) order.  When
    ``followfirst`` is true, only first parents are followed.
    """
    # slice bound for parents(): first parent only, or all of them
    limit = 1 if followfirst else None
    pending = {}
    ctx = self
    while True:
        for parent in ctx.parents()[:limit]:
            # keying by (linkrev, filenode) dedupes ancestors reached
            # through multiple paths
            pending[(parent.linkrev(), parent.filenode())] = parent
        if not pending:
            return
        ctx = pending.pop(max(pending))
        yield ctx
1058
1058
def _annotatepair(parents, childfctx, child, skipchild, diffopts):
    r'''
    Given parent and child fctxes and annotate data for parents, for all lines
    in either parent that match the child, annotate the child with the parent's
    data.

    Additionally, if `skipchild` is True, replace all other lines with parent
    annotate data as well such that child is never blamed for any lines.

    >>> oldfctx = 'old'
    >>> p1fctx, p2fctx, childfctx = 'p1', 'p2', 'c'
    >>> olddata = 'a\nb\n'
    >>> p1data = 'a\nb\nc\n'
    >>> p2data = 'a\nc\nd\n'
    >>> childdata = 'a\nb2\nc\nc2\nd\n'
    >>> diffopts = mdiff.diffopts()

    >>> def decorate(text, rev):
    ...     return ([(rev, i) for i in xrange(1, text.count('\n') + 1)], text)

    Basic usage:

    >>> oldann = decorate(olddata, oldfctx)
    >>> p1ann = decorate(p1data, p1fctx)
    >>> p1ann = _annotatepair([oldann], p1fctx, p1ann, False, diffopts)
    >>> p1ann[0]
    [('old', 1), ('old', 2), ('p1', 3)]
    >>> p2ann = decorate(p2data, p2fctx)
    >>> p2ann = _annotatepair([oldann], p2fctx, p2ann, False, diffopts)
    >>> p2ann[0]
    [('old', 1), ('p2', 2), ('p2', 3)]

    Test with multiple parents (note the difference caused by ordering):

    >>> childann = decorate(childdata, childfctx)
    >>> childann = _annotatepair([p1ann, p2ann], childfctx, childann, False,
    ...     diffopts)
    >>> childann[0]
    [('old', 1), ('c', 2), ('p2', 2), ('c', 4), ('p2', 3)]

    >>> childann = decorate(childdata, childfctx)
    >>> childann = _annotatepair([p2ann, p1ann], childfctx, childann, False,
    ...     diffopts)
    >>> childann[0]
    [('old', 1), ('c', 2), ('p1', 3), ('c', 4), ('p2', 3)]

    Test with skipchild (note the difference caused by ordering):

    >>> childann = decorate(childdata, childfctx)
    >>> childann = _annotatepair([p1ann, p2ann], childfctx, childann, True,
    ...     diffopts)
    >>> childann[0]
    [('old', 1), ('old', 2), ('p2', 2), ('p2', 2), ('p2', 3)]

    >>> childann = decorate(childdata, childfctx)
    >>> childann = _annotatepair([p2ann, p1ann], childfctx, childann, True,
    ...     diffopts)
    >>> childann[0]
    [('old', 1), ('old', 2), ('p1', 3), ('p1', 3), ('p2', 3)]
    '''
    # one (annotatedata, diff-blocks) pair per parent
    pblocks = [(parent, mdiff.allblocks(parent[1], child[1], opts=diffopts))
               for parent in parents]

    if skipchild:
        # Need to iterate over the blocks twice -- make it a list
        pblocks = [(p, list(blocks)) for (p, blocks) in pblocks]
    # Mercurial currently prefers p2 over p1 for annotate.
    # TODO: change this?
    for parent, blocks in pblocks:
        for (a1, a2, b1, b2), t in blocks:
            # Changed blocks ('!') or blocks made only of blank lines ('~')
            # belong to the child.
            if t == '=':
                # unchanged block: copy the parent's annotate data over
                child[0][b1:b2] = parent[0][a1:a2]

    if skipchild:
        # Now try and match up anything that couldn't be matched,
        # Reversing pblocks maintains bias towards p2, matching above
        # behavior.
        pblocks.reverse()

        # The heuristics are:
        # * Work on blocks of changed lines (effectively diff hunks with -U0).
        #   This could potentially be smarter but works well enough.
        # * For a non-matching section, do a best-effort fit. Match lines in
        #   diff hunks 1:1, dropping lines as necessary.
        # * Repeat the last line as a last resort.

        # First, replace as much as possible without repeating the last line.
        # 'remaining' collects, per parent, the blocks whose parent side is
        # shorter than the child side (handled in the second pass below).
        remaining = [(parent, []) for parent, _blocks in pblocks]
        for idx, (parent, blocks) in enumerate(pblocks):
            for (a1, a2, b1, b2), _t in blocks:
                if a2 - a1 >= b2 - b1:
                    for bk in xrange(b1, b2):
                        # only reassign lines still blamed on the child
                        if child[0][bk][0] == childfctx:
                            ak = min(a1 + (bk - b1), a2 - 1)
                            child[0][bk] = parent[0][ak]
                else:
                    remaining[idx][1].append((a1, a2, b1, b2))

        # Then, look at anything left, which might involve repeating the last
        # line.
        for parent, blocks in remaining:
            for a1, a2, b1, b2 in blocks:
                for bk in xrange(b1, b2):
                    if child[0][bk][0] == childfctx:
                        # min() clamps to the last parent line, repeating it
                        # when the child block is longer than the parent block
                        ak = min(a1 + (bk - b1), a2 - 1)
                        child[0][bk] = parent[0][ak]
    return child
1168
1168
class filectx(basefilectx):
    """A filecontext object makes access to data related to a particular
    filerevision convenient."""
    def __init__(self, repo, path, changeid=None, fileid=None,
                 filelog=None, changectx=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        self._repo = repo
        self._path = path

        # at least one way of locating the file revision must be supplied
        assert (changeid is not None
                or fileid is not None
                or changectx is not None), \
                ("bad args: changeid=%r, fileid=%r, changectx=%r"
                 % (changeid, fileid, changectx))

        if filelog is not None:
            self._filelog = filelog

        # pre-populate only the attributes we were given; anything missing
        # is derived lazily (propertycache) from the others
        if changeid is not None:
            self._changeid = changeid
        if changectx is not None:
            self._changectx = changectx
        if fileid is not None:
            self._fileid = fileid

    @propertycache
    def _changectx(self):
        # changectx for self._changeid, computed on first access
        try:
            return changectx(self._repo, self._changeid)
        except error.FilteredRepoLookupError:
            # Linkrev may point to any revision in the repository.  When the
            # repository is filtered this may lead to `filectx` trying to build
            # `changectx` for filtered revision. In such case we fallback to
            # creating `changectx` on the unfiltered version of the reposition.
            # This fallback should not be an issue because `changectx` from
            # `filectx` are not used in complex operations that care about
            # filtering.
            #
            # This fallback is a cheap and dirty fix that prevent several
            # crashes. It does not ensure the behavior is correct. However the
            # behavior was not correct before filtering either and "incorrect
            # behavior" is seen as better as "crash"
            #
            # Linkrevs have several serious troubles with filtering that are
            # complicated to solve. Proper handling of the issue here should be
            # considered when solving linkrev issue are on the table.
            return changectx(self._repo.unfiltered(), self._changeid)

    def filectx(self, fileid, changeid=None):
        '''opens an arbitrary revision of the file without
        opening a new filelog'''
        return filectx(self._repo, self._path, fileid=fileid,
                       filelog=self._filelog, changeid=changeid)

    def rawdata(self):
        # revision data without filelog flag processing applied
        return self._filelog.revision(self._filenode, raw=True)

    def rawflags(self):
        """low-level revlog flags"""
        return self._filelog.flags(self._filerev)

    def data(self):
        # file content, honoring the censor policy for censored nodes
        try:
            return self._filelog.read(self._filenode)
        except error.CensoredNodeError:
            if self._repo.ui.config("censor", "policy", "abort") == "ignore":
                return ""
            raise error.Abort(_("censored node: %s") % short(self._filenode),
                              hint=_("set censor.policy to ignore errors"))

    def size(self):
        return self._filelog.size(self._filerev)

    @propertycache
    def _copied(self):
        """check if file was actually renamed in this changeset revision

        If rename logged in file revision, we report copy for changeset only
        if file revisions linkrev points back to the changeset in question
        or both changeset parents contain different file revisions.
        """

        renamed = self._filelog.renamed(self._filenode)
        if not renamed:
            return renamed

        if self.rev() == self.linkrev():
            return renamed

        name = self.path()
        fnode = self._filenode
        for p in self._changectx.parents():
            try:
                # the same file revision exists in a parent: not a rename
                # from the perspective of this changeset
                if fnode == p.filenode(name):
                    return None
            except error.LookupError:
                pass
        return renamed

    def children(self):
        # hard for renames
        c = self._filelog.children(self._filenode)
        return [filectx(self._repo, self._path, fileid=x,
                        filelog=self._filelog) for x in c]
1274
1274
def _changesrange(fctx1, fctx2, linerange2, diffopts):
    """Return `(diffinrange, linerange1)` where `diffinrange` is True
    if diff from fctx2 to fctx1 has changes in linerange2 and
    `linerange1` is the new line range for fctx1.
    """
    allblocks = mdiff.allblocks(fctx1.data(), fctx2.data(), diffopts)
    inrangeblocks, linerange1 = mdiff.blocksinrange(allblocks, linerange2)
    # a '!' block type marks an actual change within the range
    diffinrange = False
    for _block, stype in inrangeblocks:
        if stype == '!':
            diffinrange = True
            break
    return diffinrange, linerange1
1284
1284
def blockancestors(fctx, fromline, toline, followfirst=False):
    """Yield ancestors of `fctx` with respect to the block of lines within
    `fromline`-`toline` range.
    """
    diffopts = patch.diffopts(fctx._repo.ui)
    introrev = fctx.introrev()
    if fctx.rev() != introrev:
        # rebase the file context onto the revision that introduced it
        fctx = fctx.filectx(fctx.filenode(), changeid=introrev)
    # pending contexts to examine, keyed by (linkrev, filenode); the entry
    # with the greatest key is processed first
    visit = {(fctx.linkrev(), fctx.filenode()): (fctx, (fromline, toline))}
    while visit:
        c, linerange2 = visit.pop(max(visit))
        pl = c.parents()
        if followfirst:
            pl = pl[:1]
        if not pl:
            # The block originates from the initial revision.
            yield c, linerange2
            continue
        inrange = False
        for p in pl:
            inrangep, linerange1 = _changesrange(p, c, linerange2, diffopts)
            inrange = inrange or inrangep
            if linerange1[0] == linerange1[1]:
                # Parent's linerange is empty, meaning that the block got
                # introduced in this revision; no need to go futher in this
                # branch.
                continue
            # Set _descendantrev with 'c' (a known descendant) so that, when
            # _adjustlinkrev is called for 'p', it receives this descendant
            # (as srcrev) instead possibly topmost introrev.
            p._descendantrev = c.rev()
            visit[p.linkrev(), p.filenode()] = p, linerange1
        if inrange:
            # at least one parent-to-c diff touched the range: c changed it
            yield c, linerange2
1319
1319
def blockdescendants(fctx, fromline, toline):
    """Yield descendants of `fctx` with respect to the block of lines within
    `fromline`-`toline` range.
    """
    # First possibly yield 'fctx' if it has changes in range with respect to
    # its parents.
    try:
        c, linerange1 = next(blockancestors(fctx, fromline, toline))
    except StopIteration:
        pass
    else:
        if c == fctx:
            yield c, linerange1

    diffopts = patch.diffopts(fctx._repo.ui)
    fl = fctx.filelog()
    # maps a visited filelog revision to (filectx, line range) for the block
    seen = {fctx.filerev(): (fctx, (fromline, toline))}
    for i in fl.descendants([fctx.filerev()]):
        c = fctx.filectx(i)
        inrange = False
        for x in fl.parentrevs(i):
            try:
                p, linerange2 = seen[x]
            except KeyError:
                # nullrev or other branch
                continue
            inrangep, linerange1 = _changesrange(c, p, linerange2, diffopts)
            inrange = inrange or inrangep
            # If revision 'i' has been seen (it's a merge), we assume that its
            # line range is the same independently of which parents was used
            # to compute it.
            assert i not in seen or seen[i][1] == linerange1, (
                'computed line range for %s is not consistent between '
                'ancestor branches' % c)
            seen[i] = c, linerange1
        if inrange:
            # the block changed between 'i' and at least one seen parent
            yield c, linerange1
1357
1357
1358 class committablectx(basectx):
1358 class committablectx(basectx):
1359 """A committablectx object provides common functionality for a context that
1359 """A committablectx object provides common functionality for a context that
1360 wants the ability to commit, e.g. workingctx or memctx."""
1360 wants the ability to commit, e.g. workingctx or memctx."""
1361 def __init__(self, repo, text="", user=None, date=None, extra=None,
1361 def __init__(self, repo, text="", user=None, date=None, extra=None,
1362 changes=None):
1362 changes=None):
1363 self._repo = repo
1363 self._repo = repo
1364 self._rev = None
1364 self._rev = None
1365 self._node = None
1365 self._node = None
1366 self._text = text
1366 self._text = text
1367 if date:
1367 if date:
1368 self._date = util.parsedate(date)
1368 self._date = util.parsedate(date)
1369 if user:
1369 if user:
1370 self._user = user
1370 self._user = user
1371 if changes:
1371 if changes:
1372 self._status = changes
1372 self._status = changes
1373
1373
1374 self._extra = {}
1374 self._extra = {}
1375 if extra:
1375 if extra:
1376 self._extra = extra.copy()
1376 self._extra = extra.copy()
1377 if 'branch' not in self._extra:
1377 if 'branch' not in self._extra:
1378 try:
1378 try:
1379 branch = encoding.fromlocal(self._repo.dirstate.branch())
1379 branch = encoding.fromlocal(self._repo.dirstate.branch())
1380 except UnicodeDecodeError:
1380 except UnicodeDecodeError:
1381 raise error.Abort(_('branch name not in UTF-8!'))
1381 raise error.Abort(_('branch name not in UTF-8!'))
1382 self._extra['branch'] = branch
1382 self._extra['branch'] = branch
1383 if self._extra['branch'] == '':
1383 if self._extra['branch'] == '':
1384 self._extra['branch'] = 'default'
1384 self._extra['branch'] = 'default'
1385
1385
1386 def __str__(self):
1386 def __str__(self):
1387 return str(self._parents[0]) + r"+"
1387 return str(self._parents[0]) + r"+"
1388
1388
1389 def __bytes__(self):
1389 def __bytes__(self):
1390 return bytes(self._parents[0]) + "+"
1390 return bytes(self._parents[0]) + "+"
1391
1391
1392 def __nonzero__(self):
1392 def __nonzero__(self):
1393 return True
1393 return True
1394
1394
1395 __bool__ = __nonzero__
1395 __bool__ = __nonzero__
1396
1396
1397 def _buildflagfunc(self):
1397 def _buildflagfunc(self):
1398 # Create a fallback function for getting file flags when the
1398 # Create a fallback function for getting file flags when the
1399 # filesystem doesn't support them
1399 # filesystem doesn't support them
1400
1400
1401 copiesget = self._repo.dirstate.copies().get
1401 copiesget = self._repo.dirstate.copies().get
1402 parents = self.parents()
1402 parents = self.parents()
1403 if len(parents) < 2:
1403 if len(parents) < 2:
1404 # when we have one parent, it's easy: copy from parent
1404 # when we have one parent, it's easy: copy from parent
1405 man = parents[0].manifest()
1405 man = parents[0].manifest()
1406 def func(f):
1406 def func(f):
1407 f = copiesget(f, f)
1407 f = copiesget(f, f)
1408 return man.flags(f)
1408 return man.flags(f)
1409 else:
1409 else:
1410 # merges are tricky: we try to reconstruct the unstored
1410 # merges are tricky: we try to reconstruct the unstored
1411 # result from the merge (issue1802)
1411 # result from the merge (issue1802)
1412 p1, p2 = parents
1412 p1, p2 = parents
1413 pa = p1.ancestor(p2)
1413 pa = p1.ancestor(p2)
1414 m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()
1414 m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()
1415
1415
1416 def func(f):
1416 def func(f):
1417 f = copiesget(f, f) # may be wrong for merges with copies
1417 f = copiesget(f, f) # may be wrong for merges with copies
1418 fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
1418 fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
1419 if fl1 == fl2:
1419 if fl1 == fl2:
1420 return fl1
1420 return fl1
1421 if fl1 == fla:
1421 if fl1 == fla:
1422 return fl2
1422 return fl2
1423 if fl2 == fla:
1423 if fl2 == fla:
1424 return fl1
1424 return fl1
1425 return '' # punt for conflicts
1425 return '' # punt for conflicts
1426
1426
1427 return func
1427 return func
1428
1428
1429 @propertycache
1429 @propertycache
1430 def _flagfunc(self):
1430 def _flagfunc(self):
1431 return self._repo.dirstate.flagfunc(self._buildflagfunc)
1431 return self._repo.dirstate.flagfunc(self._buildflagfunc)
1432
1432
1433 @propertycache
1433 @propertycache
1434 def _status(self):
1434 def _status(self):
1435 return self._repo.status()
1435 return self._repo.status()
1436
1436
1437 @propertycache
1437 @propertycache
1438 def _user(self):
1438 def _user(self):
1439 return self._repo.ui.username()
1439 return self._repo.ui.username()
1440
1440
1441 @propertycache
1441 @propertycache
1442 def _date(self):
1442 def _date(self):
1443 ui = self._repo.ui
1443 ui = self._repo.ui
1444 date = ui.configdate('devel', 'default-date')
1444 date = ui.configdate('devel', 'default-date')
1445 if date is None:
1445 if date is None:
1446 date = util.makedate()
1446 date = util.makedate()
1447 return date
1447 return date
1448
1448
1449 def subrev(self, subpath):
1449 def subrev(self, subpath):
1450 return None
1450 return None
1451
1451
1452 def manifestnode(self):
1452 def manifestnode(self):
1453 return None
1453 return None
1454 def user(self):
1454 def user(self):
1455 return self._user or self._repo.ui.username()
1455 return self._user or self._repo.ui.username()
1456 def date(self):
1456 def date(self):
1457 return self._date
1457 return self._date
1458 def description(self):
1458 def description(self):
1459 return self._text
1459 return self._text
1460 def files(self):
1460 def files(self):
1461 return sorted(self._status.modified + self._status.added +
1461 return sorted(self._status.modified + self._status.added +
1462 self._status.removed)
1462 self._status.removed)
1463
1463
1464 def modified(self):
1464 def modified(self):
1465 return self._status.modified
1465 return self._status.modified
1466 def added(self):
1466 def added(self):
1467 return self._status.added
1467 return self._status.added
1468 def removed(self):
1468 def removed(self):
1469 return self._status.removed
1469 return self._status.removed
1470 def deleted(self):
1470 def deleted(self):
1471 return self._status.deleted
1471 return self._status.deleted
1472 def branch(self):
1472 def branch(self):
1473 return encoding.tolocal(self._extra['branch'])
1473 return encoding.tolocal(self._extra['branch'])
1474 def closesbranch(self):
1474 def closesbranch(self):
1475 return 'close' in self._extra
1475 return 'close' in self._extra
1476 def extra(self):
1476 def extra(self):
1477 return self._extra
1477 return self._extra
1478
1478
1479 def tags(self):
1479 def tags(self):
1480 return []
1480 return []
1481
1481
1482 def bookmarks(self):
1482 def bookmarks(self):
1483 b = []
1483 b = []
1484 for p in self.parents():
1484 for p in self.parents():
1485 b.extend(p.bookmarks())
1485 b.extend(p.bookmarks())
1486 return b
1486 return b
1487
1487
1488 def phase(self):
1488 def phase(self):
1489 phase = phases.draft # default phase to draft
1489 phase = phases.draft # default phase to draft
1490 for p in self.parents():
1490 for p in self.parents():
1491 phase = max(phase, p.phase())
1491 phase = max(phase, p.phase())
1492 return phase
1492 return phase
1493
1493
1494 def hidden(self):
1494 def hidden(self):
1495 return False
1495 return False
1496
1496
1497 def children(self):
1497 def children(self):
1498 return []
1498 return []
1499
1499
1500 def flags(self, path):
1500 def flags(self, path):
1501 if r'_manifest' in self.__dict__:
1501 if r'_manifest' in self.__dict__:
1502 try:
1502 try:
1503 return self._manifest.flags(path)
1503 return self._manifest.flags(path)
1504 except KeyError:
1504 except KeyError:
1505 return ''
1505 return ''
1506
1506
1507 try:
1507 try:
1508 return self._flagfunc(path)
1508 return self._flagfunc(path)
1509 except OSError:
1509 except OSError:
1510 return ''
1510 return ''
1511
1511
1512 def ancestor(self, c2):
1512 def ancestor(self, c2):
1513 """return the "best" ancestor context of self and c2"""
1513 """return the "best" ancestor context of self and c2"""
1514 return self._parents[0].ancestor(c2) # punt on two parents for now
1514 return self._parents[0].ancestor(c2) # punt on two parents for now
1515
1515
1516 def walk(self, match):
1516 def walk(self, match):
1517 '''Generates matching file names.'''
1517 '''Generates matching file names.'''
1518 return sorted(self._repo.dirstate.walk(match, sorted(self.substate),
1518 return sorted(self._repo.dirstate.walk(match, sorted(self.substate),
1519 True, False))
1519 True, False))
1520
1520
1521 def matches(self, match):
1521 def matches(self, match):
1522 return sorted(self._repo.dirstate.matches(match))
1522 return sorted(self._repo.dirstate.matches(match))
1523
1523
1524 def ancestors(self):
1524 def ancestors(self):
1525 for p in self._parents:
1525 for p in self._parents:
1526 yield p
1526 yield p
1527 for a in self._repo.changelog.ancestors(
1527 for a in self._repo.changelog.ancestors(
1528 [p.rev() for p in self._parents]):
1528 [p.rev() for p in self._parents]):
1529 yield changectx(self._repo, a)
1529 yield changectx(self._repo, a)
1530
1530
def markcommitted(self, node):
    """Perform post-commit cleanup necessary after committing this ctx

    Specifically, this updates backing stores this working context
    wraps to reflect the fact that the changes reflected by this
    workingctx have been committed. For example, it marks
    modified and added files as normal in the dirstate.
    """
    ds = self._repo.dirstate
    with ds.parentchange():
        # Everything we just committed is clean again...
        for f in self.modified() + self.added():
            ds.normal(f)
        # ...and removed files are no longer tracked at all.
        for f in self.removed():
            ds.drop(f)
        ds.setparents(node)

    # write changes out explicitly, because nesting wlock at
    # runtime may prevent 'wlock.release()' in 'repo.commit()'
    # from immediately doing so for subsequent changing files
    ds.write(self._repo.currenttransaction())
1552
1552
def dirty(self, missing=False, merge=True, branch=True):
    """A committable context is never dirty by itself."""
    return False
1555
1555
class workingctx(committablectx):
    """A workingctx object makes access to data related to
    the current working directory convenient.
    date - any valid date string or (unixtime, offset), or None.
    user - username string, or None.
    extra - a dictionary of extra values, or None.
    changes - a list of file lists as returned by localrepo.status()
        or None to use the repository status.
    """
    def __init__(self, repo, text="", user=None, date=None, extra=None,
                 changes=None):
        # All commit-metadata handling lives in committablectx; workingctx
        # only specializes how files/status are looked up.
        super(workingctx, self).__init__(repo, text, user, date, extra, changes)
1568
1568
def __iter__(self):
    # Iterate over tracked files: every dirstate entry except those
    # marked removed ('r').
    d = self._repo.dirstate
    for f in d:
        if d[f] != 'r':
            yield f
1574
1574
def __contains__(self, key):
    # A file belongs to the working context unless its dirstate entry is
    # unknown ('?') or removed ('r').
    return self._repo.dirstate[key] not in "?r"
1577
1577
def hex(self):
    # The working directory has no real node id; report the magic wdirid
    # (hex() here is mercurial.node.hex, imported at module level).
    return hex(wdirid)
1580
1580
@propertycache
def _parents(self):
    # The dirstate always stores two parent nodes; a null second parent
    # means the working directory has only one real parent, so drop it.
    p = self._repo.dirstate.parents()
    if p[1] == nullid:
        p = p[:-1]
    return [changectx(self._repo, x) for x in p]
1587
1587
def filectx(self, path, filelog=None):
    """get a file context from the working directory"""
    # filelog may be passed in to reuse an already-open filelog.
    return workingfilectx(self._repo, path, workingctx=self,
                          filelog=filelog)
1592
1592
def dirty(self, missing=False, merge=True, branch=True):
    "check whether a working directory is modified"
    # A dirty subrepo makes the whole working directory dirty.
    for subpath in sorted(self.substate):
        if self.sub(subpath).dirty():
            return True
    # Otherwise the working directory itself decides: an in-progress
    # merge, a branch change, file changes, or (optionally) missing files.
    return ((merge and self.p2()) or
            (branch and self.branch() != self.p1().branch()) or
            self.modified() or self.added() or self.removed() or
            (missing and self.deleted()))
1604
1604
def add(self, list, prefix=""):
    """Schedule the given files for addition; return the rejected ones."""
    join = lambda f: os.path.join(prefix, f)
    with self._repo.wlock():
        ui, ds = self._repo.ui, self._repo.dirstate
        rejected = []
        lstat = self._repo.wvfs.lstat
        for f in list:
            # Warn about names that are not portable across platforms.
            scmutil.checkportable(ui, join(f))
            try:
                st = lstat(f)
            except OSError:
                ui.warn(_("%s does not exist!\n") % join(f))
                rejected.append(f)
                continue
            if st.st_size > 10000000:
                # Large file: warn, but still add it.
                ui.warn(_("%s: up to %d MB of RAM may be required "
                          "to manage this file\n"
                          "(use 'hg revert %s' to cancel the "
                          "pending addition)\n")
                        % (f, 3 * st.st_size // 1000000, join(f)))
            if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
                ui.warn(_("%s not added: only files and symlinks "
                          "supported currently\n") % join(f))
                rejected.append(f)
            elif ds[f] in 'amn':
                # Already added, merged or normal: nothing to do.
                ui.warn(_("%s already tracked!\n") % join(f))
            elif ds[f] == 'r':
                # Re-adding a removed file: resurrect it.
                ds.normallookup(f)
            else:
                ds.add(f)
        return rejected
1636
1636
def forget(self, files, prefix=""):
    """Stop tracking the given files; return those that were not tracked."""
    join = lambda f: os.path.join(prefix, f)
    with self._repo.wlock():
        ds = self._repo.dirstate
        rejected = []
        for f in files:
            if f not in ds:
                self._repo.ui.warn(_("%s not tracked!\n") % join(f))
                rejected.append(f)
            elif ds[f] != 'a':
                # Previously committed: schedule for removal.
                ds.remove(f)
            else:
                # Added but never committed: simply forget it.
                ds.drop(f)
        return rejected
1650
1650
def undelete(self, list):
    """Restore files marked removed from a parent revision's content."""
    pctxs = self.parents()
    with self._repo.wlock():
        for f in list:
            if self._repo.dirstate[f] != 'r':
                self._repo.ui.warn(_("%s not removed!\n") % f)
                continue
            # Prefer the first parent's copy when the file exists there
            # (and/or form kept: falls through to p2 on a falsy fctx).
            fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f]
            data = fctx.data()
            self._repo.wwrite(f, data, fctx.flags())
            self._repo.dirstate.normal(f)
1662
1662
def copy(self, source, dest):
    """Record in the dirstate that *dest* is a copy of *source*."""
    try:
        st = self._repo.wvfs.lstat(dest)
    except OSError as err:
        if err.errno != errno.ENOENT:
            raise
        self._repo.ui.warn(_("%s does not exist!\n") % dest)
        return
    if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
        self._repo.ui.warn(_("copy failed: %s is not a file or a "
                             "symbolic link\n") % dest)
    else:
        with self._repo.wlock():
            ds = self._repo.dirstate
            if ds[dest] in '?':
                # Untracked destination: start tracking it.
                ds.add(dest)
            elif ds[dest] in 'r':
                # Destination was removed: resurrect it.
                ds.normallookup(dest)
            ds.copy(source, dest)
1681
1681
def match(self, pats=None, include=None, exclude=None, default='glob',
          listsubrepos=False, badfn=None):
    """Build a matcher for this working context's files."""
    r = self._repo

    # Only a case insensitive filesystem needs magic to translate user input
    # to actual case in the filesystem.
    icasefs = not util.fscasesensitive(r.root)
    return matchmod.match(r.root, r.getcwd(), pats, include, exclude,
                          default, auditor=r.auditor, ctx=self,
                          listsubrepos=listsubrepos, badfn=badfn,
                          icasefs=icasefs)
1693
1693
def _filtersuspectsymlink(self, files):
    """Drop symlink placeholders whose content cannot be a link target."""
    # If the filesystem supports symlinks, nothing can be a placeholder.
    if not files or self._repo.dirstate._checklink:
        return files

    # Symlink placeholders may get non-symlink-like contents
    # via user error or dereferencing by NFS or Samba servers,
    # so we filter out any placeholders that don't look like a
    # symlink
    sane = []
    for f in files:
        if self.flags(f) == 'l':
            d = self[f].data()
            # Empty, long, multi-line or binary content cannot be a
            # symlink target.
            if d == '' or len(d) >= 1024 or '\n' in d or util.binary(d):
                self._repo.ui.debug('ignoring suspect symlink placeholder'
                                    ' "%s"\n' % f)
                continue
        sane.append(f)
    return sane
1712
1712
def _checklookup(self, files):
    """Content-compare files the dirstate flagged as "lookup".

    Returns a (modified, deleted, fixup) triple of file lists; 'fixup'
    holds files that turned out to be clean.
    """
    # check for any possibly clean files
    if not files:
        return [], [], []

    modified = []
    deleted = []
    fixup = []
    pctx = self._parents[0]
    # do a full compare of any files that might have changed
    for f in sorted(files):
        try:
            # This will return True for a file that got replaced by a
            # directory in the interim, but fixing that is pretty hard.
            if (f not in pctx or self.flags(f) != pctx.flags(f)
                or pctx[f].cmp(self[f])):
                modified.append(f)
            else:
                fixup.append(f)
        except (IOError, OSError):
            # A file become inaccessible in between? Mark it as deleted,
            # matching dirstate behavior (issue5584).
            # The dirstate has more complex behavior around whether a
            # missing file matches a directory, etc, but we don't need to
            # bother with that: if f has made it to this point, we're sure
            # it's in the dirstate.
            deleted.append(f)

    return modified, deleted, fixup
1742
1742
def _poststatusfixup(self, status, fixup):
    """update dirstate for files that are actually clean"""
    # NOTE(review): 'status' is not used in this body; presumably it is
    # passed so subclasses/extensions overriding this hook can see the
    # full status tuple -- confirm against callers.
    if fixup:
        try:
            oldid = self._repo.dirstate.identity()

            # updating the dirstate is optional
            # so we don't wait on the lock
            # wlock can invalidate the dirstate, so cache normal _after_
            # taking the lock
            with self._repo.wlock(False):
                if self._repo.dirstate.identity() == oldid:
                    normal = self._repo.dirstate.normal
                    for f in fixup:
                        normal(f)
                    # write changes out explicitly, because nesting
                    # wlock at runtime may prevent 'wlock.release()'
                    # after this block from doing so for subsequent
                    # changing files
                    tr = self._repo.currenttransaction()
                    self._repo.dirstate.write(tr)
                else:
                    # in this case, writing changes out breaks
                    # consistency, because .hg/dirstate was
                    # already changed simultaneously after last
                    # caching (see also issue5584 for detail)
                    self._repo.ui.debug('skip updating dirstate: '
                                        'identity mismatch\n')
        except error.LockError:
            # Somebody else holds the wlock; skipping the fixup is safe.
            pass
1773
1773
def _dirstatestatus(self, match=None, ignored=False, clean=False,
                    unknown=False):
    '''Gets the status from the dirstate -- internal use only.'''
    listignored, listclean, listunknown = ignored, clean, unknown
    match = match or matchmod.always(self._repo.root, self._repo.getcwd())
    subrepos = []
    if '.hgsub' in self:
        subrepos = sorted(self.substate)
    # 'cmp' holds files the dirstate could not classify by stat data
    # alone; they need a content comparison against the parent.
    cmp, s = self._repo.dirstate.status(match, subrepos, listignored,
                                        listclean, listunknown)

    # check for any possibly clean files
    fixup = []
    if cmp:
        modified2, deleted2, fixup = self._checklookup(cmp)
        s.modified.extend(modified2)
        s.deleted.extend(deleted2)

    if fixup and listclean:
        s.clean.extend(fixup)

    self._poststatusfixup(s, fixup)

    if match.always():
        # cache for performance
        if s.unknown or s.ignored or s.clean:
            # "_status" is cached with list*=False in the normal route
            self._status = scmutil.status(s.modified, s.added, s.removed,
                                          s.deleted, [], [], [])
        else:
            self._status = s

    return s
1807
1807
@propertycache
def _manifest(self):
    """generate a manifest corresponding to the values in self._status

    This reuse the file nodeid from parent, but we use special node
    identifiers for added and modified files. This is used by manifests
    merge to see that files are different and by update logic to avoid
    deleting newly added files.
    """
    return self._buildstatusmanifest(self._status)
1818
1818
def _buildstatusmanifest(self, status):
    """Builds a manifest that includes the given status results."""
    parents = self.parents()
    man = parents[0].manifest().copy()

    flagfunc = self._flagfunc
    # Added and modified files get placeholder node ids so manifest
    # comparisons see them as different from the parent's entries.
    for nodeid, filelist in ((addednodeid, status.added),
                             (modifiednodeid, status.modified)):
        for f in filelist:
            man[f] = nodeid
            try:
                man.setflag(f, flagfunc(f))
            except OSError:
                # File vanished from disk; leave its flags untouched.
                pass

    for f in status.deleted + status.removed:
        if f in man:
            del man[f]

    return man
1840
1840
def _buildstatus(self, other, s, match, listignored, listclean,
                 listunknown):
    """build a status with respect to another context

    This includes logic for maintaining the fast path of status when
    comparing the working directory against its parent, which is to skip
    building a new manifest if self (working directory) is not comparing
    against its parent (repo['.']).
    """
    s = self._dirstatestatus(match, listignored, listclean, listunknown)
    # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
    # might have accidentally ended up with the entire contents of the file
    # they are supposed to be linking to.
    s.modified[:] = self._filtersuspectsymlink(s.modified)
    if other != self._repo['.']:
        # Comparing against something other than the first parent needs
        # the generic manifest-based status from the base class.
        s = super(workingctx, self)._buildstatus(other, s, match,
                                                 listignored, listclean,
                                                 listunknown)
    return s
1860
1860
def _matchstatus(self, other, match):
    """override the match method with a filter for directory patterns

    We use inheritance to customize the match.bad method only in cases of
    workingctx since it belongs only to the working directory when
    comparing against the parent changeset.

    If we aren't comparing against the working directory's parent, then we
    just use the default match object sent to us.
    """
    superself = super(workingctx, self)
    match = superself._matchstatus(other, match)
    if other != self._repo['.']:
        def bad(f, msg):
            # 'f' may be a directory pattern from 'match.files()',
            # so 'f not in ctx1' is not enough
            if f not in other and not other.hasdir(f):
                self._repo.ui.warn('%s: %s\n' %
                                   (self._repo.dirstate.pathto(f), msg))
        match.bad = bad
    return match
1882
1882
class committablefilectx(basefilectx):
    """A committablefilectx provides common functionality for a file context
    that wants the ability to commit, e.g. workingfilectx or memfilectx."""
    def __init__(self, repo, path, filelog=None, ctx=None):
        self._repo = repo
        self._path = path
        # No committed revision backs this file context (yet).
        self._changeid = None
        self._filerev = self._filenode = None

        if filelog is not None:
            self._filelog = filelog
        if ctx:
            self._changectx = ctx

    def __nonzero__(self):
        return True

    # Python 3 truthiness protocol.
    __bool__ = __nonzero__

    def linkrev(self):
        # linked to self._changectx no matter if file is modified or not
        return self.rev()

    def parents(self):
        '''return parent filectxs, following copies if necessary'''
        def filenode(ctx, path):
            return ctx._manifest.get(path, nullid)

        path = self._path
        fl = self._filelog
        pcl = self._changectx._parents
        renamed = self.renamed()

        if renamed:
            # A rename/copy supplies (path, node); no filelog is known.
            pl = [renamed + (None,)]
        else:
            pl = [(path, filenode(pcl[0], path), fl)]

        for pc in pcl[1:]:
            pl.append((path, filenode(pc, path), fl))

        # Null nodes mean "file absent in that parent" and are dropped.
        return [self._parentfilectx(p, fileid=n, filelog=l)
                for p, n, l in pl if n != nullid]

    def children(self):
        return []
1929
1929
class workingfilectx(committablefilectx):
    """A workingfilectx object makes access to data related to a particular
    file in the working directory convenient."""
    def __init__(self, repo, path, filelog=None, workingctx=None):
        super(workingfilectx, self).__init__(repo, path, filelog, workingctx)

    @propertycache
    def _changectx(self):
        return workingctx(self._repo)

    def data(self):
        """Read the file's current content from the working directory."""
        return self._repo.wread(self._path)

    def renamed(self):
        """Return (source, source node) if this file was copied, else None."""
        source = self._repo.dirstate.copied(self._path)
        if not source:
            return None
        firstparent = self._changectx._parents[0]
        return source, firstparent._manifest.get(source, nullid)

    def size(self):
        return self._repo.wvfs.lstat(self._path).st_size

    def date(self):
        """Return (mtime, tzoffset); fall back to the changectx date when
        the file is missing from disk."""
        t, tz = self._changectx.date()
        try:
            return (self._repo.wvfs.lstat(self._path).st_mtime, tz)
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
            return (t, tz)

    def cmp(self, fctx):
        """compare with other file context

        returns True if different than fctx.
        """
        # fctx should be a filectx (not a workingfilectx)
        # invert comparison to reuse the same code path
        return fctx.cmp(self)

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        self._repo.wvfs.unlinkpath(self._path, ignoremissing=ignoremissing)

    def write(self, data, flags):
        """wraps repo.wwrite"""
        self._repo.wwrite(self._path, data, flags)
1975
1975
class workingcommitctx(workingctx):
    """A workingcommitctx object makes access to data related to
    the revision being committed convenient.

    This hides changes in the working directory, if they aren't
    committed in this context.
    """
    def __init__(self, repo, changes,
                 text="", user=None, date=None, extra=None):
        # super(workingctx, ...) deliberately skips workingctx in the MRO
        # and initializes the committablectx base directly.
        super(workingctx, self).__init__(repo, text, user, date, extra,
                                         changes)

    def _dirstatestatus(self, match=None, ignored=False, clean=False,
                        unknown=False):
        """Return matched files only in ``self._status``

        Uncommitted files appear "clean" via this context, even if
        they aren't actually so in the working directory.
        """
        match = match or matchmod.always(self._repo.root, self._repo.getcwd())
        if clean:
            # Everything in the manifest that is not being committed
            # counts as clean from this context's point of view.
            clean = [f for f in self._manifest if f not in self._changedset]
        else:
            clean = []
        return scmutil.status([f for f in self._status.modified if match(f)],
                              [f for f in self._status.added if match(f)],
                              [f for f in self._status.removed if match(f)],
                              [], [], [], clean)

    @propertycache
    def _changedset(self):
        """Return the set of files changed in this context
        """
        changed = set(self._status.modified)
        changed.update(self._status.added)
        changed.update(self._status.removed)
        return changed
2013
2013
def makecachingfilectxfn(func):
    """Create a filectxfn that caches based on the path.

    We can't use util.cachefunc because it uses all arguments as the cache
    key and this creates a cycle since the arguments include the repo and
    memctx.
    """
    memo = {}

    def getfilectx(repo, memctx, path):
        try:
            return memo[path]
        except KeyError:
            # first request for this path: compute and remember the result
            result = memo[path] = func(repo, memctx, path)
            return result

    return getfilectx
2029
2029
def memfilefromctx(ctx):
    """Given a context return a memfilectx for ctx[path]

    This is a convenience method for building a memctx based on another
    context.
    """
    def getfilectx(repo, memctx, path):
        fctx = ctx[path]
        # this is weird but apparently we only keep track of one parent
        # (why not only store that instead of a tuple?)
        renamed = fctx.renamed()
        copysource = renamed[0] if renamed else renamed
        return memfilectx(repo, path, fctx.data(),
                          islink=fctx.islink(),
                          isexec=fctx.isexec(),
                          copied=copysource,
                          memctx=memctx)

    return getfilectx
2048
2048
def memfilefrompatch(patchstore):
    """Given a patch (e.g. patchstore object) return a memfilectx

    This is a convenience method for building a memctx based on a patchstore.
    """
    def getfilectx(repo, memctx, path):
        data, mode, copied = patchstore.getfile(path)
        if data is None:
            return None  # file was removed by the patch
        islink, isexec = mode
        return memfilectx(repo, path, data,
                          islink=islink,
                          isexec=isexec,
                          copied=copied,
                          memctx=memctx)

    return getfilectx
2064
2064
class memctx(committablectx):
    """Use memctx to perform in-memory commits via localrepo.commitctx().

    Revision information is supplied at initialization time while
    related files data and is made available through a callback
    mechanism. 'repo' is the current localrepo, 'parents' is a
    sequence of two parent revisions identifiers (pass None for every
    missing parent), 'text' is the commit message and 'files' lists
    names of files touched by the revision (normalized and relative to
    repository root).

    filectxfn(repo, memctx, path) is a callable receiving the
    repository, the current memctx object and the normalized path of
    requested file, relative to repository root. It is fired by the
    commit function for every file in 'files', but calls order is
    undefined. If the file is available in the revision being
    committed (updated or added), filectxfn returns a memfilectx
    object. If the file was removed, filectxfn return None for recent
    Mercurial. Moved files are represented by marking the source file
    removed and the new file added with copy information (see
    memfilectx).

    user receives the committer name and defaults to current
    repository username, date is the commit date in any format
    supported by util.parsedate() and defaults to current date, extra
    is a dictionary of metadata or is left empty.
    """

    # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
    # Extensions that need to retain compatibility across Mercurial 3.1 can use
    # this field to determine what to do in filectxfn.
    _returnnoneformissingfiles = True

    def __init__(self, repo, parents, text, files, filectxfn, user=None,
                 date=None, extra=None, branch=None, editor=False):
        super(memctx, self).__init__(repo, text, user, date, extra)
        self._rev = None
        self._node = None
        # substitute nullid for missing parents; exactly two entries expected
        parents = [(p or nullid) for p in parents]
        p1, p2 = parents
        self._parents = [changectx(self._repo, p) for p in (p1, p2)]
        files = sorted(set(files))
        self._files = files
        if branch is not None:
            self._extra['branch'] = encoding.fromlocal(branch)
        self.substate = {}

        if isinstance(filectxfn, patch.filestore):
            filectxfn = memfilefrompatch(filectxfn)
        elif not callable(filectxfn):
            # if store is not callable, wrap it in a function
            filectxfn = memfilefromctx(filectxfn)

        # memoizing increases performance for e.g. vcs convert scenarios.
        self._filectxfn = makecachingfilectxfn(filectxfn)

        if editor:
            self._text = editor(self._repo, self, [])
            self._repo.savecommitmessage(self._text)

    def filectx(self, path, filelog=None):
        """get a file context from the working directory

        Returns None if file doesn't exist and should be removed."""
        return self._filectxfn(self._repo, self, path)

    def commit(self):
        """commit context to the repo"""
        return self._repo.commitctx(self)

    @propertycache
    def _manifest(self):
        """generate a manifest based on the return values of filectxfn"""

        # keep this simple for now; just worry about p1
        pctx = self._parents[0]
        man = pctx.manifest().copy()

        for f in self._status.modified:
            p1node = nullid
            p2node = nullid
            p = pctx[f].parents() # if file isn't in pctx, check p2?
            if len(p) > 0:
                p1node = p[0].filenode()
                if len(p) > 1:
                    p2node = p[1].filenode()
            man[f] = revlog.hash(self[f].data(), p1node, p2node)

        for f in self._status.added:
            # added files have no filelog parents
            man[f] = revlog.hash(self[f].data(), nullid, nullid)

        for f in self._status.removed:
            if f in man:
                del man[f]

        return man

    @propertycache
    def _status(self):
        """Calculate exact status from ``files`` specified at construction
        """
        man1 = self.p1().manifest()
        p2 = self._parents[1]
        # "1 < len(self._parents)" can't be used for checking
        # existence of the 2nd parent, because "memctx._parents" is
        # explicitly initialized by the list, of which length is 2.
        if p2.node() != nullid:
            man2 = p2.manifest()
            managing = lambda f: f in man1 or f in man2
        else:
            managing = lambda f: f in man1

        modified, added, removed = [], [], []
        for f in self._files:
            if not managing(f):
                # not tracked in either parent: an addition
                added.append(f)
            elif self[f]:
                # filectxfn returned a file context: modified/updated
                modified.append(f)
            else:
                # filectxfn returned None: a removal
                removed.append(f)

        return scmutil.status(modified, added, removed, [], [], [], [])
2187
2187
class memfilectx(committablefilectx):
    """memfilectx represents an in-memory file to commit.

    See memctx and committablefilectx for more details.
    """
    def __init__(self, repo, path, data, islink=False,
                 isexec=False, copied=None, memctx=None):
        """
        path is the normalized file path relative to repository root.
        data is the file content as a string.
        islink is True if the file is a symbolic link.
        isexec is True if the file is executable.
        copied is the source file path if current file was copied in the
        revision being committed, or None."""
        super(memfilectx, self).__init__(repo, path, None, memctx)
        self._data = data
        # assemble the manifest flags string ('l' for symlink, 'x' for exec)
        flags = ''
        if islink:
            flags += 'l'
        if isexec:
            flags += 'x'
        self._flags = flags
        self._copied = (copied, nullid) if copied else None

    def data(self):
        return self._data

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        # need to figure out what to do here
        del self._changectx[self._path]

    def write(self, data, flags):
        """wraps repo.wwrite"""
        self._data = data
2220
2220
class overlayfilectx(committablefilectx):
    """Like memfilectx but take an original filectx and optional parameters to
    override parts of it. This is useful when fctx.data() is expensive (i.e.
    flag processor is expensive) and raw data, flags, and filenode could be
    reused (ex. rebase or mode-only amend a REVIDX_EXTSTORED file).
    """

    def __init__(self, originalfctx, datafunc=None, path=None, flags=None,
                 copied=None, ctx=None):
        """originalfctx: filecontext to duplicate

        datafunc: None or a function to override data (file content). It is a
        function to be lazy. path, flags, copied, ctx: None or overridden value

        copied could be (path, rev), or False. copied could also be just path,
        and will be converted to (path, nullid). This simplifies some callers.
        """

        if path is None:
            path = originalfctx.path()
        if ctx is None:
            ctx = originalfctx.changectx()
            # ctx inherited from the original: trivially "the same"
            ctxmatch = lambda: True
        else:
            ctxmatch = lambda: ctx == originalfctx.changectx()

        repo = originalfctx.repo()
        flog = originalfctx.filelog()
        super(overlayfilectx, self).__init__(repo, path, flog, ctx)

        if copied is None:
            copied = originalfctx.renamed()
            copiedmatch = lambda: True
        else:
            if copied and not isinstance(copied, tuple):
                # repo._filecommit will recalculate copyrev so nullid is okay
                copied = (copied, nullid)
            copiedmatch = lambda: copied == originalfctx.renamed()

        # When data, copied (could affect data), ctx (could affect filelog
        # parents) are not overridden, rawdata, rawflags, and filenode may be
        # reused (repo._filecommit should double check filelog parents).
        #
        # path, flags are not hashed in filelog (but in manifestlog) so they do
        # not affect reusable here.
        #
        # If ctx or copied is overridden to a same value with originalfctx,
        # still consider it's reusable. originalfctx.renamed() may be a bit
        # expensive so it's not called unless necessary. Assuming datafunc is
        # always expensive, do not call it for this "reusable" test.
        reusable = datafunc is None and ctxmatch() and copiedmatch()

        if datafunc is None:
            datafunc = originalfctx.data
        if flags is None:
            flags = originalfctx.flags()

        self._datafunc = datafunc
        self._flags = flags
        self._copied = copied

        if reusable:
            # copy extra fields from originalfctx
            attrs = ['rawdata', 'rawflags', '_filenode', '_filerev']
            for attr in attrs:
                if util.safehasattr(originalfctx, attr):
                    setattr(self, attr, getattr(originalfctx, attr))

    def data(self):
        # lazy: either the original fctx's data() or the overriding callable
        return self._datafunc()
2291
2291
class metadataonlyctx(committablectx):
    """Like memctx but it's reusing the manifest of different commit.
    Intended to be used by lightweight operations that are creating
    metadata-only changes.

    Revision information is supplied at initialization time. 'repo' is the
    current localrepo, 'ctx' is original revision which manifest we're reuisng
    'parents' is a sequence of two parent revisions identifiers (pass None for
    every missing parent), 'text' is the commit.

    user receives the committer name and defaults to current repository
    username, date is the commit date in any format supported by
    util.parsedate() and defaults to current date, extra is a dictionary of
    metadata or is left empty.
    """
    def __new__(cls, repo, originalctx, *args, **kwargs):
        return super(metadataonlyctx, cls).__new__(cls, repo)

    def __init__(self, repo, originalctx, parents, text, user=None, date=None,
                 extra=None, editor=False):
        super(metadataonlyctx, self).__init__(repo, text, user, date, extra)
        self._rev = None
        self._node = None
        self._originalctx = originalctx
        # reuse the original commit's manifest instead of building a new one
        self._manifestnode = originalctx.manifestnode()
        parents = [(p or nullid) for p in parents]
        p1, p2 = self._parents = [changectx(self._repo, p) for p in parents]

        # sanity check to ensure that the reused manifest parents are
        # manifests of our commit parents
        # NOTE(review): p1/p2 are changectx objects compared against the
        # nullid bytes here; this relies on changectx equality semantics —
        # confirm the intended short-circuit actually triggers
        mp1, mp2 = self.manifestctx().parents
        if p1 != nullid and p1.manifestnode() != mp1:
            raise RuntimeError('can\'t reuse the manifest: '
                               'its p1 doesn\'t match the new ctx p1')
        if p2 != nullid and p2.manifestnode() != mp2:
            raise RuntimeError('can\'t reuse the manifest: '
                               'its p2 doesn\'t match the new ctx p2')

        self._files = originalctx.files()
        self.substate = {}

        if editor:
            self._text = editor(self._repo, self, [])
            self._repo.savecommitmessage(self._text)

    def manifestnode(self):
        return self._manifestnode

    @property
    def _manifestctx(self):
        return self._repo.manifestlog[self._manifestnode]

    def filectx(self, path, filelog=None):
        return self._originalctx.filectx(path, filelog=filelog)

    def commit(self):
        """commit context to the repo"""
        return self._repo.commitctx(self)

    @property
    def _manifest(self):
        return self._originalctx.manifest()

    @propertycache
    def _status(self):
        """Calculate exact status from ``files`` specified in the ``origctx``
        and parents manifests.
        """
        man1 = self.p1().manifest()
        p2 = self._parents[1]
        # "1 < len(self._parents)" can't be used for checking
        # existence of the 2nd parent, because "metadataonlyctx._parents" is
        # explicitly initialized by the list, of which length is 2.
        if p2.node() != nullid:
            man2 = p2.manifest()
            managing = lambda f: f in man1 or f in man2
        else:
            managing = lambda f: f in man1

        modified, added, removed = [], [], []
        for f in self._files:
            if not managing(f):
                # untracked in both parents: an addition
                added.append(f)
            elif self[f]:
                modified.append(f)
            else:
                removed.append(f)

        return scmutil.status(modified, added, removed, [], [], [], [])
@@ -1,70 +1,70
1 # extension to emulate invoking 'dirstate.write()' at the time
1 # extension to emulate invoking 'dirstate.write()' at the time
2 # specified by '[fakedirstatewritetime] fakenow', only when
2 # specified by '[fakedirstatewritetime] fakenow', only when
3 # 'dirstate.write()' is invoked via functions below:
3 # 'dirstate.write()' is invoked via functions below:
4 #
4 #
5 # - 'workingctx._poststatusfixup()' (= 'repo.status()')
5 # - 'workingctx._poststatusfixup()' (= 'repo.status()')
6 # - 'committablectx.markcommitted()'
6 # - 'committablectx.markcommitted()'
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 from mercurial import (
10 from mercurial import (
11 context,
11 context,
12 dirstate,
12 dirstate,
13 extensions,
13 extensions,
14 policy,
14 policy,
15 util,
15 util,
16 )
16 )
17
17
18 parsers = policy.importmod(r'parsers')
18 parsers = policy.importmod(r'parsers')
19
19
def pack_dirstate(fakenow, orig, dmap, copymap, pl, now):
    """Invoke the original pack_dirstate with 'now' replaced by 'fakenow'.

    Before delegating, normalize entries whose mtime equals the truncated
    real 'now' to -1, which is what the original parsers.pack_dirstate
    would have done — this keeps the result consistent.
    """
    truncated = int(now)
    for path, ent in dmap.iteritems():
        if ent[0] == 'n' and ent[3] == truncated:
            dmap[path] = parsers.dirstatetuple(ent[0], ent[1], ent[2], -1)

    return orig(dmap, copymap, pl, fakenow)
30
30
def fakewrite(ui, func):
    """Run func() with pack_dirstate patched to use the configured fake time.

    If '[fakedirstatewritetime] fakenow' is unset, func() runs unpatched.
    This matters for subrepos: replacing 'parsers.pack_dirstate' is
    process-global and would otherwise leak into them.
    """
    fakenow = ui.config('fakedirstatewritetime', 'fakenow')
    if not fakenow:
        return func()

    # 'fakenow' is parsed in YYYYmmddHHMM format so it is easy to compare
    # against 'touch -t YYYYmmddHHMM' arguments
    fakenow = util.parsedate(fakenow, ['%Y%m%d%H%M'])[0]

    origpack = parsers.pack_dirstate
    origgetfsnow = dirstate._getfsnow

    parsers.pack_dirstate = lambda *args: pack_dirstate(fakenow, origpack,
                                                        *args)
    dirstate._getfsnow = lambda *args: fakenow
    try:
        return func()
    finally:
        # always restore the real implementations
        parsers.pack_dirstate = origpack
        dirstate._getfsnow = origgetfsnow
57
57
def _poststatusfixup(orig, workingctx, status, fixup):
    """Wrap workingctx._poststatusfixup so its dirstate write is time-faked.

    The wrapped method now receives the status tuple in addition to the
    fixup list (the SOURCE diff interleaves the old 2-argument and new
    3-argument variants; this is the reconciled, current signature), so
    both arguments are forwarded unchanged.
    """
    ui = workingctx.repo().ui
    return fakewrite(ui, lambda: orig(workingctx, status, fixup))
61
61
def markcommitted(orig, committablectx, node):
    """Wrap committablectx.markcommitted so its dirstate write is
    time-faked."""
    repo = committablectx.repo()

    def invoke():
        return orig(committablectx, node)

    return fakewrite(repo.ui, invoke)
65
65
def extsetup(ui):
    # wrap the two entry points that end up writing the dirstate so that
    # pack_dirstate sees the configured fake timestamp
    wrappers = [
        (context.workingctx, '_poststatusfixup', _poststatusfixup),
        (context.committablectx, 'markcommitted', markcommitted),
    ]
    for cls, name, wrapper in wrappers:
        extensions.wrapfunction(cls, name, wrapper)
70 markcommitted)
General Comments 0
You need to be logged in to leave comments. Login now