##// END OF EJS Templates
_adjustlinkrev: reuse ancestors set during rename detection (issue4514)...
Pierre-Yves David -
r23980:c1ce5442 stable
parent child Browse files
Show More
@@ -1,1848 +1,1857
1 # context.py - changeset and file context objects for mercurial
1 # context.py - changeset and file context objects for mercurial
2 #
2 #
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from node import nullid, nullrev, short, hex, bin
8 from node import nullid, nullrev, short, hex, bin
9 from i18n import _
9 from i18n import _
10 import mdiff, error, util, scmutil, subrepo, patch, encoding, phases
10 import mdiff, error, util, scmutil, subrepo, patch, encoding, phases
11 import match as matchmod
11 import match as matchmod
12 import os, errno, stat
12 import os, errno, stat
13 import obsolete as obsmod
13 import obsolete as obsmod
14 import repoview
14 import repoview
15 import fileset
15 import fileset
16 import revlog
16 import revlog
17
17
# Short module-level alias: cache-on-first-access descriptor from util.
propertycache = util.propertycache
19
19
20 # Phony node value to stand-in for new files in some uses of
20 # Phony node value to stand-in for new files in some uses of
21 # manifests. Manifests support 21-byte hashes for nodes which are
21 # manifests. Manifests support 21-byte hashes for nodes which are
22 # dirty in the working copy.
22 # dirty in the working copy.
23 _newnode = '!' * 21
23 _newnode = '!' * 21
24
24
class basectx(object):
    """A basectx object represents the common logic for its children:
    changectx: read-only context that is already present in the repo,
    workingctx: a context that represents the working directory and can
    be committed,
    memctx: a context that represents changes in-memory and can also
    be committed."""
    def __new__(cls, repo, changeid='', *args, **kwargs):
        # An existing context passed as changeid is returned unchanged so
        # that changectx(repo, ctx) is effectively an identity operation.
        if isinstance(changeid, basectx):
            return changeid

        o = super(basectx, cls).__new__(cls)

        o._repo = repo
        o._rev = nullrev
        o._node = nullid

        return o

    def __str__(self):
        return short(self.node())

    def __int__(self):
        return self.rev()

    def __repr__(self):
        return "<%s %s>" % (type(self).__name__, str(self))

    def __eq__(self, other):
        # Contexts of different concrete types never compare equal, even
        # for the same revision; a non-context `other` yields False via
        # the AttributeError path.
        try:
            return type(self) == type(other) and self._rev == other._rev
        except AttributeError:
            return False

    def __ne__(self, other):
        return not (self == other)

    def __contains__(self, key):
        return key in self._manifest

    def __getitem__(self, key):
        return self.filectx(key)

    def __iter__(self):
        # iterate filenames in sorted (manifest) order
        for f in sorted(self._manifest):
            yield f

    def _manifestmatches(self, match, s):
        """generate a new manifest filtered by the match argument

        This method is for internal use only and mainly exists to provide an
        object oriented way for other contexts to customize the manifest
        generation.
        """
        return self.manifest().matches(match)

    def _matchstatus(self, other, match):
        """return match.always if match is none

        This internal method provides a way for child objects to override the
        match operator.
        """
        return match or matchmod.always(self._repo.root, self._repo.getcwd())

    def _buildstatus(self, other, s, match, listignored, listclean,
                     listunknown):
        """build a status with respect to another context"""
        # Load earliest manifest first for caching reasons. More specifically,
        # if you have revisions 1000 and 1001, 1001 is probably stored as a
        # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
        # 1000 and cache it so that when you read 1001, we just need to apply a
        # delta to what's in the cache. So that's one full reconstruction + one
        # delta application.
        if self.rev() is not None and self.rev() < other.rev():
            self.manifest()
        mf1 = other._manifestmatches(match, s)
        mf2 = self._manifestmatches(match, s)

        modified, added = [], []
        removed = []
        clean = []
        deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
        deletedset = set(deleted)
        d = mf1.diff(mf2, clean=listclean)
        for fn, value in d.iteritems():
            if fn in deletedset:
                continue
            if value is None:
                # reported by diff only when listclean is set
                clean.append(fn)
                continue
            (node1, flag1), (node2, flag2) = value
            if node1 is None:
                added.append(fn)
            elif node2 is None:
                removed.append(fn)
            elif node2 != _newnode:
                # The file was not a new file in mf2, so an entry
                # from diff is really a difference.
                modified.append(fn)
            elif self[fn].cmp(other[fn]):
                # node2 was newnode, but the working file doesn't
                # match the one in mf1.
                modified.append(fn)
            else:
                clean.append(fn)

        if removed:
            # need to filter files if they are already reported as removed
            unknown = [fn for fn in unknown if fn not in mf1]
            ignored = [fn for fn in ignored if fn not in mf1]
            # if they're deleted, don't report them as removed
            removed = [fn for fn in removed if fn not in deletedset]

        return scmutil.status(modified, added, removed, deleted, unknown,
                              ignored, clean)

    @propertycache
    def substate(self):
        # parsed .hgsub/.hgsubstate mapping for this context
        return subrepo.state(self, self._repo.ui)

    def subrev(self, subpath):
        return self.substate[subpath][1]

    def rev(self):
        return self._rev
    def node(self):
        return self._node
    def hex(self):
        return hex(self.node())
    def manifest(self):
        return self._manifest
    def phasestr(self):
        return phases.phasenames[self.phase()]
    def mutable(self):
        return self.phase() > phases.public

    def getfileset(self, expr):
        return fileset.getfileset(self, expr)

    def obsolete(self):
        """True if the changeset is obsolete"""
        return self.rev() in obsmod.getrevs(self._repo, 'obsolete')

    def extinct(self):
        """True if the changeset is extinct"""
        return self.rev() in obsmod.getrevs(self._repo, 'extinct')

    def unstable(self):
        """True if the changeset is not obsolete but it's ancestor are"""
        return self.rev() in obsmod.getrevs(self._repo, 'unstable')

    def bumped(self):
        """True if the changeset try to be a successor of a public changeset

        Only non-public and non-obsolete changesets may be bumped.
        """
        return self.rev() in obsmod.getrevs(self._repo, 'bumped')

    def divergent(self):
        """Is a successors of a changeset with multiple possible successors set

        Only non-public and non-obsolete changesets may be divergent.
        """
        return self.rev() in obsmod.getrevs(self._repo, 'divergent')

    def troubled(self):
        """True if the changeset is either unstable, bumped or divergent"""
        return self.unstable() or self.bumped() or self.divergent()

    def troubles(self):
        """return the list of troubles affecting this changesets.

        Troubles are returned as strings. possible values are:
        - unstable,
        - bumped,
        - divergent.
        """
        troubles = []
        if self.unstable():
            troubles.append('unstable')
        if self.bumped():
            troubles.append('bumped')
        if self.divergent():
            troubles.append('divergent')
        return troubles

    def parents(self):
        """return contexts for each parent changeset"""
        return self._parents

    def p1(self):
        return self._parents[0]

    def p2(self):
        if len(self._parents) == 2:
            return self._parents[1]
        # no second parent: return the null context
        return changectx(self._repo, -1)

    def _fileinfo(self, path):
        # Prefer whatever manifest data is already cached before falling
        # back to a (more expensive) manifest.find lookup.
        if '_manifest' in self.__dict__:
            try:
                return self._manifest[path], self._manifest.flags(path)
            except KeyError:
                raise error.ManifestLookupError(self._node, path,
                                                _('not found in manifest'))
        if '_manifestdelta' in self.__dict__ or path in self.files():
            if path in self._manifestdelta:
                return (self._manifestdelta[path],
                        self._manifestdelta.flags(path))
        node, flag = self._repo.manifest.find(self._changeset[0], path)
        if not node:
            raise error.ManifestLookupError(self._node, path,
                                            _('not found in manifest'))

        return node, flag

    def filenode(self, path):
        return self._fileinfo(path)[0]

    def flags(self, path):
        try:
            return self._fileinfo(path)[1]
        except error.LookupError:
            return ''

    def sub(self, path):
        return subrepo.subrepo(self, path)

    def match(self, pats=None, include=None, exclude=None, default='glob'):
        # Fix: the previous signature used a mutable default (pats=[]),
        # which is shared between calls; normalize None to a fresh list.
        if pats is None:
            pats = []
        r = self._repo
        return matchmod.match(r.root, r.getcwd(), pats,
                              include, exclude, default,
                              auditor=r.auditor, ctx=self)

    def diff(self, ctx2=None, match=None, **opts):
        """Returns a diff generator for the given contexts and matcher"""
        if ctx2 is None:
            ctx2 = self.p1()
        if ctx2 is not None:
            ctx2 = self._repo[ctx2]
        diffopts = patch.diffopts(self._repo.ui, opts)
        return patch.diff(self._repo, ctx2, self, match=match, opts=diffopts)

    @propertycache
    def _dirs(self):
        return scmutil.dirs(self._manifest)

    def dirs(self):
        return self._dirs

    def dirty(self, missing=False, merge=True, branch=True):
        # read-only contexts are never dirty; overridden by workingctx
        return False

    def status(self, other=None, match=None, listignored=False,
               listclean=False, listunknown=False, listsubrepos=False):
        """return status of files between two nodes or node and working
        directory.

        If other is None, compare this node with working directory.

        returns (modified, added, removed, deleted, unknown, ignored, clean)
        """

        ctx1 = self
        ctx2 = self._repo[other]

        # This next code block is, admittedly, fragile logic that tests for
        # reversing the contexts and wouldn't need to exist if it weren't for
        # the fast (and common) code path of comparing the working directory
        # with its first parent.
        #
        # What we're aiming for here is the ability to call:
        #
        # workingctx.status(parentctx)
        #
        # If we always built the manifest for each context and compared those,
        # then we'd be done. But the special case of the above call means we
        # just copy the manifest of the parent.
        # (renamed from `reversed`, which shadowed the builtin)
        swapped = False
        if (not isinstance(ctx1, changectx)
            and isinstance(ctx2, changectx)):
            swapped = True
            ctx1, ctx2 = ctx2, ctx1

        match = ctx2._matchstatus(ctx1, match)
        r = scmutil.status([], [], [], [], [], [], [])
        r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
                              listunknown)

        if swapped:
            # Reverse added and removed. Clear deleted, unknown and ignored as
            # these make no sense to reverse.
            r = scmutil.status(r.modified, r.removed, r.added, [], [], [],
                               r.clean)

        if listsubrepos:
            for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
                rev2 = ctx2.subrev(subpath)
                try:
                    submatch = matchmod.narrowmatcher(subpath, match)
                    s = sub.status(rev2, match=submatch, ignored=listignored,
                                   clean=listclean, unknown=listunknown,
                                   listsubrepos=True)
                    for rfiles, sfiles in zip(r, s):
                        rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
                except error.LookupError:
                    self._repo.ui.status(_("skipping missing "
                                           "subrepository: %s\n") % subpath)

        for l in r:
            l.sort()

        return r
338
338
339
339
def makememctx(repo, parents, text, user, date, branch, files, store,
               editor=None):
    """Build a memctx whose file contents are supplied by *store*.

    *store* must provide getfile(path) -> (data, (islink, isexec), copied);
    a None data value marks the file as absent in this context.
    """
    extra = {}
    if branch:
        extra['branch'] = encoding.fromlocal(branch)

    def getfilectx(repo, memctx, path):
        data, mode, copied = store.getfile(path)
        if data is None:
            # file is not present in this in-memory commit
            return None
        islink, isexec = mode
        return memfilectx(repo, path, data, islink=islink, isexec=isexec,
                          copied=copied, memctx=memctx)

    return memctx(repo, parents, text, files, getfilectx, user,
                  date, extra, editor)
355
355
class changectx(basectx):
    """A changecontext object makes access to data related to a particular
    changeset convenient. It represents a read-only context already present in
    the repo."""
    def __init__(self, repo, changeid=''):
        """changeid is a revision number, node, or tag"""

        # basectx.__new__ already returned the existing object when a
        # context was passed in, so there is nothing left to initialize.
        if isinstance(changeid, basectx):
            return

        if changeid == '':
            changeid = '.'
        self._repo = repo

        try:
            # fast paths: direct revision numbers and well-known symbols
            if isinstance(changeid, int):
                self._node = repo.changelog.node(changeid)
                self._rev = changeid
                return
            if isinstance(changeid, long):
                changeid = str(changeid)
            if changeid == '.':
                self._node = repo.dirstate.p1()
                self._rev = repo.changelog.rev(self._node)
                return
            if changeid == 'null':
                self._node = nullid
                self._rev = nullrev
                return
            if changeid == 'tip':
                self._node = repo.changelog.tip()
                self._rev = repo.changelog.rev(self._node)
                return
            if len(changeid) == 20:
                # looks like a binary node id
                try:
                    self._node = changeid
                    self._rev = repo.changelog.rev(changeid)
                    return
                except error.FilteredRepoLookupError:
                    raise
                except LookupError:
                    pass

            # a string that spells an in-range revision number
            try:
                rev = int(changeid)
                if str(rev) != changeid:
                    raise ValueError
                nrevs = len(repo.changelog)
                if rev < 0:
                    rev += nrevs
                if rev < 0 or rev >= nrevs:
                    raise ValueError
                self._rev = rev
                self._node = repo.changelog.node(rev)
                return
            except error.FilteredIndexError:
                raise
            except (ValueError, OverflowError, IndexError):
                pass

            # a full 40-character hex node id
            if len(changeid) == 40:
                try:
                    self._node = bin(changeid)
                    self._rev = repo.changelog.rev(self._node)
                    return
                except error.FilteredLookupError:
                    raise
                except (TypeError, LookupError):
                    pass

            # lookup bookmarks through the name interface
            try:
                self._node = repo.names.singlenode(repo, changeid)
                self._rev = repo.changelog.rev(self._node)
                return
            except KeyError:
                pass
            except error.FilteredRepoLookupError:
                raise
            except error.RepoLookupError:
                pass

            # last resort: unambiguous hex prefix match
            self._node = repo.unfiltered().changelog._partialmatch(changeid)
            if self._node is not None:
                self._rev = repo.changelog.rev(self._node)
                return

            # lookup failed
            # check if it might have come from damaged dirstate
            #
            # XXX we could avoid the unfiltered if we had a recognizable
            # exception for filtered changeset access
            if changeid in repo.unfiltered().dirstate.parents():
                msg = _("working directory has unknown parent '%s'!")
                raise error.Abort(msg % short(changeid))
            try:
                # binary node ids are hexified for the error message below
                if len(changeid) == 20:
                    changeid = hex(changeid)
            except TypeError:
                pass
        except (error.FilteredIndexError, error.FilteredLookupError,
                error.FilteredRepoLookupError):
            if repo.filtername == 'visible':
                msg = _("hidden revision '%s'") % changeid
                hint = _('use --hidden to access hidden revisions')
                raise error.FilteredRepoLookupError(msg, hint=hint)
            msg = _("filtered revision '%s' (not in '%s' subset)")
            msg %= (changeid, repo.filtername)
            raise error.FilteredRepoLookupError(msg)
        except IndexError:
            pass
        raise error.RepoLookupError(
            _("unknown revision '%s'") % changeid)
471
471
472 def __hash__(self):
472 def __hash__(self):
473 try:
473 try:
474 return hash(self._rev)
474 return hash(self._rev)
475 except AttributeError:
475 except AttributeError:
476 return id(self)
476 return id(self)
477
477
478 def __nonzero__(self):
478 def __nonzero__(self):
479 return self._rev != nullrev
479 return self._rev != nullrev
480
480
481 @propertycache
481 @propertycache
482 def _changeset(self):
482 def _changeset(self):
483 return self._repo.changelog.read(self.rev())
483 return self._repo.changelog.read(self.rev())
484
484
485 @propertycache
485 @propertycache
486 def _manifest(self):
486 def _manifest(self):
487 return self._repo.manifest.read(self._changeset[0])
487 return self._repo.manifest.read(self._changeset[0])
488
488
489 @propertycache
489 @propertycache
490 def _manifestdelta(self):
490 def _manifestdelta(self):
491 return self._repo.manifest.readdelta(self._changeset[0])
491 return self._repo.manifest.readdelta(self._changeset[0])
492
492
493 @propertycache
493 @propertycache
494 def _parents(self):
494 def _parents(self):
495 p = self._repo.changelog.parentrevs(self._rev)
495 p = self._repo.changelog.parentrevs(self._rev)
496 if p[1] == nullrev:
496 if p[1] == nullrev:
497 p = p[:-1]
497 p = p[:-1]
498 return [changectx(self._repo, x) for x in p]
498 return [changectx(self._repo, x) for x in p]
499
499
500 def changeset(self):
500 def changeset(self):
501 return self._changeset
501 return self._changeset
502 def manifestnode(self):
502 def manifestnode(self):
503 return self._changeset[0]
503 return self._changeset[0]
504
504
505 def user(self):
505 def user(self):
506 return self._changeset[1]
506 return self._changeset[1]
507 def date(self):
507 def date(self):
508 return self._changeset[2]
508 return self._changeset[2]
509 def files(self):
509 def files(self):
510 return self._changeset[3]
510 return self._changeset[3]
511 def description(self):
511 def description(self):
512 return self._changeset[4]
512 return self._changeset[4]
513 def branch(self):
513 def branch(self):
514 return encoding.tolocal(self._changeset[5].get("branch"))
514 return encoding.tolocal(self._changeset[5].get("branch"))
515 def closesbranch(self):
515 def closesbranch(self):
516 return 'close' in self._changeset[5]
516 return 'close' in self._changeset[5]
517 def extra(self):
517 def extra(self):
518 return self._changeset[5]
518 return self._changeset[5]
519 def tags(self):
519 def tags(self):
520 return self._repo.nodetags(self._node)
520 return self._repo.nodetags(self._node)
521 def bookmarks(self):
521 def bookmarks(self):
522 return self._repo.nodebookmarks(self._node)
522 return self._repo.nodebookmarks(self._node)
523 def phase(self):
523 def phase(self):
524 return self._repo._phasecache.phase(self._repo, self._rev)
524 return self._repo._phasecache.phase(self._repo, self._rev)
525 def hidden(self):
525 def hidden(self):
526 return self._rev in repoview.filterrevs(self._repo, 'visible')
526 return self._rev in repoview.filterrevs(self._repo, 'visible')
527
527
528 def children(self):
528 def children(self):
529 """return contexts for each child changeset"""
529 """return contexts for each child changeset"""
530 c = self._repo.changelog.children(self._node)
530 c = self._repo.changelog.children(self._node)
531 return [changectx(self._repo, x) for x in c]
531 return [changectx(self._repo, x) for x in c]
532
532
533 def ancestors(self):
533 def ancestors(self):
534 for a in self._repo.changelog.ancestors([self._rev]):
534 for a in self._repo.changelog.ancestors([self._rev]):
535 yield changectx(self._repo, a)
535 yield changectx(self._repo, a)
536
536
537 def descendants(self):
537 def descendants(self):
538 for d in self._repo.changelog.descendants([self._rev]):
538 for d in self._repo.changelog.descendants([self._rev]):
539 yield changectx(self._repo, d)
539 yield changectx(self._repo, d)
540
540
541 def filectx(self, path, fileid=None, filelog=None):
541 def filectx(self, path, fileid=None, filelog=None):
542 """get a file context from this changeset"""
542 """get a file context from this changeset"""
543 if fileid is None:
543 if fileid is None:
544 fileid = self.filenode(path)
544 fileid = self.filenode(path)
545 return filectx(self._repo, path, fileid=fileid,
545 return filectx(self._repo, path, fileid=fileid,
546 changectx=self, filelog=filelog)
546 changectx=self, filelog=filelog)
547
547
548 def ancestor(self, c2, warn=False):
548 def ancestor(self, c2, warn=False):
549 """return the "best" ancestor context of self and c2
549 """return the "best" ancestor context of self and c2
550
550
551 If there are multiple candidates, it will show a message and check
551 If there are multiple candidates, it will show a message and check
552 merge.preferancestor configuration before falling back to the
552 merge.preferancestor configuration before falling back to the
553 revlog ancestor."""
553 revlog ancestor."""
554 # deal with workingctxs
554 # deal with workingctxs
555 n2 = c2._node
555 n2 = c2._node
556 if n2 is None:
556 if n2 is None:
557 n2 = c2._parents[0]._node
557 n2 = c2._parents[0]._node
558 cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
558 cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
559 if not cahs:
559 if not cahs:
560 anc = nullid
560 anc = nullid
561 elif len(cahs) == 1:
561 elif len(cahs) == 1:
562 anc = cahs[0]
562 anc = cahs[0]
563 else:
563 else:
564 for r in self._repo.ui.configlist('merge', 'preferancestor'):
564 for r in self._repo.ui.configlist('merge', 'preferancestor'):
565 try:
565 try:
566 ctx = changectx(self._repo, r)
566 ctx = changectx(self._repo, r)
567 except error.RepoLookupError:
567 except error.RepoLookupError:
568 continue
568 continue
569 anc = ctx.node()
569 anc = ctx.node()
570 if anc in cahs:
570 if anc in cahs:
571 break
571 break
572 else:
572 else:
573 anc = self._repo.changelog.ancestor(self._node, n2)
573 anc = self._repo.changelog.ancestor(self._node, n2)
574 if warn:
574 if warn:
575 self._repo.ui.status(
575 self._repo.ui.status(
576 (_("note: using %s as ancestor of %s and %s\n") %
576 (_("note: using %s as ancestor of %s and %s\n") %
577 (short(anc), short(self._node), short(n2))) +
577 (short(anc), short(self._node), short(n2))) +
578 ''.join(_(" alternatively, use --config "
578 ''.join(_(" alternatively, use --config "
579 "merge.preferancestor=%s\n") %
579 "merge.preferancestor=%s\n") %
580 short(n) for n in sorted(cahs) if n != anc))
580 short(n) for n in sorted(cahs) if n != anc))
581 return changectx(self._repo, anc)
581 return changectx(self._repo, anc)
582
582
583 def descendant(self, other):
583 def descendant(self, other):
584 """True if other is descendant of this changeset"""
584 """True if other is descendant of this changeset"""
585 return self._repo.changelog.descendant(self._rev, other._rev)
585 return self._repo.changelog.descendant(self._rev, other._rev)
586
586
587 def walk(self, match):
587 def walk(self, match):
588 fset = set(match.files())
588 fset = set(match.files())
589 # for dirstate.walk, files=['.'] means "walk the whole tree".
589 # for dirstate.walk, files=['.'] means "walk the whole tree".
590 # follow that here, too
590 # follow that here, too
591 fset.discard('.')
591 fset.discard('.')
592
592
593 # avoid the entire walk if we're only looking for specific files
593 # avoid the entire walk if we're only looking for specific files
594 if fset and not match.anypats():
594 if fset and not match.anypats():
595 if util.all([fn in self for fn in fset]):
595 if util.all([fn in self for fn in fset]):
596 for fn in sorted(fset):
596 for fn in sorted(fset):
597 if match(fn):
597 if match(fn):
598 yield fn
598 yield fn
599 raise StopIteration
599 raise StopIteration
600
600
601 for fn in self:
601 for fn in self:
602 if fn in fset:
602 if fn in fset:
603 # specified pattern is the exact name
603 # specified pattern is the exact name
604 fset.remove(fn)
604 fset.remove(fn)
605 if match(fn):
605 if match(fn):
606 yield fn
606 yield fn
607 for fn in sorted(fset):
607 for fn in sorted(fset):
608 if fn in self._dirs:
608 if fn in self._dirs:
609 # specified pattern is a directory
609 # specified pattern is a directory
610 continue
610 continue
611 match.bad(fn, _('no such file in rev %s') % self)
611 match.bad(fn, _('no such file in rev %s') % self)
612
612
613 def matches(self, match):
613 def matches(self, match):
614 return self.walk(match)
614 return self.walk(match)
615
615
class basefilectx(object):
    """A filecontext object represents the common logic for its children:
    filectx: read-only access to a filerevision that is already present
             in the repo,
    workingfilectx: a filecontext that represents files from the working
                    directory,
    memfilectx: a filecontext that represents files in-memory."""
    def __new__(cls, repo, path, *args, **kwargs):
        # Accept (and ignore) the subclass constructor arguments; they are
        # consumed by each subclass's __init__.
        return super(basefilectx, cls).__new__(cls)
625
625
626 @propertycache
626 @propertycache
627 def _filelog(self):
627 def _filelog(self):
628 return self._repo.file(self._path)
628 return self._repo.file(self._path)
629
629
630 @propertycache
630 @propertycache
631 def _changeid(self):
631 def _changeid(self):
632 if '_changeid' in self.__dict__:
632 if '_changeid' in self.__dict__:
633 return self._changeid
633 return self._changeid
634 elif '_changectx' in self.__dict__:
634 elif '_changectx' in self.__dict__:
635 return self._changectx.rev()
635 return self._changectx.rev()
636 else:
636 else:
637 return self._filelog.linkrev(self._filerev)
637 return self._filelog.linkrev(self._filerev)
638
638
639 @propertycache
639 @propertycache
640 def _filenode(self):
640 def _filenode(self):
641 if '_fileid' in self.__dict__:
641 if '_fileid' in self.__dict__:
642 return self._filelog.lookup(self._fileid)
642 return self._filelog.lookup(self._fileid)
643 else:
643 else:
644 return self._changectx.filenode(self._path)
644 return self._changectx.filenode(self._path)
645
645
646 @propertycache
646 @propertycache
647 def _filerev(self):
647 def _filerev(self):
648 return self._filelog.rev(self._filenode)
648 return self._filelog.rev(self._filenode)
649
649
650 @propertycache
650 @propertycache
651 def _repopath(self):
651 def _repopath(self):
652 return self._path
652 return self._path
653
653
654 def __nonzero__(self):
654 def __nonzero__(self):
655 try:
655 try:
656 self._filenode
656 self._filenode
657 return True
657 return True
658 except error.LookupError:
658 except error.LookupError:
659 # file is missing
659 # file is missing
660 return False
660 return False
661
661
662 def __str__(self):
662 def __str__(self):
663 return "%s@%s" % (self.path(), self._changectx)
663 return "%s@%s" % (self.path(), self._changectx)
664
664
665 def __repr__(self):
665 def __repr__(self):
666 return "<%s %s>" % (type(self).__name__, str(self))
666 return "<%s %s>" % (type(self).__name__, str(self))
667
667
668 def __hash__(self):
668 def __hash__(self):
669 try:
669 try:
670 return hash((self._path, self._filenode))
670 return hash((self._path, self._filenode))
671 except AttributeError:
671 except AttributeError:
672 return id(self)
672 return id(self)
673
673
674 def __eq__(self, other):
674 def __eq__(self, other):
675 try:
675 try:
676 return (type(self) == type(other) and self._path == other._path
676 return (type(self) == type(other) and self._path == other._path
677 and self._filenode == other._filenode)
677 and self._filenode == other._filenode)
678 except AttributeError:
678 except AttributeError:
679 return False
679 return False
680
680
681 def __ne__(self, other):
681 def __ne__(self, other):
682 return not (self == other)
682 return not (self == other)
683
683
684 def filerev(self):
684 def filerev(self):
685 return self._filerev
685 return self._filerev
686 def filenode(self):
686 def filenode(self):
687 return self._filenode
687 return self._filenode
688 def flags(self):
688 def flags(self):
689 return self._changectx.flags(self._path)
689 return self._changectx.flags(self._path)
690 def filelog(self):
690 def filelog(self):
691 return self._filelog
691 return self._filelog
692 def rev(self):
692 def rev(self):
693 return self._changeid
693 return self._changeid
694 def linkrev(self):
694 def linkrev(self):
695 return self._filelog.linkrev(self._filerev)
695 return self._filelog.linkrev(self._filerev)
696 def node(self):
696 def node(self):
697 return self._changectx.node()
697 return self._changectx.node()
698 def hex(self):
698 def hex(self):
699 return self._changectx.hex()
699 return self._changectx.hex()
700 def user(self):
700 def user(self):
701 return self._changectx.user()
701 return self._changectx.user()
702 def date(self):
702 def date(self):
703 return self._changectx.date()
703 return self._changectx.date()
704 def files(self):
704 def files(self):
705 return self._changectx.files()
705 return self._changectx.files()
706 def description(self):
706 def description(self):
707 return self._changectx.description()
707 return self._changectx.description()
708 def branch(self):
708 def branch(self):
709 return self._changectx.branch()
709 return self._changectx.branch()
710 def extra(self):
710 def extra(self):
711 return self._changectx.extra()
711 return self._changectx.extra()
712 def phase(self):
712 def phase(self):
713 return self._changectx.phase()
713 return self._changectx.phase()
714 def phasestr(self):
714 def phasestr(self):
715 return self._changectx.phasestr()
715 return self._changectx.phasestr()
716 def manifest(self):
716 def manifest(self):
717 return self._changectx.manifest()
717 return self._changectx.manifest()
718 def changectx(self):
718 def changectx(self):
719 return self._changectx
719 return self._changectx
720
720
721 def path(self):
721 def path(self):
722 return self._path
722 return self._path
723
723
724 def isbinary(self):
724 def isbinary(self):
725 try:
725 try:
726 return util.binary(self.data())
726 return util.binary(self.data())
727 except IOError:
727 except IOError:
728 return False
728 return False
729 def isexec(self):
729 def isexec(self):
730 return 'x' in self.flags()
730 return 'x' in self.flags()
731 def islink(self):
731 def islink(self):
732 return 'l' in self.flags()
732 return 'l' in self.flags()
733
733
734 def cmp(self, fctx):
734 def cmp(self, fctx):
735 """compare with other file context
735 """compare with other file context
736
736
737 returns True if different than fctx.
737 returns True if different than fctx.
738 """
738 """
739 if (fctx._filerev is None
739 if (fctx._filerev is None
740 and (self._repo._encodefilterpats
740 and (self._repo._encodefilterpats
741 # if file data starts with '\1\n', empty metadata block is
741 # if file data starts with '\1\n', empty metadata block is
742 # prepended, which adds 4 bytes to filelog.size().
742 # prepended, which adds 4 bytes to filelog.size().
743 or self.size() - 4 == fctx.size())
743 or self.size() - 4 == fctx.size())
744 or self.size() == fctx.size()):
744 or self.size() == fctx.size()):
745 return self._filelog.cmp(self._filenode, fctx.data())
745 return self._filelog.cmp(self._filenode, fctx.data())
746
746
747 return True
747 return True
748
748
749 def _adjustlinkrev(self, path, filelog, fnode, srcrev, inclusive=False):
749 def _adjustlinkrev(self, path, filelog, fnode, srcrev, inclusive=False):
750 """return the first ancestor of <srcrev> introducting <fnode>
750 """return the first ancestor of <srcrev> introducting <fnode>
751
751
752 If the linkrev of the file revision does not point to an ancestor of
752 If the linkrev of the file revision does not point to an ancestor of
753 srcrev, we'll walk down the ancestors until we find one introducing
753 srcrev, we'll walk down the ancestors until we find one introducing
754 this file revision.
754 this file revision.
755
755
756 :repo: a localrepository object (used to access changelog and manifest)
756 :repo: a localrepository object (used to access changelog and manifest)
757 :path: the file path
757 :path: the file path
758 :fnode: the nodeid of the file revision
758 :fnode: the nodeid of the file revision
759 :filelog: the filelog of this path
759 :filelog: the filelog of this path
760 :srcrev: the changeset revision we search ancestors from
760 :srcrev: the changeset revision we search ancestors from
761 :inclusive: if true, the src revision will also be checked
761 :inclusive: if true, the src revision will also be checked
762 """
762 """
763 repo = self._repo
763 repo = self._repo
764 cl = repo.unfiltered().changelog
764 cl = repo.unfiltered().changelog
765 ma = repo.manifest
765 ma = repo.manifest
766 # fetch the linkrev
766 # fetch the linkrev
767 fr = filelog.rev(fnode)
767 fr = filelog.rev(fnode)
768 lkr = filelog.linkrev(fr)
768 lkr = filelog.linkrev(fr)
769 # hack to reuse ancestor computation when searching for renames
770 memberanc = getattr(self, '_ancestrycontext', None)
771 iteranc = None
772 if memberanc is None:
773 memberanc = iteranc = cl.ancestors([srcrev], lkr,
774 inclusive=inclusive)
769 # check if this linkrev is an ancestor of srcrev
775 # check if this linkrev is an ancestor of srcrev
770 anc = cl.ancestors([srcrev], lkr, inclusive=inclusive)
776 if lkr not in memberanc:
771 if lkr not in anc:
777 if iteranc is None:
772 for a in anc:
778 iteranc = cl.ancestors([srcrev], lkr, inclusive=inclusive)
779 for a in iteranc:
773 ac = cl.read(a) # get changeset data (we avoid object creation)
780 ac = cl.read(a) # get changeset data (we avoid object creation)
774 if path in ac[3]: # checking the 'files' field.
781 if path in ac[3]: # checking the 'files' field.
775 # The file has been touched, check if the content is
782 # The file has been touched, check if the content is
776 # similar to the one we search for.
783 # similar to the one we search for.
777 if fnode == ma.readfast(ac[0]).get(path):
784 if fnode == ma.readfast(ac[0]).get(path):
778 return a
785 return a
779 # In theory, we should never get out of that loop without a result.
786 # In theory, we should never get out of that loop without a result.
780 # But if manifest uses a buggy file revision (not children of the
787 # But if manifest uses a buggy file revision (not children of the
781 # one it replaces) we could. Such a buggy situation will likely
788 # one it replaces) we could. Such a buggy situation will likely
782 # result is crash somewhere else at to some point.
789 # result is crash somewhere else at to some point.
783 return lkr
790 return lkr
784
791
785 def introrev(self):
792 def introrev(self):
786 """return the rev of the changeset which introduced this file revision
793 """return the rev of the changeset which introduced this file revision
787
794
788 This method is different from linkrev because it take into account the
795 This method is different from linkrev because it take into account the
789 changeset the filectx was created from. It ensures the returned
796 changeset the filectx was created from. It ensures the returned
790 revision is one of its ancestors. This prevents bugs from
797 revision is one of its ancestors. This prevents bugs from
791 'linkrev-shadowing' when a file revision is used by multiple
798 'linkrev-shadowing' when a file revision is used by multiple
792 changesets.
799 changesets.
793 """
800 """
794 lkr = self.linkrev()
801 lkr = self.linkrev()
795 attrs = vars(self)
802 attrs = vars(self)
796 noctx = not ('_changeid' in attrs or '_changectx' in attrs)
803 noctx = not ('_changeid' in attrs or '_changectx' in attrs)
797 if noctx or self.rev() == lkr:
804 if noctx or self.rev() == lkr:
798 return self.linkrev()
805 return self.linkrev()
799 return self._adjustlinkrev(self._path, self._filelog, self._filenode,
806 return self._adjustlinkrev(self._path, self._filelog, self._filenode,
800 self.rev(), inclusive=True)
807 self.rev(), inclusive=True)
801
808
802 def parents(self):
809 def parents(self):
803 _path = self._path
810 _path = self._path
804 fl = self._filelog
811 fl = self._filelog
805 parents = self._filelog.parents(self._filenode)
812 parents = self._filelog.parents(self._filenode)
806 pl = [(_path, node, fl) for node in parents if node != nullid]
813 pl = [(_path, node, fl) for node in parents if node != nullid]
807
814
808 r = fl.renamed(self._filenode)
815 r = fl.renamed(self._filenode)
809 if r:
816 if r:
810 # - In the simple rename case, both parent are nullid, pl is empty.
817 # - In the simple rename case, both parent are nullid, pl is empty.
811 # - In case of merge, only one of the parent is null id and should
818 # - In case of merge, only one of the parent is null id and should
812 # be replaced with the rename information. This parent is -always-
819 # be replaced with the rename information. This parent is -always-
813 # the first one.
820 # the first one.
814 #
821 #
815 # As null id have alway been filtered out in the previous list
822 # As null id have alway been filtered out in the previous list
816 # comprehension, inserting to 0 will always result in "replacing
823 # comprehension, inserting to 0 will always result in "replacing
817 # first nullid parent with rename information.
824 # first nullid parent with rename information.
818 pl.insert(0, (r[0], r[1], self._repo.file(r[0])))
825 pl.insert(0, (r[0], r[1], self._repo.file(r[0])))
819
826
820 ret = []
827 ret = []
821 for path, fnode, l in pl:
828 for path, fnode, l in pl:
822 if '_changeid' in vars(self) or '_changectx' in vars(self):
829 if '_changeid' in vars(self) or '_changectx' in vars(self):
823 # If self is associated with a changeset (probably explicitly
830 # If self is associated with a changeset (probably explicitly
824 # fed), ensure the created filectx is associated with a
831 # fed), ensure the created filectx is associated with a
825 # changeset that is an ancestor of self.changectx.
832 # changeset that is an ancestor of self.changectx.
826 rev = self._adjustlinkrev(path, l, fnode, self.rev())
833 rev = self._adjustlinkrev(path, l, fnode, self.rev())
827 fctx = filectx(self._repo, path, fileid=fnode, filelog=l,
834 fctx = filectx(self._repo, path, fileid=fnode, filelog=l,
828 changeid=rev)
835 changeid=rev)
836 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
837
829 else:
838 else:
830 fctx = filectx(self._repo, path, fileid=fnode, filelog=l)
839 fctx = filectx(self._repo, path, fileid=fnode, filelog=l)
831 ret.append(fctx)
840 ret.append(fctx)
832 return ret
841 return ret
833
842
834 def p1(self):
843 def p1(self):
835 return self.parents()[0]
844 return self.parents()[0]
836
845
837 def p2(self):
846 def p2(self):
838 p = self.parents()
847 p = self.parents()
839 if len(p) == 2:
848 if len(p) == 2:
840 return p[1]
849 return p[1]
841 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
850 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
842
851
843 def annotate(self, follow=False, linenumber=None, diffopts=None):
852 def annotate(self, follow=False, linenumber=None, diffopts=None):
844 '''returns a list of tuples of (ctx, line) for each line
853 '''returns a list of tuples of (ctx, line) for each line
845 in the file, where ctx is the filectx of the node where
854 in the file, where ctx is the filectx of the node where
846 that line was last changed.
855 that line was last changed.
847 This returns tuples of ((ctx, linenumber), line) for each line,
856 This returns tuples of ((ctx, linenumber), line) for each line,
848 if "linenumber" parameter is NOT "None".
857 if "linenumber" parameter is NOT "None".
849 In such tuples, linenumber means one at the first appearance
858 In such tuples, linenumber means one at the first appearance
850 in the managed file.
859 in the managed file.
851 To reduce annotation cost,
860 To reduce annotation cost,
852 this returns fixed value(False is used) as linenumber,
861 this returns fixed value(False is used) as linenumber,
853 if "linenumber" parameter is "False".'''
862 if "linenumber" parameter is "False".'''
854
863
855 if linenumber is None:
864 if linenumber is None:
856 def decorate(text, rev):
865 def decorate(text, rev):
857 return ([rev] * len(text.splitlines()), text)
866 return ([rev] * len(text.splitlines()), text)
858 elif linenumber:
867 elif linenumber:
859 def decorate(text, rev):
868 def decorate(text, rev):
860 size = len(text.splitlines())
869 size = len(text.splitlines())
861 return ([(rev, i) for i in xrange(1, size + 1)], text)
870 return ([(rev, i) for i in xrange(1, size + 1)], text)
862 else:
871 else:
863 def decorate(text, rev):
872 def decorate(text, rev):
864 return ([(rev, False)] * len(text.splitlines()), text)
873 return ([(rev, False)] * len(text.splitlines()), text)
865
874
866 def pair(parent, child):
875 def pair(parent, child):
867 blocks = mdiff.allblocks(parent[1], child[1], opts=diffopts,
876 blocks = mdiff.allblocks(parent[1], child[1], opts=diffopts,
868 refine=True)
877 refine=True)
869 for (a1, a2, b1, b2), t in blocks:
878 for (a1, a2, b1, b2), t in blocks:
870 # Changed blocks ('!') or blocks made only of blank lines ('~')
879 # Changed blocks ('!') or blocks made only of blank lines ('~')
871 # belong to the child.
880 # belong to the child.
872 if t == '=':
881 if t == '=':
873 child[0][b1:b2] = parent[0][a1:a2]
882 child[0][b1:b2] = parent[0][a1:a2]
874 return child
883 return child
875
884
876 getlog = util.lrucachefunc(lambda x: self._repo.file(x))
885 getlog = util.lrucachefunc(lambda x: self._repo.file(x))
877
886
878 def parents(f):
887 def parents(f):
879 pl = f.parents()
888 pl = f.parents()
880
889
881 # Don't return renamed parents if we aren't following.
890 # Don't return renamed parents if we aren't following.
882 if not follow:
891 if not follow:
883 pl = [p for p in pl if p.path() == f.path()]
892 pl = [p for p in pl if p.path() == f.path()]
884
893
885 # renamed filectx won't have a filelog yet, so set it
894 # renamed filectx won't have a filelog yet, so set it
886 # from the cache to save time
895 # from the cache to save time
887 for p in pl:
896 for p in pl:
888 if not '_filelog' in p.__dict__:
897 if not '_filelog' in p.__dict__:
889 p._filelog = getlog(p.path())
898 p._filelog = getlog(p.path())
890
899
891 return pl
900 return pl
892
901
893 # use linkrev to find the first changeset where self appeared
902 # use linkrev to find the first changeset where self appeared
894 base = self
903 base = self
895 introrev = self.introrev()
904 introrev = self.introrev()
896 if self.rev() != introrev:
905 if self.rev() != introrev:
897 base = self.filectx(self.filenode(), changeid=introrev)
906 base = self.filectx(self.filenode(), changeid=introrev)
898
907
899 # This algorithm would prefer to be recursive, but Python is a
908 # This algorithm would prefer to be recursive, but Python is a
900 # bit recursion-hostile. Instead we do an iterative
909 # bit recursion-hostile. Instead we do an iterative
901 # depth-first search.
910 # depth-first search.
902
911
903 visit = [base]
912 visit = [base]
904 hist = {}
913 hist = {}
905 pcache = {}
914 pcache = {}
906 needed = {base: 1}
915 needed = {base: 1}
907 while visit:
916 while visit:
908 f = visit[-1]
917 f = visit[-1]
909 pcached = f in pcache
918 pcached = f in pcache
910 if not pcached:
919 if not pcached:
911 pcache[f] = parents(f)
920 pcache[f] = parents(f)
912
921
913 ready = True
922 ready = True
914 pl = pcache[f]
923 pl = pcache[f]
915 for p in pl:
924 for p in pl:
916 if p not in hist:
925 if p not in hist:
917 ready = False
926 ready = False
918 visit.append(p)
927 visit.append(p)
919 if not pcached:
928 if not pcached:
920 needed[p] = needed.get(p, 0) + 1
929 needed[p] = needed.get(p, 0) + 1
921 if ready:
930 if ready:
922 visit.pop()
931 visit.pop()
923 reusable = f in hist
932 reusable = f in hist
924 if reusable:
933 if reusable:
925 curr = hist[f]
934 curr = hist[f]
926 else:
935 else:
927 curr = decorate(f.data(), f)
936 curr = decorate(f.data(), f)
928 for p in pl:
937 for p in pl:
929 if not reusable:
938 if not reusable:
930 curr = pair(hist[p], curr)
939 curr = pair(hist[p], curr)
931 if needed[p] == 1:
940 if needed[p] == 1:
932 del hist[p]
941 del hist[p]
933 del needed[p]
942 del needed[p]
934 else:
943 else:
935 needed[p] -= 1
944 needed[p] -= 1
936
945
937 hist[f] = curr
946 hist[f] = curr
938 pcache[f] = []
947 pcache[f] = []
939
948
940 return zip(hist[base][0], hist[base][1].splitlines(True))
949 return zip(hist[base][0], hist[base][1].splitlines(True))
941
950
942 def ancestors(self, followfirst=False):
951 def ancestors(self, followfirst=False):
943 visit = {}
952 visit = {}
944 c = self
953 c = self
945 cut = followfirst and 1 or None
954 cut = followfirst and 1 or None
946 while True:
955 while True:
947 for parent in c.parents()[:cut]:
956 for parent in c.parents()[:cut]:
948 visit[(parent.rev(), parent.node())] = parent
957 visit[(parent.rev(), parent.node())] = parent
949 if not visit:
958 if not visit:
950 break
959 break
951 c = visit.pop(max(visit))
960 c = visit.pop(max(visit))
952 yield c
961 yield c
953
962
class filectx(basefilectx):
    """A filecontext object makes access to data related to a particular
    filerevision convenient."""
    def __init__(self, repo, path, changeid=None, fileid=None,
                 filelog=None, changectx=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        self._repo = repo
        self._path = path

        # At least one way to pin down the file revision must be given.
        assert (changeid is not None
                or fileid is not None
                or changectx is not None), \
                ("bad args: changeid=%r, fileid=%r, changectx=%r"
                 % (changeid, fileid, changectx))

        # Only set the attributes that were supplied; the rest are
        # lazily derived via propertycache.
        if filelog is not None:
            self._filelog = filelog

        if changeid is not None:
            self._changeid = changeid
        if changectx is not None:
            self._changectx = changectx
        if fileid is not None:
            self._fileid = fileid
979
988
980 @propertycache
989 @propertycache
981 def _changectx(self):
990 def _changectx(self):
982 try:
991 try:
983 return changectx(self._repo, self._changeid)
992 return changectx(self._repo, self._changeid)
984 except error.FilteredRepoLookupError:
993 except error.FilteredRepoLookupError:
985 # Linkrev may point to any revision in the repository. When the
994 # Linkrev may point to any revision in the repository. When the
986 # repository is filtered this may lead to `filectx` trying to build
995 # repository is filtered this may lead to `filectx` trying to build
987 # `changectx` for filtered revision. In such case we fallback to
996 # `changectx` for filtered revision. In such case we fallback to
988 # creating `changectx` on the unfiltered version of the reposition.
997 # creating `changectx` on the unfiltered version of the reposition.
989 # This fallback should not be an issue because `changectx` from
998 # This fallback should not be an issue because `changectx` from
990 # `filectx` are not used in complex operations that care about
999 # `filectx` are not used in complex operations that care about
991 # filtering.
1000 # filtering.
992 #
1001 #
993 # This fallback is a cheap and dirty fix that prevent several
1002 # This fallback is a cheap and dirty fix that prevent several
994 # crashes. It does not ensure the behavior is correct. However the
1003 # crashes. It does not ensure the behavior is correct. However the
995 # behavior was not correct before filtering either and "incorrect
1004 # behavior was not correct before filtering either and "incorrect
996 # behavior" is seen as better as "crash"
1005 # behavior" is seen as better as "crash"
997 #
1006 #
998 # Linkrevs have several serious troubles with filtering that are
1007 # Linkrevs have several serious troubles with filtering that are
999 # complicated to solve. Proper handling of the issue here should be
1008 # complicated to solve. Proper handling of the issue here should be
1000 # considered when solving linkrev issue are on the table.
1009 # considered when solving linkrev issue are on the table.
1001 return changectx(self._repo.unfiltered(), self._changeid)
1010 return changectx(self._repo.unfiltered(), self._changeid)
1002
1011
1003 def filectx(self, fileid, changeid=None):
1012 def filectx(self, fileid, changeid=None):
1004 '''opens an arbitrary revision of the file without
1013 '''opens an arbitrary revision of the file without
1005 opening a new filelog'''
1014 opening a new filelog'''
1006 return filectx(self._repo, self._path, fileid=fileid,
1015 return filectx(self._repo, self._path, fileid=fileid,
1007 filelog=self._filelog, changeid=changeid)
1016 filelog=self._filelog, changeid=changeid)
1008
1017
1009 def data(self):
1018 def data(self):
1010 try:
1019 try:
1011 return self._filelog.read(self._filenode)
1020 return self._filelog.read(self._filenode)
1012 except error.CensoredNodeError:
1021 except error.CensoredNodeError:
1013 if self._repo.ui.config("censor", "policy", "abort") == "ignore":
1022 if self._repo.ui.config("censor", "policy", "abort") == "ignore":
1014 return ""
1023 return ""
1015 raise util.Abort(_("censored node: %s") % short(self._filenode),
1024 raise util.Abort(_("censored node: %s") % short(self._filenode),
1016 hint=_("set censor.policy to ignore errors"))
1025 hint=_("set censor.policy to ignore errors"))
1017
1026
1018 def size(self):
1027 def size(self):
1019 return self._filelog.size(self._filerev)
1028 return self._filelog.size(self._filerev)
1020
1029
1021 def renamed(self):
1030 def renamed(self):
1022 """check if file was actually renamed in this changeset revision
1031 """check if file was actually renamed in this changeset revision
1023
1032
1024 If rename logged in file revision, we report copy for changeset only
1033 If rename logged in file revision, we report copy for changeset only
1025 if file revisions linkrev points back to the changeset in question
1034 if file revisions linkrev points back to the changeset in question
1026 or both changeset parents contain different file revisions.
1035 or both changeset parents contain different file revisions.
1027 """
1036 """
1028
1037
1029 renamed = self._filelog.renamed(self._filenode)
1038 renamed = self._filelog.renamed(self._filenode)
1030 if not renamed:
1039 if not renamed:
1031 return renamed
1040 return renamed
1032
1041
1033 if self.rev() == self.linkrev():
1042 if self.rev() == self.linkrev():
1034 return renamed
1043 return renamed
1035
1044
1036 name = self.path()
1045 name = self.path()
1037 fnode = self._filenode
1046 fnode = self._filenode
1038 for p in self._changectx.parents():
1047 for p in self._changectx.parents():
1039 try:
1048 try:
1040 if fnode == p.filenode(name):
1049 if fnode == p.filenode(name):
1041 return None
1050 return None
1042 except error.LookupError:
1051 except error.LookupError:
1043 pass
1052 pass
1044 return renamed
1053 return renamed
1045
1054
1046 def children(self):
1055 def children(self):
1047 # hard for renames
1056 # hard for renames
1048 c = self._filelog.children(self._filenode)
1057 c = self._filelog.children(self._filenode)
1049 return [filectx(self._repo, self._path, fileid=x,
1058 return [filectx(self._repo, self._path, fileid=x,
1050 filelog=self._filelog) for x in c]
1059 filelog=self._filelog) for x in c]
1051
1060
class committablectx(basectx):
    """A committablectx object provides common functionality for a context that
    wants the ability to commit, e.g. workingctx or memctx."""
    def __init__(self, repo, text="", user=None, date=None, extra=None,
                 changes=None):
        # Commit metadata given by the caller is stored eagerly; anything
        # left unset is computed lazily by the propertycaches further down
        # (_status, _user, _date).
        self._repo = repo
        self._rev = None
        self._node = None
        self._text = text
        if date:
            self._date = util.parsedate(date)
        if user:
            self._user = user
        if changes:
            self._status = changes

        self._extra = {}
        if extra:
            self._extra = extra.copy()
        if 'branch' not in self._extra:
            try:
                branch = encoding.fromlocal(self._repo.dirstate.branch())
            except UnicodeDecodeError:
                raise util.Abort(_('branch name not in UTF-8!'))
            self._extra['branch'] = branch
        if self._extra['branch'] == '':
            self._extra['branch'] = 'default'

    def __str__(self):
        # e.g. "deadbeefcafe+" -- the trailing '+' marks uncommitted state
        return str(self._parents[0]) + "+"

    def __nonzero__(self):
        return True

    def _buildflagfunc(self):
        # Create a fallback function for getting file flags when the
        # filesystem doesn't support them

        copiesget = self._repo.dirstate.copies().get

        if len(self._parents) < 2:
            # when we have one parent, it's easy: copy from parent
            man = self._parents[0].manifest()
            def func(f):
                f = copiesget(f, f)
                return man.flags(f)
        else:
            # merges are tricky: we try to reconstruct the unstored
            # result from the merge (issue1802)
            p1, p2 = self._parents
            pa = p1.ancestor(p2)
            m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()

            def func(f):
                f = copiesget(f, f) # may be wrong for merges with copies
                fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
                if fl1 == fl2:
                    return fl1
                if fl1 == fla:
                    return fl2
                if fl2 == fla:
                    return fl1
                return '' # punt for conflicts

        return func

    @propertycache
    def _flagfunc(self):
        return self._repo.dirstate.flagfunc(self._buildflagfunc)

    @propertycache
    def _manifest(self):
        """generate a manifest corresponding to the values in self._status

        This reuse the file nodeid from parent, but we append an extra letter
        when modified. Modified files get an extra 'm' while added files get
        an extra 'a'. This is used by manifests merge to see that files
        are different and by update logic to avoid deleting newly added files.
        """

        man1 = self._parents[0].manifest()
        man = man1.copy()
        if len(self._parents) > 1:
            man2 = self.p2().manifest()
            def getman(f):
                if f in man1:
                    return man1
                return man2
        else:
            getman = lambda f: man1

        copied = self._repo.dirstate.copies()
        ff = self._flagfunc
        for i, l in (("a", self._status.added), ("m", self._status.modified)):
            for f in l:
                orig = copied.get(f, f)
                man[f] = getman(orig).get(orig, nullid) + i
                try:
                    man.setflag(f, ff(f))
                except OSError:
                    # flags are best-effort; the file may be gone already
                    pass

        for f in self._status.deleted + self._status.removed:
            if f in man:
                del man[f]

        return man

    @propertycache
    def _status(self):
        return self._repo.status()

    @propertycache
    def _user(self):
        return self._repo.ui.username()

    @propertycache
    def _date(self):
        return util.makedate()

    def subrev(self, subpath):
        # an uncommitted context has no recorded subrepo revision yet
        return None

    def user(self):
        return self._user or self._repo.ui.username()
    def date(self):
        return self._date
    def description(self):
        return self._text
    def files(self):
        return sorted(self._status.modified + self._status.added +
                      self._status.removed)

    def modified(self):
        return self._status.modified
    def added(self):
        return self._status.added
    def removed(self):
        return self._status.removed
    def deleted(self):
        return self._status.deleted
    def branch(self):
        return encoding.tolocal(self._extra['branch'])
    def closesbranch(self):
        return 'close' in self._extra
    def extra(self):
        return self._extra

    def tags(self):
        # the not-yet-committed revision inherits its parents' tags
        t = []
        for p in self.parents():
            t.extend(p.tags())
        return t

    def bookmarks(self):
        b = []
        for p in self.parents():
            b.extend(p.bookmarks())
        return b

    def phase(self):
        phase = phases.draft # default phase to draft
        for p in self.parents():
            phase = max(phase, p.phase())
        return phase

    def hidden(self):
        return False

    def children(self):
        return []

    def flags(self, path):
        # prefer the cached manifest when it has already been built
        if '_manifest' in self.__dict__:
            try:
                return self._manifest.flags(path)
            except KeyError:
                return ''

        try:
            return self._flagfunc(path)
        except OSError:
            return ''

    def ancestor(self, c2):
        """return the "best" ancestor context of self and c2"""
        return self._parents[0].ancestor(c2) # punt on two parents for now

    def walk(self, match):
        return sorted(self._repo.dirstate.walk(match, sorted(self.substate),
                                               True, False))

    def matches(self, match):
        return sorted(self._repo.dirstate.matches(match))

    def ancestors(self):
        # parents first, then the rest of the changelog ancestry
        for p in self._parents:
            yield p
        for a in self._repo.changelog.ancestors(
            [p.rev() for p in self._parents]):
            yield changectx(self._repo, a)

    def markcommitted(self, node):
        """Perform post-commit cleanup necessary after committing this ctx

        Specifically, this updates backing stores this working context
        wraps to reflect the fact that the changes reflected by this
        workingctx have been committed. For example, it marks
        modified and added files as normal in the dirstate.

        """

        self._repo.dirstate.beginparentchange()
        for f in self.modified() + self.added():
            self._repo.dirstate.normal(f)
        for f in self.removed():
            self._repo.dirstate.drop(f)
        self._repo.dirstate.setparents(node)
        self._repo.dirstate.endparentchange()

    def dirs(self):
        return self._repo.dirstate.dirs()
1274
1283
class workingctx(committablectx):
    """A workingctx object makes access to data related to
    the current working directory convenient.
    date - any valid date string or (unixtime, offset), or None.
    user - username string, or None.
    extra - a dictionary of extra values, or None.
    changes - a list of file lists as returned by localrepo.status()
      or None to use the repository status.
    """
    def __init__(self, repo, text="", user=None, date=None, extra=None,
                 changes=None):
        super(workingctx, self).__init__(repo, text, user, date, extra, changes)

    def __iter__(self):
        # iterate tracked files, skipping ones marked removed ('r')
        d = self._repo.dirstate
        for f in d:
            if d[f] != 'r':
                yield f

    def __contains__(self, key):
        # '?' (unknown) and 'r' (removed) do not count as present
        return self._repo.dirstate[key] not in "?r"

    @propertycache
    def _parents(self):
        p = self._repo.dirstate.parents()
        if p[1] == nullid:
            p = p[:-1]
        return [changectx(self._repo, x) for x in p]

    def filectx(self, path, filelog=None):
        """get a file context from the working directory"""
        return workingfilectx(self._repo, path, workingctx=self,
                              filelog=filelog)

    def dirty(self, missing=False, merge=True, branch=True):
        "check whether a working directory is modified"
        # check subrepos first
        for s in sorted(self.substate):
            if self.sub(s).dirty():
                return True
        # check current working dir
        return ((merge and self.p2()) or
                (branch and self.branch() != self.p1().branch()) or
                self.modified() or self.added() or self.removed() or
                (missing and self.deleted()))

    def add(self, list, prefix=""):
        # NOTE: 'list' shadows the builtin; kept for interface stability
        join = lambda f: os.path.join(prefix, f)
        wlock = self._repo.wlock()
        ui, ds = self._repo.ui, self._repo.dirstate
        try:
            rejected = []
            lstat = self._repo.wvfs.lstat
            for f in list:
                scmutil.checkportable(ui, join(f))
                try:
                    st = lstat(f)
                except OSError:
                    ui.warn(_("%s does not exist!\n") % join(f))
                    rejected.append(f)
                    continue
                if st.st_size > 10000000:
                    # warn but still add: revlog delta chains can need ~3x RAM
                    ui.warn(_("%s: up to %d MB of RAM may be required "
                              "to manage this file\n"
                              "(use 'hg revert %s' to cancel the "
                              "pending addition)\n")
                            % (f, 3 * st.st_size // 1000000, join(f)))
                if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
                    ui.warn(_("%s not added: only files and symlinks "
                              "supported currently\n") % join(f))
                    rejected.append(f)
                elif ds[f] in 'amn':
                    ui.warn(_("%s already tracked!\n") % join(f))
                elif ds[f] == 'r':
                    # previously removed: re-adding means "keep tracking"
                    ds.normallookup(f)
                else:
                    ds.add(f)
            return rejected
        finally:
            wlock.release()

    def forget(self, files, prefix=""):
        join = lambda f: os.path.join(prefix, f)
        wlock = self._repo.wlock()
        try:
            rejected = []
            for f in files:
                if f not in self._repo.dirstate:
                    self._repo.ui.warn(_("%s not tracked!\n") % join(f))
                    rejected.append(f)
                elif self._repo.dirstate[f] != 'a':
                    self._repo.dirstate.remove(f)
                else:
                    # scheduled-for-add files are simply dropped
                    self._repo.dirstate.drop(f)
            return rejected
        finally:
            wlock.release()

    def undelete(self, list):
        # NOTE: 'list' shadows the builtin; kept for interface stability
        pctxs = self.parents()
        wlock = self._repo.wlock()
        try:
            for f in list:
                if self._repo.dirstate[f] != 'r':
                    self._repo.ui.warn(_("%s not removed!\n") % f)
                else:
                    # restore the file contents from whichever parent has it
                    fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f]
                    t = fctx.data()
                    self._repo.wwrite(f, t, fctx.flags())
                    self._repo.dirstate.normal(f)
        finally:
            wlock.release()

    def copy(self, source, dest):
        try:
            st = self._repo.wvfs.lstat(dest)
        except OSError, err:
            if err.errno != errno.ENOENT:
                raise
            self._repo.ui.warn(_("%s does not exist!\n") % dest)
            return
        if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
            self._repo.ui.warn(_("copy failed: %s is not a file or a "
                                 "symbolic link\n") % dest)
        else:
            wlock = self._repo.wlock()
            try:
                if self._repo.dirstate[dest] in '?':
                    self._repo.dirstate.add(dest)
                elif self._repo.dirstate[dest] in 'r':
                    self._repo.dirstate.normallookup(dest)
                self._repo.dirstate.copy(source, dest)
            finally:
                wlock.release()

    def _filtersuspectsymlink(self, files):
        if not files or self._repo.dirstate._checklink:
            return files

        # Symlink placeholders may get non-symlink-like contents
        # via user error or dereferencing by NFS or Samba servers,
        # so we filter out any placeholders that don't look like a
        # symlink
        sane = []
        for f in files:
            if self.flags(f) == 'l':
                d = self[f].data()
                if d == '' or len(d) >= 1024 or '\n' in d or util.binary(d):
                    self._repo.ui.debug('ignoring suspect symlink placeholder'
                                        ' "%s"\n' % f)
                    continue
            sane.append(f)
        return sane

    def _checklookup(self, files):
        # check for any possibly clean files
        if not files:
            return [], []

        modified = []
        fixup = []
        pctx = self._parents[0]
        # do a full compare of any files that might have changed
        for f in sorted(files):
            if (f not in pctx or self.flags(f) != pctx.flags(f)
                or pctx[f].cmp(self[f])):
                modified.append(f)
            else:
                fixup.append(f)

        # update dirstate for files that are actually clean
        if fixup:
            try:
                # updating the dirstate is optional
                # so we don't wait on the lock
                # wlock can invalidate the dirstate, so cache normal _after_
                # taking the lock
                wlock = self._repo.wlock(False)
                normal = self._repo.dirstate.normal
                try:
                    for f in fixup:
                        normal(f)
                finally:
                    wlock.release()
            except error.LockError:
                pass
        return modified, fixup

    def _manifestmatches(self, match, s):
        """Slow path for workingctx

        The fast path is when we compare the working directory to its parent
        which means this function is comparing with a non-parent; therefore we
        need to build a manifest and return what matches.
        """
        mf = self._repo['.']._manifestmatches(match, s)
        for f in s.modified + s.added:
            # _newnode is a phony nodeid standing in for uncommitted content
            mf[f] = _newnode
            mf.setflag(f, self.flags(f))
        for f in s.removed:
            if f in mf:
                del mf[f]
        return mf

    def _dirstatestatus(self, match=None, ignored=False, clean=False,
                        unknown=False):
        '''Gets the status from the dirstate -- internal use only.'''
        listignored, listclean, listunknown = ignored, clean, unknown
        match = match or matchmod.always(self._repo.root, self._repo.getcwd())
        subrepos = []
        if '.hgsub' in self:
            subrepos = sorted(self.substate)
        # NOTE: 'cmp' shadows the builtin; it holds files needing comparison
        cmp, s = self._repo.dirstate.status(match, subrepos, listignored,
                                            listclean, listunknown)

        # check for any possibly clean files
        if cmp:
            modified2, fixup = self._checklookup(cmp)
            s.modified.extend(modified2)

            # update dirstate for files that are actually clean
            if fixup and listclean:
                s.clean.extend(fixup)

        if match.always():
            # cache for performance
            if s.unknown or s.ignored or s.clean:
                # "_status" is cached with list*=False in the normal route
                self._status = scmutil.status(s.modified, s.added, s.removed,
                                              s.deleted, [], [], [])
            else:
                self._status = s

        return s

    def _buildstatus(self, other, s, match, listignored, listclean,
                     listunknown):
        """build a status with respect to another context

        This includes logic for maintaining the fast path of status when
        comparing the working directory against its parent, which is to skip
        building a new manifest if self (working directory) is not comparing
        against its parent (repo['.']).
        """
        s = self._dirstatestatus(match, listignored, listclean, listunknown)
        # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
        # might have accidentally ended up with the entire contents of the file
        # they are supposed to be linking to.
        s.modified[:] = self._filtersuspectsymlink(s.modified)
        if other != self._repo['.']:
            s = super(workingctx, self)._buildstatus(other, s, match,
                                                     listignored, listclean,
                                                     listunknown)
        return s

    def _matchstatus(self, other, match):
        """override the match method with a filter for directory patterns

        We use inheritance to customize the match.bad method only in cases of
        workingctx since it belongs only to the working directory when
        comparing against the parent changeset.

        If we aren't comparing against the working directory's parent, then we
        just use the default match object sent to us.
        """
        superself = super(workingctx, self)
        match = superself._matchstatus(other, match)
        if other != self._repo['.']:
            def bad(f, msg):
                # 'f' may be a directory pattern from 'match.files()',
                # so 'f not in ctx1' is not enough
                if f not in other and f not in other.dirs():
                    self._repo.ui.warn('%s: %s\n' %
                                       (self._repo.dirstate.pathto(f), msg))
            match.bad = bad
        return match
1551
1560
class committablefilectx(basefilectx):
    """A committablefilectx provides common functionality for a file context
    that wants the ability to commit, e.g. workingfilectx or memfilectx."""
    def __init__(self, repo, path, filelog=None, ctx=None):
        self._repo = repo
        self._path = path
        # no committed revision backs this context yet
        self._changeid = None
        self._filerev = self._filenode = None

        # caller-provided filelog/changectx avoid recomputation later
        if filelog is not None:
            self._filelog = filelog
        if ctx:
            self._changectx = ctx
1565
1574
1566 def __nonzero__(self):
1575 def __nonzero__(self):
1567 return True
1576 return True
1568
1577
1569 def parents(self):
1578 def parents(self):
1570 '''return parent filectxs, following copies if necessary'''
1579 '''return parent filectxs, following copies if necessary'''
1571 def filenode(ctx, path):
1580 def filenode(ctx, path):
1572 return ctx._manifest.get(path, nullid)
1581 return ctx._manifest.get(path, nullid)
1573
1582
1574 path = self._path
1583 path = self._path
1575 fl = self._filelog
1584 fl = self._filelog
1576 pcl = self._changectx._parents
1585 pcl = self._changectx._parents
1577 renamed = self.renamed()
1586 renamed = self.renamed()
1578
1587
1579 if renamed:
1588 if renamed:
1580 pl = [renamed + (None,)]
1589 pl = [renamed + (None,)]
1581 else:
1590 else:
1582 pl = [(path, filenode(pcl[0], path), fl)]
1591 pl = [(path, filenode(pcl[0], path), fl)]
1583
1592
1584 for pc in pcl[1:]:
1593 for pc in pcl[1:]:
1585 pl.append((path, filenode(pc, path), fl))
1594 pl.append((path, filenode(pc, path), fl))
1586
1595
1587 return [filectx(self._repo, p, fileid=n, filelog=l)
1596 return [filectx(self._repo, p, fileid=n, filelog=l)
1588 for p, n, l in pl if n != nullid]
1597 for p, n, l in pl if n != nullid]
1589
1598
1590 def children(self):
1599 def children(self):
1591 return []
1600 return []
1592
1601
1593 class workingfilectx(committablefilectx):
1602 class workingfilectx(committablefilectx):
1594 """A workingfilectx object makes access to data related to a particular
1603 """A workingfilectx object makes access to data related to a particular
1595 file in the working directory convenient."""
1604 file in the working directory convenient."""
1596 def __init__(self, repo, path, filelog=None, workingctx=None):
1605 def __init__(self, repo, path, filelog=None, workingctx=None):
1597 super(workingfilectx, self).__init__(repo, path, filelog, workingctx)
1606 super(workingfilectx, self).__init__(repo, path, filelog, workingctx)
1598
1607
1599 @propertycache
1608 @propertycache
1600 def _changectx(self):
1609 def _changectx(self):
1601 return workingctx(self._repo)
1610 return workingctx(self._repo)
1602
1611
1603 def data(self):
1612 def data(self):
1604 return self._repo.wread(self._path)
1613 return self._repo.wread(self._path)
1605 def renamed(self):
1614 def renamed(self):
1606 rp = self._repo.dirstate.copied(self._path)
1615 rp = self._repo.dirstate.copied(self._path)
1607 if not rp:
1616 if not rp:
1608 return None
1617 return None
1609 return rp, self._changectx._parents[0]._manifest.get(rp, nullid)
1618 return rp, self._changectx._parents[0]._manifest.get(rp, nullid)
1610
1619
1611 def size(self):
1620 def size(self):
1612 return self._repo.wvfs.lstat(self._path).st_size
1621 return self._repo.wvfs.lstat(self._path).st_size
1613 def date(self):
1622 def date(self):
1614 t, tz = self._changectx.date()
1623 t, tz = self._changectx.date()
1615 try:
1624 try:
1616 return (int(self._repo.wvfs.lstat(self._path).st_mtime), tz)
1625 return (int(self._repo.wvfs.lstat(self._path).st_mtime), tz)
1617 except OSError, err:
1626 except OSError, err:
1618 if err.errno != errno.ENOENT:
1627 if err.errno != errno.ENOENT:
1619 raise
1628 raise
1620 return (t, tz)
1629 return (t, tz)
1621
1630
1622 def cmp(self, fctx):
1631 def cmp(self, fctx):
1623 """compare with other file context
1632 """compare with other file context
1624
1633
1625 returns True if different than fctx.
1634 returns True if different than fctx.
1626 """
1635 """
1627 # fctx should be a filectx (not a workingfilectx)
1636 # fctx should be a filectx (not a workingfilectx)
1628 # invert comparison to reuse the same code path
1637 # invert comparison to reuse the same code path
1629 return fctx.cmp(self)
1638 return fctx.cmp(self)
1630
1639
1631 def remove(self, ignoremissing=False):
1640 def remove(self, ignoremissing=False):
1632 """wraps unlink for a repo's working directory"""
1641 """wraps unlink for a repo's working directory"""
1633 util.unlinkpath(self._repo.wjoin(self._path), ignoremissing)
1642 util.unlinkpath(self._repo.wjoin(self._path), ignoremissing)
1634
1643
1635 def write(self, data, flags):
1644 def write(self, data, flags):
1636 """wraps repo.wwrite"""
1645 """wraps repo.wwrite"""
1637 self._repo.wwrite(self._path, data, flags)
1646 self._repo.wwrite(self._path, data, flags)
1638
1647
1639 class workingcommitctx(workingctx):
1648 class workingcommitctx(workingctx):
1640 """A workingcommitctx object makes access to data related to
1649 """A workingcommitctx object makes access to data related to
1641 the revision being committed convenient.
1650 the revision being committed convenient.
1642
1651
1643 This hides changes in the working directory, if they aren't
1652 This hides changes in the working directory, if they aren't
1644 committed in this context.
1653 committed in this context.
1645 """
1654 """
1646 def __init__(self, repo, changes,
1655 def __init__(self, repo, changes,
1647 text="", user=None, date=None, extra=None):
1656 text="", user=None, date=None, extra=None):
1648 super(workingctx, self).__init__(repo, text, user, date, extra,
1657 super(workingctx, self).__init__(repo, text, user, date, extra,
1649 changes)
1658 changes)
1650
1659
1651 def _dirstatestatus(self, match=None, ignored=False, clean=False,
1660 def _dirstatestatus(self, match=None, ignored=False, clean=False,
1652 unknown=False):
1661 unknown=False):
1653 """Return matched files only in ``self._status``
1662 """Return matched files only in ``self._status``
1654
1663
1655 Uncommitted files appear "clean" via this context, even if
1664 Uncommitted files appear "clean" via this context, even if
1656 they aren't actually so in the working directory.
1665 they aren't actually so in the working directory.
1657 """
1666 """
1658 match = match or matchmod.always(self._repo.root, self._repo.getcwd())
1667 match = match or matchmod.always(self._repo.root, self._repo.getcwd())
1659 if clean:
1668 if clean:
1660 clean = [f for f in self._manifest if f not in self._changedset]
1669 clean = [f for f in self._manifest if f not in self._changedset]
1661 else:
1670 else:
1662 clean = []
1671 clean = []
1663 return scmutil.status([f for f in self._status.modified if match(f)],
1672 return scmutil.status([f for f in self._status.modified if match(f)],
1664 [f for f in self._status.added if match(f)],
1673 [f for f in self._status.added if match(f)],
1665 [f for f in self._status.removed if match(f)],
1674 [f for f in self._status.removed if match(f)],
1666 [], [], [], clean)
1675 [], [], [], clean)
1667
1676
1668 @propertycache
1677 @propertycache
1669 def _changedset(self):
1678 def _changedset(self):
1670 """Return the set of files changed in this context
1679 """Return the set of files changed in this context
1671 """
1680 """
1672 changed = set(self._status.modified)
1681 changed = set(self._status.modified)
1673 changed.update(self._status.added)
1682 changed.update(self._status.added)
1674 changed.update(self._status.removed)
1683 changed.update(self._status.removed)
1675 return changed
1684 return changed
1676
1685
1677 class memctx(committablectx):
1686 class memctx(committablectx):
1678 """Use memctx to perform in-memory commits via localrepo.commitctx().
1687 """Use memctx to perform in-memory commits via localrepo.commitctx().
1679
1688
1680 Revision information is supplied at initialization time while
1689 Revision information is supplied at initialization time while
1681 related files data and is made available through a callback
1690 related files data and is made available through a callback
1682 mechanism. 'repo' is the current localrepo, 'parents' is a
1691 mechanism. 'repo' is the current localrepo, 'parents' is a
1683 sequence of two parent revisions identifiers (pass None for every
1692 sequence of two parent revisions identifiers (pass None for every
1684 missing parent), 'text' is the commit message and 'files' lists
1693 missing parent), 'text' is the commit message and 'files' lists
1685 names of files touched by the revision (normalized and relative to
1694 names of files touched by the revision (normalized and relative to
1686 repository root).
1695 repository root).
1687
1696
1688 filectxfn(repo, memctx, path) is a callable receiving the
1697 filectxfn(repo, memctx, path) is a callable receiving the
1689 repository, the current memctx object and the normalized path of
1698 repository, the current memctx object and the normalized path of
1690 requested file, relative to repository root. It is fired by the
1699 requested file, relative to repository root. It is fired by the
1691 commit function for every file in 'files', but calls order is
1700 commit function for every file in 'files', but calls order is
1692 undefined. If the file is available in the revision being
1701 undefined. If the file is available in the revision being
1693 committed (updated or added), filectxfn returns a memfilectx
1702 committed (updated or added), filectxfn returns a memfilectx
1694 object. If the file was removed, filectxfn raises an
1703 object. If the file was removed, filectxfn raises an
1695 IOError. Moved files are represented by marking the source file
1704 IOError. Moved files are represented by marking the source file
1696 removed and the new file added with copy information (see
1705 removed and the new file added with copy information (see
1697 memfilectx).
1706 memfilectx).
1698
1707
1699 user receives the committer name and defaults to current
1708 user receives the committer name and defaults to current
1700 repository username, date is the commit date in any format
1709 repository username, date is the commit date in any format
1701 supported by util.parsedate() and defaults to current date, extra
1710 supported by util.parsedate() and defaults to current date, extra
1702 is a dictionary of metadata or is left empty.
1711 is a dictionary of metadata or is left empty.
1703 """
1712 """
1704
1713
1705 # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
1714 # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
1706 # Extensions that need to retain compatibility across Mercurial 3.1 can use
1715 # Extensions that need to retain compatibility across Mercurial 3.1 can use
1707 # this field to determine what to do in filectxfn.
1716 # this field to determine what to do in filectxfn.
1708 _returnnoneformissingfiles = True
1717 _returnnoneformissingfiles = True
1709
1718
1710 def __init__(self, repo, parents, text, files, filectxfn, user=None,
1719 def __init__(self, repo, parents, text, files, filectxfn, user=None,
1711 date=None, extra=None, editor=False):
1720 date=None, extra=None, editor=False):
1712 super(memctx, self).__init__(repo, text, user, date, extra)
1721 super(memctx, self).__init__(repo, text, user, date, extra)
1713 self._rev = None
1722 self._rev = None
1714 self._node = None
1723 self._node = None
1715 parents = [(p or nullid) for p in parents]
1724 parents = [(p or nullid) for p in parents]
1716 p1, p2 = parents
1725 p1, p2 = parents
1717 self._parents = [changectx(self._repo, p) for p in (p1, p2)]
1726 self._parents = [changectx(self._repo, p) for p in (p1, p2)]
1718 files = sorted(set(files))
1727 files = sorted(set(files))
1719 self._files = files
1728 self._files = files
1720 self.substate = {}
1729 self.substate = {}
1721
1730
1722 # if store is not callable, wrap it in a function
1731 # if store is not callable, wrap it in a function
1723 if not callable(filectxfn):
1732 if not callable(filectxfn):
1724 def getfilectx(repo, memctx, path):
1733 def getfilectx(repo, memctx, path):
1725 fctx = filectxfn[path]
1734 fctx = filectxfn[path]
1726 # this is weird but apparently we only keep track of one parent
1735 # this is weird but apparently we only keep track of one parent
1727 # (why not only store that instead of a tuple?)
1736 # (why not only store that instead of a tuple?)
1728 copied = fctx.renamed()
1737 copied = fctx.renamed()
1729 if copied:
1738 if copied:
1730 copied = copied[0]
1739 copied = copied[0]
1731 return memfilectx(repo, path, fctx.data(),
1740 return memfilectx(repo, path, fctx.data(),
1732 islink=fctx.islink(), isexec=fctx.isexec(),
1741 islink=fctx.islink(), isexec=fctx.isexec(),
1733 copied=copied, memctx=memctx)
1742 copied=copied, memctx=memctx)
1734 self._filectxfn = getfilectx
1743 self._filectxfn = getfilectx
1735 else:
1744 else:
1736 # "util.cachefunc" reduces invocation of possibly expensive
1745 # "util.cachefunc" reduces invocation of possibly expensive
1737 # "filectxfn" for performance (e.g. converting from another VCS)
1746 # "filectxfn" for performance (e.g. converting from another VCS)
1738 self._filectxfn = util.cachefunc(filectxfn)
1747 self._filectxfn = util.cachefunc(filectxfn)
1739
1748
1740 self._extra = extra and extra.copy() or {}
1749 self._extra = extra and extra.copy() or {}
1741 if self._extra.get('branch', '') == '':
1750 if self._extra.get('branch', '') == '':
1742 self._extra['branch'] = 'default'
1751 self._extra['branch'] = 'default'
1743
1752
1744 if editor:
1753 if editor:
1745 self._text = editor(self._repo, self, [])
1754 self._text = editor(self._repo, self, [])
1746 self._repo.savecommitmessage(self._text)
1755 self._repo.savecommitmessage(self._text)
1747
1756
1748 def filectx(self, path, filelog=None):
1757 def filectx(self, path, filelog=None):
1749 """get a file context from the working directory
1758 """get a file context from the working directory
1750
1759
1751 Returns None if file doesn't exist and should be removed."""
1760 Returns None if file doesn't exist and should be removed."""
1752 return self._filectxfn(self._repo, self, path)
1761 return self._filectxfn(self._repo, self, path)
1753
1762
1754 def commit(self):
1763 def commit(self):
1755 """commit context to the repo"""
1764 """commit context to the repo"""
1756 return self._repo.commitctx(self)
1765 return self._repo.commitctx(self)
1757
1766
1758 @propertycache
1767 @propertycache
1759 def _manifest(self):
1768 def _manifest(self):
1760 """generate a manifest based on the return values of filectxfn"""
1769 """generate a manifest based on the return values of filectxfn"""
1761
1770
1762 # keep this simple for now; just worry about p1
1771 # keep this simple for now; just worry about p1
1763 pctx = self._parents[0]
1772 pctx = self._parents[0]
1764 man = pctx.manifest().copy()
1773 man = pctx.manifest().copy()
1765
1774
1766 for f in self._status.modified:
1775 for f in self._status.modified:
1767 p1node = nullid
1776 p1node = nullid
1768 p2node = nullid
1777 p2node = nullid
1769 p = pctx[f].parents() # if file isn't in pctx, check p2?
1778 p = pctx[f].parents() # if file isn't in pctx, check p2?
1770 if len(p) > 0:
1779 if len(p) > 0:
1771 p1node = p[0].node()
1780 p1node = p[0].node()
1772 if len(p) > 1:
1781 if len(p) > 1:
1773 p2node = p[1].node()
1782 p2node = p[1].node()
1774 man[f] = revlog.hash(self[f].data(), p1node, p2node)
1783 man[f] = revlog.hash(self[f].data(), p1node, p2node)
1775
1784
1776 for f in self._status.added:
1785 for f in self._status.added:
1777 man[f] = revlog.hash(self[f].data(), nullid, nullid)
1786 man[f] = revlog.hash(self[f].data(), nullid, nullid)
1778
1787
1779 for f in self._status.removed:
1788 for f in self._status.removed:
1780 if f in man:
1789 if f in man:
1781 del man[f]
1790 del man[f]
1782
1791
1783 return man
1792 return man
1784
1793
1785 @propertycache
1794 @propertycache
1786 def _status(self):
1795 def _status(self):
1787 """Calculate exact status from ``files`` specified at construction
1796 """Calculate exact status from ``files`` specified at construction
1788 """
1797 """
1789 man1 = self.p1().manifest()
1798 man1 = self.p1().manifest()
1790 p2 = self._parents[1]
1799 p2 = self._parents[1]
1791 # "1 < len(self._parents)" can't be used for checking
1800 # "1 < len(self._parents)" can't be used for checking
1792 # existence of the 2nd parent, because "memctx._parents" is
1801 # existence of the 2nd parent, because "memctx._parents" is
1793 # explicitly initialized by the list, of which length is 2.
1802 # explicitly initialized by the list, of which length is 2.
1794 if p2.node() != nullid:
1803 if p2.node() != nullid:
1795 man2 = p2.manifest()
1804 man2 = p2.manifest()
1796 managing = lambda f: f in man1 or f in man2
1805 managing = lambda f: f in man1 or f in man2
1797 else:
1806 else:
1798 managing = lambda f: f in man1
1807 managing = lambda f: f in man1
1799
1808
1800 modified, added, removed = [], [], []
1809 modified, added, removed = [], [], []
1801 for f in self._files:
1810 for f in self._files:
1802 if not managing(f):
1811 if not managing(f):
1803 added.append(f)
1812 added.append(f)
1804 elif self[f]:
1813 elif self[f]:
1805 modified.append(f)
1814 modified.append(f)
1806 else:
1815 else:
1807 removed.append(f)
1816 removed.append(f)
1808
1817
1809 return scmutil.status(modified, added, removed, [], [], [], [])
1818 return scmutil.status(modified, added, removed, [], [], [], [])
1810
1819
1811 class memfilectx(committablefilectx):
1820 class memfilectx(committablefilectx):
1812 """memfilectx represents an in-memory file to commit.
1821 """memfilectx represents an in-memory file to commit.
1813
1822
1814 See memctx and committablefilectx for more details.
1823 See memctx and committablefilectx for more details.
1815 """
1824 """
1816 def __init__(self, repo, path, data, islink=False,
1825 def __init__(self, repo, path, data, islink=False,
1817 isexec=False, copied=None, memctx=None):
1826 isexec=False, copied=None, memctx=None):
1818 """
1827 """
1819 path is the normalized file path relative to repository root.
1828 path is the normalized file path relative to repository root.
1820 data is the file content as a string.
1829 data is the file content as a string.
1821 islink is True if the file is a symbolic link.
1830 islink is True if the file is a symbolic link.
1822 isexec is True if the file is executable.
1831 isexec is True if the file is executable.
1823 copied is the source file path if current file was copied in the
1832 copied is the source file path if current file was copied in the
1824 revision being committed, or None."""
1833 revision being committed, or None."""
1825 super(memfilectx, self).__init__(repo, path, None, memctx)
1834 super(memfilectx, self).__init__(repo, path, None, memctx)
1826 self._data = data
1835 self._data = data
1827 self._flags = (islink and 'l' or '') + (isexec and 'x' or '')
1836 self._flags = (islink and 'l' or '') + (isexec and 'x' or '')
1828 self._copied = None
1837 self._copied = None
1829 if copied:
1838 if copied:
1830 self._copied = (copied, nullid)
1839 self._copied = (copied, nullid)
1831
1840
1832 def data(self):
1841 def data(self):
1833 return self._data
1842 return self._data
1834 def size(self):
1843 def size(self):
1835 return len(self.data())
1844 return len(self.data())
1836 def flags(self):
1845 def flags(self):
1837 return self._flags
1846 return self._flags
1838 def renamed(self):
1847 def renamed(self):
1839 return self._copied
1848 return self._copied
1840
1849
1841 def remove(self, ignoremissing=False):
1850 def remove(self, ignoremissing=False):
1842 """wraps unlink for a repo's working directory"""
1851 """wraps unlink for a repo's working directory"""
1843 # need to figure out what to do here
1852 # need to figure out what to do here
1844 del self._changectx[self._path]
1853 del self._changectx[self._path]
1845
1854
1846 def write(self, data, flags):
1855 def write(self, data, flags):
1847 """wraps repo.wwrite"""
1856 """wraps repo.wwrite"""
1848 self._data = data
1857 self._data = data
@@ -1,469 +1,472
1 # copies.py - copy detection for Mercurial
1 # copies.py - copy detection for Mercurial
2 #
2 #
3 # Copyright 2008 Matt Mackall <mpm@selenic.com>
3 # Copyright 2008 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 import util
8 import util
9 import heapq
9 import heapq
10
10
11 def _nonoverlap(d1, d2, d3):
11 def _nonoverlap(d1, d2, d3):
12 "Return list of elements in d1 not in d2 or d3"
12 "Return list of elements in d1 not in d2 or d3"
13 return sorted([d for d in d1 if d not in d3 and d not in d2])
13 return sorted([d for d in d1 if d not in d3 and d not in d2])
14
14
15 def _dirname(f):
15 def _dirname(f):
16 s = f.rfind("/")
16 s = f.rfind("/")
17 if s == -1:
17 if s == -1:
18 return ""
18 return ""
19 return f[:s]
19 return f[:s]
20
20
21 def _findlimit(repo, a, b):
21 def _findlimit(repo, a, b):
22 """
22 """
23 Find the last revision that needs to be checked to ensure that a full
23 Find the last revision that needs to be checked to ensure that a full
24 transitive closure for file copies can be properly calculated.
24 transitive closure for file copies can be properly calculated.
25 Generally, this means finding the earliest revision number that's an
25 Generally, this means finding the earliest revision number that's an
26 ancestor of a or b but not both, except when a or b is a direct descendent
26 ancestor of a or b but not both, except when a or b is a direct descendent
27 of the other, in which case we can return the minimum revnum of a and b.
27 of the other, in which case we can return the minimum revnum of a and b.
28 None if no such revision exists.
28 None if no such revision exists.
29 """
29 """
30
30
31 # basic idea:
31 # basic idea:
32 # - mark a and b with different sides
32 # - mark a and b with different sides
33 # - if a parent's children are all on the same side, the parent is
33 # - if a parent's children are all on the same side, the parent is
34 # on that side, otherwise it is on no side
34 # on that side, otherwise it is on no side
35 # - walk the graph in topological order with the help of a heap;
35 # - walk the graph in topological order with the help of a heap;
36 # - add unseen parents to side map
36 # - add unseen parents to side map
37 # - clear side of any parent that has children on different sides
37 # - clear side of any parent that has children on different sides
38 # - track number of interesting revs that might still be on a side
38 # - track number of interesting revs that might still be on a side
39 # - track the lowest interesting rev seen
39 # - track the lowest interesting rev seen
40 # - quit when interesting revs is zero
40 # - quit when interesting revs is zero
41
41
42 cl = repo.changelog
42 cl = repo.changelog
43 working = len(cl) # pseudo rev for the working directory
43 working = len(cl) # pseudo rev for the working directory
44 if a is None:
44 if a is None:
45 a = working
45 a = working
46 if b is None:
46 if b is None:
47 b = working
47 b = working
48
48
49 side = {a: -1, b: 1}
49 side = {a: -1, b: 1}
50 visit = [-a, -b]
50 visit = [-a, -b]
51 heapq.heapify(visit)
51 heapq.heapify(visit)
52 interesting = len(visit)
52 interesting = len(visit)
53 hascommonancestor = False
53 hascommonancestor = False
54 limit = working
54 limit = working
55
55
56 while interesting:
56 while interesting:
57 r = -heapq.heappop(visit)
57 r = -heapq.heappop(visit)
58 if r == working:
58 if r == working:
59 parents = [cl.rev(p) for p in repo.dirstate.parents()]
59 parents = [cl.rev(p) for p in repo.dirstate.parents()]
60 else:
60 else:
61 parents = cl.parentrevs(r)
61 parents = cl.parentrevs(r)
62 for p in parents:
62 for p in parents:
63 if p < 0:
63 if p < 0:
64 continue
64 continue
65 if p not in side:
65 if p not in side:
66 # first time we see p; add it to visit
66 # first time we see p; add it to visit
67 side[p] = side[r]
67 side[p] = side[r]
68 if side[p]:
68 if side[p]:
69 interesting += 1
69 interesting += 1
70 heapq.heappush(visit, -p)
70 heapq.heappush(visit, -p)
71 elif side[p] and side[p] != side[r]:
71 elif side[p] and side[p] != side[r]:
72 # p was interesting but now we know better
72 # p was interesting but now we know better
73 side[p] = 0
73 side[p] = 0
74 interesting -= 1
74 interesting -= 1
75 hascommonancestor = True
75 hascommonancestor = True
76 if side[r]:
76 if side[r]:
77 limit = r # lowest rev visited
77 limit = r # lowest rev visited
78 interesting -= 1
78 interesting -= 1
79
79
80 if not hascommonancestor:
80 if not hascommonancestor:
81 return None
81 return None
82
82
83 # Consider the following flow (see test-commit-amend.t under issue4405):
83 # Consider the following flow (see test-commit-amend.t under issue4405):
84 # 1/ File 'a0' committed
84 # 1/ File 'a0' committed
85 # 2/ File renamed from 'a0' to 'a1' in a new commit (call it 'a1')
85 # 2/ File renamed from 'a0' to 'a1' in a new commit (call it 'a1')
86 # 3/ Move back to first commit
86 # 3/ Move back to first commit
87 # 4/ Create a new commit via revert to contents of 'a1' (call it 'a1-amend')
87 # 4/ Create a new commit via revert to contents of 'a1' (call it 'a1-amend')
88 # 5/ Rename file from 'a1' to 'a2' and commit --amend 'a1-msg'
88 # 5/ Rename file from 'a1' to 'a2' and commit --amend 'a1-msg'
89 #
89 #
90 # During the amend in step five, we will be in this state:
90 # During the amend in step five, we will be in this state:
91 #
91 #
92 # @ 3 temporary amend commit for a1-amend
92 # @ 3 temporary amend commit for a1-amend
93 # |
93 # |
94 # o 2 a1-amend
94 # o 2 a1-amend
95 # |
95 # |
96 # | o 1 a1
96 # | o 1 a1
97 # |/
97 # |/
98 # o 0 a0
98 # o 0 a0
99 #
99 #
100 # When _findlimit is called, a and b are revs 3 and 0, so limit will be 2,
100 # When _findlimit is called, a and b are revs 3 and 0, so limit will be 2,
101 # yet the filelog has the copy information in rev 1 and we will not look
101 # yet the filelog has the copy information in rev 1 and we will not look
102 # back far enough unless we also look at the a and b as candidates.
102 # back far enough unless we also look at the a and b as candidates.
103 # This only occurs when a is a descendent of b or visa-versa.
103 # This only occurs when a is a descendent of b or visa-versa.
104 return min(limit, a, b)
104 return min(limit, a, b)
105
105
106 def _chain(src, dst, a, b):
106 def _chain(src, dst, a, b):
107 '''chain two sets of copies a->b'''
107 '''chain two sets of copies a->b'''
108 t = a.copy()
108 t = a.copy()
109 for k, v in b.iteritems():
109 for k, v in b.iteritems():
110 if v in t:
110 if v in t:
111 # found a chain
111 # found a chain
112 if t[v] != k:
112 if t[v] != k:
113 # file wasn't renamed back to itself
113 # file wasn't renamed back to itself
114 t[k] = t[v]
114 t[k] = t[v]
115 if v not in dst:
115 if v not in dst:
116 # chain was a rename, not a copy
116 # chain was a rename, not a copy
117 del t[v]
117 del t[v]
118 if v in src:
118 if v in src:
119 # file is a copy of an existing file
119 # file is a copy of an existing file
120 t[k] = v
120 t[k] = v
121
121
122 # remove criss-crossed copies
122 # remove criss-crossed copies
123 for k, v in t.items():
123 for k, v in t.items():
124 if k in src and v in dst:
124 if k in src and v in dst:
125 del t[k]
125 del t[k]
126
126
127 return t
127 return t
128
128
129 def _tracefile(fctx, am, limit=-1):
129 def _tracefile(fctx, am, limit=-1):
130 '''return file context that is the ancestor of fctx present in ancestor
130 '''return file context that is the ancestor of fctx present in ancestor
131 manifest am, stopping after the first ancestor lower than limit'''
131 manifest am, stopping after the first ancestor lower than limit'''
132
132
133 for f in fctx.ancestors():
133 for f in fctx.ancestors():
134 if am.get(f.path(), None) == f.filenode():
134 if am.get(f.path(), None) == f.filenode():
135 return f
135 return f
136 if f.rev() < limit:
136 if f.rev() < limit:
137 return None
137 return None
138
138
139 def _dirstatecopies(d):
139 def _dirstatecopies(d):
140 ds = d._repo.dirstate
140 ds = d._repo.dirstate
141 c = ds.copies().copy()
141 c = ds.copies().copy()
142 for k in c.keys():
142 for k in c.keys():
143 if ds[k] not in 'anm':
143 if ds[k] not in 'anm':
144 del c[k]
144 del c[k]
145 return c
145 return c
146
146
147 def _forwardcopies(a, b):
147 def _forwardcopies(a, b):
148 '''find {dst@b: src@a} copy mapping where a is an ancestor of b'''
148 '''find {dst@b: src@a} copy mapping where a is an ancestor of b'''
149
149
150 # check for working copy
150 # check for working copy
151 w = None
151 w = None
152 if b.rev() is None:
152 if b.rev() is None:
153 w = b
153 w = b
154 b = w.p1()
154 b = w.p1()
155 if a == b:
155 if a == b:
156 # short-circuit to avoid issues with merge states
156 # short-circuit to avoid issues with merge states
157 return _dirstatecopies(w)
157 return _dirstatecopies(w)
158
158
159 # files might have to be traced back to the fctx parent of the last
159 # files might have to be traced back to the fctx parent of the last
160 # one-side-only changeset, but not further back than that
160 # one-side-only changeset, but not further back than that
161 limit = _findlimit(a._repo, a.rev(), b.rev())
161 limit = _findlimit(a._repo, a.rev(), b.rev())
162 if limit is None:
162 if limit is None:
163 limit = -1
163 limit = -1
164 am = a.manifest()
164 am = a.manifest()
165
165
166 # find where new files came from
166 # find where new files came from
167 # we currently don't try to find where old files went, too expensive
167 # we currently don't try to find where old files went, too expensive
168 # this means we can miss a case like 'hg rm b; hg cp a b'
168 # this means we can miss a case like 'hg rm b; hg cp a b'
169 cm = {}
169 cm = {}
170 missing = set(b.manifest().iterkeys())
170 missing = set(b.manifest().iterkeys())
171 missing.difference_update(a.manifest().iterkeys())
171 missing.difference_update(a.manifest().iterkeys())
172
172
173 ancestrycontext = a._repo.changelog.ancestors([b.rev()], inclusive=True)
173 for f in missing:
174 for f in missing:
174 ofctx = _tracefile(b[f], am, limit)
175 fctx = b[f]
176 fctx._ancestrycontext = ancestrycontext
177 ofctx = _tracefile(fctx, am, limit)
175 if ofctx:
178 if ofctx:
176 cm[f] = ofctx.path()
179 cm[f] = ofctx.path()
177
180
178 # combine copies from dirstate if necessary
181 # combine copies from dirstate if necessary
179 if w is not None:
182 if w is not None:
180 cm = _chain(a, w, cm, _dirstatecopies(w))
183 cm = _chain(a, w, cm, _dirstatecopies(w))
181
184
182 return cm
185 return cm
183
186
184 def _backwardrenames(a, b):
187 def _backwardrenames(a, b):
185 # Even though we're not taking copies into account, 1:n rename situations
188 # Even though we're not taking copies into account, 1:n rename situations
186 # can still exist (e.g. hg cp a b; hg mv a c). In those cases we
189 # can still exist (e.g. hg cp a b; hg mv a c). In those cases we
187 # arbitrarily pick one of the renames.
190 # arbitrarily pick one of the renames.
188 f = _forwardcopies(b, a)
191 f = _forwardcopies(b, a)
189 r = {}
192 r = {}
190 for k, v in sorted(f.iteritems()):
193 for k, v in sorted(f.iteritems()):
191 # remove copies
194 # remove copies
192 if v in a:
195 if v in a:
193 continue
196 continue
194 r[v] = k
197 r[v] = k
195 return r
198 return r
196
199
def pathcopies(x, y):
    '''find {dst@y: src@x} copy mapping for directed compare

    x, y are changectx-like objects; returns an empty dict for identical
    or missing contexts.
    '''
    # trivial cases: same context or a missing endpoint has no copies
    if x == y or not x or not y:
        return {}
    a = y.ancestor(x)
    if a == x:
        # x is an ancestor of y: simple forward walk
        return _forwardcopies(x, y)
    if a == y:
        # y is an ancestor of x: invert the forward walk
        return _backwardrenames(x, y)
    # unrelated endpoints: go through the common ancestor and chain
    # the backward (x -> a) and forward (a -> y) copy maps
    return _chain(x, y, _backwardrenames(x, a), _forwardcopies(a, y))
207
210
def mergecopies(repo, c1, c2, ca):
    """
    Find moves and copies between context c1 and c2 that are relevant
    for merging.

    Returns four dicts: "copy", "movewithdir", "diverge", and
    "renamedelete".

    "copy" is a mapping from destination name -> source name,
    where source is in c1 and destination is in c2 or vice-versa.

    "movewithdir" is a mapping from source name -> destination name,
    where the file at source present in one context but not the other
    needs to be moved to destination by the merge process, because the
    other context moved the directory it is in.

    "diverge" is a mapping of source name -> list of destination names
    for divergent renames.

    "renamedelete" is a mapping of source name -> list of destination
    names for files deleted in c1 that were renamed in c2 or vice-versa.
    """
    # avoid silly behavior for update from empty dir
    if not c1 or not c2 or c1 == c2:
        return {}, {}, {}, {}

    # avoid silly behavior for parent -> working dir
    if c2.node() is None and c1.node() == repo.dirstate.p1():
        return repo.dirstate.copies(), {}, {}, {}

    limit = _findlimit(repo, c1.rev(), c2.rev())
    if limit is None:
        # no common ancestor, no copies
        return {}, {}, {}, {}
    m1 = c1.manifest()
    m2 = c2.manifest()
    ma = ca.manifest()

    def makectx(f, n):
        # build a filectx for (filename, node); short nodes mean a
        # working-directory file
        if len(n) != 20: # in a working context?
            if c1.rev() is None:
                return c1.filectx(f)
            return c2.filectx(f)
        return repo.filectx(f, fileid=n)

    # cache filectx creation, checkcopies may request the same pair often
    ctx = util.lrucachefunc(makectx)
    copy = {}
    movewithdir = {}
    fullcopy = {}
    diverge = {}

    repo.ui.debug("  searching for copies back to rev %d\n" % limit)

    u1 = _nonoverlap(m1, m2, ma)
    u2 = _nonoverlap(m2, m1, ma)

    if u1:
        repo.ui.debug("  unmatched files in local:\n   %s\n"
                      % "\n   ".join(u1))
    if u2:
        repo.ui.debug("  unmatched files in other:\n   %s\n"
                      % "\n   ".join(u2))

    # look for copy sources of files that exist on only one side
    for f in u1:
        checkcopies(ctx, f, m1, m2, ca, limit, diverge, copy, fullcopy)

    for f in u2:
        checkcopies(ctx, f, m2, m1, ca, limit, diverge, copy, fullcopy)

    renamedelete = {}
    renamedelete2 = set()
    diverge2 = set()
    # NOTE: .items() (a list under Python 2) allows deleting from
    # `diverge` while iterating
    for of, fl in diverge.items():
        if len(fl) == 1 or of in c1 or of in c2:
            del diverge[of] # not actually divergent, or not a rename
            if of not in c1 and of not in c2:
                # renamed on one side, deleted on the other side, but filter
                # out files that have been renamed and then deleted
                renamedelete[of] = [f for f in fl if f in c1 or f in c2]
                renamedelete2.update(fl) # reverse map for below
        else:
            diverge2.update(fl) # reverse map for below

    bothnew = sorted([d for d in m1 if d in m2 and d not in ma])
    if bothnew:
        repo.ui.debug("  unmatched files new in both:\n   %s\n"
                      % "\n   ".join(bothnew))
    bothdiverge, _copy, _fullcopy = {}, {}, {}
    for f in bothnew:
        checkcopies(ctx, f, m1, m2, ca, limit, bothdiverge, _copy, _fullcopy)
        checkcopies(ctx, f, m2, m1, ca, limit, bothdiverge, _copy, _fullcopy)
    for of, fl in bothdiverge.items():
        if len(fl) == 2 and fl[0] == fl[1]:
            copy[fl[0]] = of # not actually divergent, just matching renames

    if fullcopy and repo.ui.debugflag:
        repo.ui.debug("  all copies found (* = to merge, ! = divergent, "
                      "% = renamed and deleted):\n")
        for f in sorted(fullcopy):
            note = ""
            if f in copy:
                note += "*"
            if f in diverge2:
                note += "!"
            if f in renamedelete2:
                note += "%"
            repo.ui.debug("   src: '%s' -> dst: '%s' %s\n" % (fullcopy[f], f,
                                                              note))
    del diverge2

    if not fullcopy:
        return copy, movewithdir, diverge, renamedelete

    repo.ui.debug("  checking for directory renames\n")

    # generate a directory move map
    d1, d2 = c1.dirs(), c2.dirs()
    d1.addpath('/')
    d2.addpath('/')
    invalid = set()
    dirmove = {}

    # examine each file copy for a potential directory move, which is
    # when all the files in a directory are moved to a new directory
    for dst, src in fullcopy.iteritems():
        dsrc, ddst = _dirname(src), _dirname(dst)
        if dsrc in invalid:
            # already seen to be uninteresting
            continue
        elif dsrc in d1 and ddst in d1:
            # directory wasn't entirely moved locally
            invalid.add(dsrc)
        elif dsrc in d2 and ddst in d2:
            # directory wasn't entirely moved remotely
            invalid.add(dsrc)
        elif dsrc in dirmove and dirmove[dsrc] != ddst:
            # files from the same directory moved to two different places
            invalid.add(dsrc)
        else:
            # looks good so far
            dirmove[dsrc + "/"] = ddst + "/"

    for i in invalid:
        if i in dirmove:
            del dirmove[i]
    del d1, d2, invalid

    if not dirmove:
        return copy, movewithdir, diverge, renamedelete

    for d in dirmove:
        repo.ui.debug("   discovered dir src: '%s' -> dst: '%s'\n" %
                      (d, dirmove[d]))

    # check unaccounted nonoverlapping files against directory moves
    for f in u1 + u2:
        if f not in fullcopy:
            for d in dirmove:
                if f.startswith(d):
                    # new file added in a directory that was moved, move it
                    df = dirmove[d] + f[len(d):]
                    if df not in copy:
                        movewithdir[f] = df
                        repo.ui.debug(("   pending file src: '%s' -> "
                                       "dst: '%s'\n") % (f, df))
                    break

    return copy, movewithdir, diverge, renamedelete
376
379
def checkcopies(ctx, f, m1, m2, ca, limit, diverge, copy, fullcopy):
    """
    check possible copies of f from m1 to m2

    ctx = function accepting (filename, node) that returns a filectx.
    f = the filename to check
    m1 = the source manifest
    m2 = the destination manifest
    ca = the changectx of the common ancestor
    limit = the rev number to not search beyond
    diverge = record all diverges in this dict (of -> [f, ...])
    copy = record all non-divergent copies in this dict (f -> of)
    fullcopy = record all copies in this dict (f -> of)

    The three output dicts are mutated in place; nothing is returned.
    """

    ma = ca.manifest()

    def _related(f1, f2, limit):
        # Walk back to common ancestor to see if the two files originate
        # from the same file. Since workingfilectx's rev() is None it messes
        # up the integer comparison logic, hence the pre-step check for
        # None (f1 and f2 can only be workingfilectx's initially).

        if f1 == f2:
            return f1 # a match

        g1, g2 = f1.ancestors(), f2.ancestors()
        try:
            f1r, f2r = f1.rev(), f2.rev()

            # replace a working-dir filectx by its first real ancestor
            if f1r is None:
                f1 = g1.next()
            if f2r is None:
                f2 = g2.next()

            # walk the two ancestor chains in lockstep, always advancing
            # the one at the higher revision
            while True:
                f1r, f2r = f1.rev(), f2.rev()
                if f1r > f2r:
                    f1 = g1.next()
                elif f2r > f1r:
                    f2 = g2.next()
                elif f1 == f2:
                    return f1 # a match
                elif f1r == f2r or f1r < limit or f2r < limit:
                    return False # copy no longer relevant
        except StopIteration:
            # ran out of ancestors on one side: unrelated
            return False

    of = None
    seen = set([f])
    # walk f's ancestry, following renames, until limit or a match in m2
    for oc in ctx(f, m1[f]).ancestors():
        ocr = oc.rev()
        of = oc.path()
        if of in seen:
            # check limit late - grab last rename before
            if ocr < limit:
                break
            continue
        seen.add(of)

        fullcopy[f] = of # remember for dir rename detection
        if of not in m2:
            continue # no match, keep looking
        if m2[of] == ma.get(of):
            break # no merge needed, quit early
        c2 = ctx(of, m2[of])
        cr = _related(oc, c2, ca.rev())
        if cr and (of == f or of == c2.path()): # non-divergent
            copy[f] = of
            of = None
            break

    # anything still pointing at an ancestor-manifest file is a
    # potential divergent rename
    if of in ma:
        diverge.setdefault(of, []).append(f)
451
454
def duplicatecopies(repo, rev, fromrev, skiprev=None):
    '''reproduce copies from fromrev to rev in the dirstate

    If skiprev is specified, it's a revision that should be used to
    filter copy records. Any copies that occur between fromrev and
    skiprev will not be duplicated, even if they appear in the set of
    copies between fromrev and rev.
    '''
    exclude = {}
    if skiprev is not None:
        # copies already present between fromrev and skiprev are skipped
        exclude = pathcopies(repo[fromrev], repo[skiprev])
    for dst, src in pathcopies(repo[fromrev], repo[rev]).iteritems():
        # copies.pathcopies returns backward renames, so dst might not
        # actually be in the dirstate
        if dst in exclude:
            continue
        # only record the copy for tracked states (normal/merged/added)
        if repo.dirstate[dst] in "nma":
            repo.dirstate.copy(src, dst)
General Comments 0
You need to be logged in to leave comments. Login now