##// END OF EJS Templates
dirstate: change placeholder hash length to 20 bytes...
Durham Goode -
r30360:0298a07f default
parent child Browse files
Show More
@@ -1,1989 +1,1985
1 # context.py - changeset and file context objects for mercurial
1 # context.py - changeset and file context objects for mercurial
2 #
2 #
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import errno
10 import errno
11 import os
11 import os
12 import re
12 import re
13 import stat
13 import stat
14
14
15 from .i18n import _
15 from .i18n import _
16 from .node import (
16 from .node import (
17 bin,
17 bin,
18 hex,
18 hex,
19 newnodeid,
19 nullid,
20 nullid,
20 nullrev,
21 nullrev,
21 short,
22 short,
22 wdirid,
23 wdirid,
23 )
24 )
24 from . import (
25 from . import (
25 encoding,
26 encoding,
26 error,
27 error,
27 fileset,
28 fileset,
28 match as matchmod,
29 match as matchmod,
29 mdiff,
30 mdiff,
30 obsolete as obsmod,
31 obsolete as obsmod,
31 patch,
32 patch,
32 phases,
33 phases,
33 repoview,
34 repoview,
34 revlog,
35 revlog,
35 scmutil,
36 scmutil,
36 subrepo,
37 subrepo,
37 util,
38 util,
38 )
39 )
39
40
40 propertycache = util.propertycache
41 propertycache = util.propertycache
41
42
42 # Phony node value to stand-in for new files in some uses of
43 # manifests. Manifests support 21-byte hashes for nodes which are
44 # dirty in the working copy.
45 _newnode = '!' * 21
46
47 nonascii = re.compile(r'[^\x21-\x7f]').search
43 nonascii = re.compile(r'[^\x21-\x7f]').search
48
44
49 class basectx(object):
45 class basectx(object):
50 """A basectx object represents the common logic for its children:
46 """A basectx object represents the common logic for its children:
51 changectx: read-only context that is already present in the repo,
47 changectx: read-only context that is already present in the repo,
52 workingctx: a context that represents the working directory and can
48 workingctx: a context that represents the working directory and can
53 be committed,
49 be committed,
54 memctx: a context that represents changes in-memory and can also
50 memctx: a context that represents changes in-memory and can also
55 be committed."""
51 be committed."""
56 def __new__(cls, repo, changeid='', *args, **kwargs):
52 def __new__(cls, repo, changeid='', *args, **kwargs):
57 if isinstance(changeid, basectx):
53 if isinstance(changeid, basectx):
58 return changeid
54 return changeid
59
55
60 o = super(basectx, cls).__new__(cls)
56 o = super(basectx, cls).__new__(cls)
61
57
62 o._repo = repo
58 o._repo = repo
63 o._rev = nullrev
59 o._rev = nullrev
64 o._node = nullid
60 o._node = nullid
65
61
66 return o
62 return o
67
63
68 def __str__(self):
64 def __str__(self):
69 return short(self.node())
65 return short(self.node())
70
66
71 def __int__(self):
67 def __int__(self):
72 return self.rev()
68 return self.rev()
73
69
74 def __repr__(self):
70 def __repr__(self):
75 return "<%s %s>" % (type(self).__name__, str(self))
71 return "<%s %s>" % (type(self).__name__, str(self))
76
72
77 def __eq__(self, other):
73 def __eq__(self, other):
78 try:
74 try:
79 return type(self) == type(other) and self._rev == other._rev
75 return type(self) == type(other) and self._rev == other._rev
80 except AttributeError:
76 except AttributeError:
81 return False
77 return False
82
78
83 def __ne__(self, other):
79 def __ne__(self, other):
84 return not (self == other)
80 return not (self == other)
85
81
86 def __contains__(self, key):
82 def __contains__(self, key):
87 return key in self._manifest
83 return key in self._manifest
88
84
89 def __getitem__(self, key):
85 def __getitem__(self, key):
90 return self.filectx(key)
86 return self.filectx(key)
91
87
92 def __iter__(self):
88 def __iter__(self):
93 return iter(self._manifest)
89 return iter(self._manifest)
94
90
95 def _manifestmatches(self, match, s):
91 def _manifestmatches(self, match, s):
96 """generate a new manifest filtered by the match argument
92 """generate a new manifest filtered by the match argument
97
93
98 This method is for internal use only and mainly exists to provide an
94 This method is for internal use only and mainly exists to provide an
99 object oriented way for other contexts to customize the manifest
95 object oriented way for other contexts to customize the manifest
100 generation.
96 generation.
101 """
97 """
102 return self.manifest().matches(match)
98 return self.manifest().matches(match)
103
99
104 def _matchstatus(self, other, match):
100 def _matchstatus(self, other, match):
105 """return match.always if match is none
101 """return match.always if match is none
106
102
107 This internal method provides a way for child objects to override the
103 This internal method provides a way for child objects to override the
108 match operator.
104 match operator.
109 """
105 """
110 return match or matchmod.always(self._repo.root, self._repo.getcwd())
106 return match or matchmod.always(self._repo.root, self._repo.getcwd())
111
107
112 def _buildstatus(self, other, s, match, listignored, listclean,
108 def _buildstatus(self, other, s, match, listignored, listclean,
113 listunknown):
109 listunknown):
114 """build a status with respect to another context"""
110 """build a status with respect to another context"""
115 # Load earliest manifest first for caching reasons. More specifically,
111 # Load earliest manifest first for caching reasons. More specifically,
116 # if you have revisions 1000 and 1001, 1001 is probably stored as a
112 # if you have revisions 1000 and 1001, 1001 is probably stored as a
117 # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
113 # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
118 # 1000 and cache it so that when you read 1001, we just need to apply a
114 # 1000 and cache it so that when you read 1001, we just need to apply a
119 # delta to what's in the cache. So that's one full reconstruction + one
115 # delta to what's in the cache. So that's one full reconstruction + one
120 # delta application.
116 # delta application.
121 if self.rev() is not None and self.rev() < other.rev():
117 if self.rev() is not None and self.rev() < other.rev():
122 self.manifest()
118 self.manifest()
123 mf1 = other._manifestmatches(match, s)
119 mf1 = other._manifestmatches(match, s)
124 mf2 = self._manifestmatches(match, s)
120 mf2 = self._manifestmatches(match, s)
125
121
126 modified, added = [], []
122 modified, added = [], []
127 removed = []
123 removed = []
128 clean = []
124 clean = []
129 deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
125 deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
130 deletedset = set(deleted)
126 deletedset = set(deleted)
131 d = mf1.diff(mf2, clean=listclean)
127 d = mf1.diff(mf2, clean=listclean)
132 for fn, value in d.iteritems():
128 for fn, value in d.iteritems():
133 if fn in deletedset:
129 if fn in deletedset:
134 continue
130 continue
135 if value is None:
131 if value is None:
136 clean.append(fn)
132 clean.append(fn)
137 continue
133 continue
138 (node1, flag1), (node2, flag2) = value
134 (node1, flag1), (node2, flag2) = value
139 if node1 is None:
135 if node1 is None:
140 added.append(fn)
136 added.append(fn)
141 elif node2 is None:
137 elif node2 is None:
142 removed.append(fn)
138 removed.append(fn)
143 elif flag1 != flag2:
139 elif flag1 != flag2:
144 modified.append(fn)
140 modified.append(fn)
145 elif node2 != _newnode:
141 elif node2 != newnodeid:
146 # When comparing files between two commits, we save time by
142 # When comparing files between two commits, we save time by
147 # not comparing the file contents when the nodeids differ.
143 # not comparing the file contents when the nodeids differ.
148 # Note that this means we incorrectly report a reverted change
144 # Note that this means we incorrectly report a reverted change
149 # to a file as a modification.
145 # to a file as a modification.
150 modified.append(fn)
146 modified.append(fn)
151 elif self[fn].cmp(other[fn]):
147 elif self[fn].cmp(other[fn]):
152 modified.append(fn)
148 modified.append(fn)
153 else:
149 else:
154 clean.append(fn)
150 clean.append(fn)
155
151
156 if removed:
152 if removed:
157 # need to filter files if they are already reported as removed
153 # need to filter files if they are already reported as removed
158 unknown = [fn for fn in unknown if fn not in mf1]
154 unknown = [fn for fn in unknown if fn not in mf1]
159 ignored = [fn for fn in ignored if fn not in mf1]
155 ignored = [fn for fn in ignored if fn not in mf1]
160 # if they're deleted, don't report them as removed
156 # if they're deleted, don't report them as removed
161 removed = [fn for fn in removed if fn not in deletedset]
157 removed = [fn for fn in removed if fn not in deletedset]
162
158
163 return scmutil.status(modified, added, removed, deleted, unknown,
159 return scmutil.status(modified, added, removed, deleted, unknown,
164 ignored, clean)
160 ignored, clean)
165
161
166 @propertycache
162 @propertycache
167 def substate(self):
163 def substate(self):
168 return subrepo.state(self, self._repo.ui)
164 return subrepo.state(self, self._repo.ui)
169
165
170 def subrev(self, subpath):
166 def subrev(self, subpath):
171 return self.substate[subpath][1]
167 return self.substate[subpath][1]
172
168
173 def rev(self):
169 def rev(self):
174 return self._rev
170 return self._rev
175 def node(self):
171 def node(self):
176 return self._node
172 return self._node
177 def hex(self):
173 def hex(self):
178 return hex(self.node())
174 return hex(self.node())
179 def manifest(self):
175 def manifest(self):
180 return self._manifest
176 return self._manifest
181 def manifestctx(self):
177 def manifestctx(self):
182 return self._manifestctx
178 return self._manifestctx
183 def repo(self):
179 def repo(self):
184 return self._repo
180 return self._repo
185 def phasestr(self):
181 def phasestr(self):
186 return phases.phasenames[self.phase()]
182 return phases.phasenames[self.phase()]
187 def mutable(self):
183 def mutable(self):
188 return self.phase() > phases.public
184 return self.phase() > phases.public
189
185
190 def getfileset(self, expr):
186 def getfileset(self, expr):
191 return fileset.getfileset(self, expr)
187 return fileset.getfileset(self, expr)
192
188
193 def obsolete(self):
189 def obsolete(self):
194 """True if the changeset is obsolete"""
190 """True if the changeset is obsolete"""
195 return self.rev() in obsmod.getrevs(self._repo, 'obsolete')
191 return self.rev() in obsmod.getrevs(self._repo, 'obsolete')
196
192
197 def extinct(self):
193 def extinct(self):
198 """True if the changeset is extinct"""
194 """True if the changeset is extinct"""
199 return self.rev() in obsmod.getrevs(self._repo, 'extinct')
195 return self.rev() in obsmod.getrevs(self._repo, 'extinct')
200
196
201 def unstable(self):
197 def unstable(self):
202 """True if the changeset is not obsolete but it's ancestor are"""
198 """True if the changeset is not obsolete but it's ancestor are"""
203 return self.rev() in obsmod.getrevs(self._repo, 'unstable')
199 return self.rev() in obsmod.getrevs(self._repo, 'unstable')
204
200
205 def bumped(self):
201 def bumped(self):
206 """True if the changeset try to be a successor of a public changeset
202 """True if the changeset try to be a successor of a public changeset
207
203
208 Only non-public and non-obsolete changesets may be bumped.
204 Only non-public and non-obsolete changesets may be bumped.
209 """
205 """
210 return self.rev() in obsmod.getrevs(self._repo, 'bumped')
206 return self.rev() in obsmod.getrevs(self._repo, 'bumped')
211
207
212 def divergent(self):
208 def divergent(self):
213 """Is a successors of a changeset with multiple possible successors set
209 """Is a successors of a changeset with multiple possible successors set
214
210
215 Only non-public and non-obsolete changesets may be divergent.
211 Only non-public and non-obsolete changesets may be divergent.
216 """
212 """
217 return self.rev() in obsmod.getrevs(self._repo, 'divergent')
213 return self.rev() in obsmod.getrevs(self._repo, 'divergent')
218
214
219 def troubled(self):
215 def troubled(self):
220 """True if the changeset is either unstable, bumped or divergent"""
216 """True if the changeset is either unstable, bumped or divergent"""
221 return self.unstable() or self.bumped() or self.divergent()
217 return self.unstable() or self.bumped() or self.divergent()
222
218
223 def troubles(self):
219 def troubles(self):
224 """return the list of troubles affecting this changesets.
220 """return the list of troubles affecting this changesets.
225
221
226 Troubles are returned as strings. possible values are:
222 Troubles are returned as strings. possible values are:
227 - unstable,
223 - unstable,
228 - bumped,
224 - bumped,
229 - divergent.
225 - divergent.
230 """
226 """
231 troubles = []
227 troubles = []
232 if self.unstable():
228 if self.unstable():
233 troubles.append('unstable')
229 troubles.append('unstable')
234 if self.bumped():
230 if self.bumped():
235 troubles.append('bumped')
231 troubles.append('bumped')
236 if self.divergent():
232 if self.divergent():
237 troubles.append('divergent')
233 troubles.append('divergent')
238 return troubles
234 return troubles
239
235
240 def parents(self):
236 def parents(self):
241 """return contexts for each parent changeset"""
237 """return contexts for each parent changeset"""
242 return self._parents
238 return self._parents
243
239
244 def p1(self):
240 def p1(self):
245 return self._parents[0]
241 return self._parents[0]
246
242
247 def p2(self):
243 def p2(self):
248 parents = self._parents
244 parents = self._parents
249 if len(parents) == 2:
245 if len(parents) == 2:
250 return parents[1]
246 return parents[1]
251 return changectx(self._repo, nullrev)
247 return changectx(self._repo, nullrev)
252
248
253 def _fileinfo(self, path):
249 def _fileinfo(self, path):
254 if '_manifest' in self.__dict__:
250 if '_manifest' in self.__dict__:
255 try:
251 try:
256 return self._manifest[path], self._manifest.flags(path)
252 return self._manifest[path], self._manifest.flags(path)
257 except KeyError:
253 except KeyError:
258 raise error.ManifestLookupError(self._node, path,
254 raise error.ManifestLookupError(self._node, path,
259 _('not found in manifest'))
255 _('not found in manifest'))
260 if '_manifestdelta' in self.__dict__ or path in self.files():
256 if '_manifestdelta' in self.__dict__ or path in self.files():
261 if path in self._manifestdelta:
257 if path in self._manifestdelta:
262 return (self._manifestdelta[path],
258 return (self._manifestdelta[path],
263 self._manifestdelta.flags(path))
259 self._manifestdelta.flags(path))
264 mfl = self._repo.manifestlog
260 mfl = self._repo.manifestlog
265 try:
261 try:
266 node, flag = mfl[self._changeset.manifest].find(path)
262 node, flag = mfl[self._changeset.manifest].find(path)
267 except KeyError:
263 except KeyError:
268 raise error.ManifestLookupError(self._node, path,
264 raise error.ManifestLookupError(self._node, path,
269 _('not found in manifest'))
265 _('not found in manifest'))
270
266
271 return node, flag
267 return node, flag
272
268
273 def filenode(self, path):
269 def filenode(self, path):
274 return self._fileinfo(path)[0]
270 return self._fileinfo(path)[0]
275
271
276 def flags(self, path):
272 def flags(self, path):
277 try:
273 try:
278 return self._fileinfo(path)[1]
274 return self._fileinfo(path)[1]
279 except error.LookupError:
275 except error.LookupError:
280 return ''
276 return ''
281
277
282 def sub(self, path, allowcreate=True):
278 def sub(self, path, allowcreate=True):
283 '''return a subrepo for the stored revision of path, never wdir()'''
279 '''return a subrepo for the stored revision of path, never wdir()'''
284 return subrepo.subrepo(self, path, allowcreate=allowcreate)
280 return subrepo.subrepo(self, path, allowcreate=allowcreate)
285
281
286 def nullsub(self, path, pctx):
282 def nullsub(self, path, pctx):
287 return subrepo.nullsubrepo(self, path, pctx)
283 return subrepo.nullsubrepo(self, path, pctx)
288
284
289 def workingsub(self, path):
285 def workingsub(self, path):
290 '''return a subrepo for the stored revision, or wdir if this is a wdir
286 '''return a subrepo for the stored revision, or wdir if this is a wdir
291 context.
287 context.
292 '''
288 '''
293 return subrepo.subrepo(self, path, allowwdir=True)
289 return subrepo.subrepo(self, path, allowwdir=True)
294
290
295 def match(self, pats=[], include=None, exclude=None, default='glob',
291 def match(self, pats=[], include=None, exclude=None, default='glob',
296 listsubrepos=False, badfn=None):
292 listsubrepos=False, badfn=None):
297 r = self._repo
293 r = self._repo
298 return matchmod.match(r.root, r.getcwd(), pats,
294 return matchmod.match(r.root, r.getcwd(), pats,
299 include, exclude, default,
295 include, exclude, default,
300 auditor=r.nofsauditor, ctx=self,
296 auditor=r.nofsauditor, ctx=self,
301 listsubrepos=listsubrepos, badfn=badfn)
297 listsubrepos=listsubrepos, badfn=badfn)
302
298
303 def diff(self, ctx2=None, match=None, **opts):
299 def diff(self, ctx2=None, match=None, **opts):
304 """Returns a diff generator for the given contexts and matcher"""
300 """Returns a diff generator for the given contexts and matcher"""
305 if ctx2 is None:
301 if ctx2 is None:
306 ctx2 = self.p1()
302 ctx2 = self.p1()
307 if ctx2 is not None:
303 if ctx2 is not None:
308 ctx2 = self._repo[ctx2]
304 ctx2 = self._repo[ctx2]
309 diffopts = patch.diffopts(self._repo.ui, opts)
305 diffopts = patch.diffopts(self._repo.ui, opts)
310 return patch.diff(self._repo, ctx2, self, match=match, opts=diffopts)
306 return patch.diff(self._repo, ctx2, self, match=match, opts=diffopts)
311
307
312 def dirs(self):
308 def dirs(self):
313 return self._manifest.dirs()
309 return self._manifest.dirs()
314
310
315 def hasdir(self, dir):
311 def hasdir(self, dir):
316 return self._manifest.hasdir(dir)
312 return self._manifest.hasdir(dir)
317
313
318 def dirty(self, missing=False, merge=True, branch=True):
314 def dirty(self, missing=False, merge=True, branch=True):
319 return False
315 return False
320
316
321 def status(self, other=None, match=None, listignored=False,
317 def status(self, other=None, match=None, listignored=False,
322 listclean=False, listunknown=False, listsubrepos=False):
318 listclean=False, listunknown=False, listsubrepos=False):
323 """return status of files between two nodes or node and working
319 """return status of files between two nodes or node and working
324 directory.
320 directory.
325
321
326 If other is None, compare this node with working directory.
322 If other is None, compare this node with working directory.
327
323
328 returns (modified, added, removed, deleted, unknown, ignored, clean)
324 returns (modified, added, removed, deleted, unknown, ignored, clean)
329 """
325 """
330
326
331 ctx1 = self
327 ctx1 = self
332 ctx2 = self._repo[other]
328 ctx2 = self._repo[other]
333
329
334 # This next code block is, admittedly, fragile logic that tests for
330 # This next code block is, admittedly, fragile logic that tests for
335 # reversing the contexts and wouldn't need to exist if it weren't for
331 # reversing the contexts and wouldn't need to exist if it weren't for
336 # the fast (and common) code path of comparing the working directory
332 # the fast (and common) code path of comparing the working directory
337 # with its first parent.
333 # with its first parent.
338 #
334 #
339 # What we're aiming for here is the ability to call:
335 # What we're aiming for here is the ability to call:
340 #
336 #
341 # workingctx.status(parentctx)
337 # workingctx.status(parentctx)
342 #
338 #
343 # If we always built the manifest for each context and compared those,
339 # If we always built the manifest for each context and compared those,
344 # then we'd be done. But the special case of the above call means we
340 # then we'd be done. But the special case of the above call means we
345 # just copy the manifest of the parent.
341 # just copy the manifest of the parent.
346 reversed = False
342 reversed = False
347 if (not isinstance(ctx1, changectx)
343 if (not isinstance(ctx1, changectx)
348 and isinstance(ctx2, changectx)):
344 and isinstance(ctx2, changectx)):
349 reversed = True
345 reversed = True
350 ctx1, ctx2 = ctx2, ctx1
346 ctx1, ctx2 = ctx2, ctx1
351
347
352 match = ctx2._matchstatus(ctx1, match)
348 match = ctx2._matchstatus(ctx1, match)
353 r = scmutil.status([], [], [], [], [], [], [])
349 r = scmutil.status([], [], [], [], [], [], [])
354 r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
350 r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
355 listunknown)
351 listunknown)
356
352
357 if reversed:
353 if reversed:
358 # Reverse added and removed. Clear deleted, unknown and ignored as
354 # Reverse added and removed. Clear deleted, unknown and ignored as
359 # these make no sense to reverse.
355 # these make no sense to reverse.
360 r = scmutil.status(r.modified, r.removed, r.added, [], [], [],
356 r = scmutil.status(r.modified, r.removed, r.added, [], [], [],
361 r.clean)
357 r.clean)
362
358
363 if listsubrepos:
359 if listsubrepos:
364 for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
360 for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
365 try:
361 try:
366 rev2 = ctx2.subrev(subpath)
362 rev2 = ctx2.subrev(subpath)
367 except KeyError:
363 except KeyError:
368 # A subrepo that existed in node1 was deleted between
364 # A subrepo that existed in node1 was deleted between
369 # node1 and node2 (inclusive). Thus, ctx2's substate
365 # node1 and node2 (inclusive). Thus, ctx2's substate
370 # won't contain that subpath. The best we can do ignore it.
366 # won't contain that subpath. The best we can do ignore it.
371 rev2 = None
367 rev2 = None
372 submatch = matchmod.subdirmatcher(subpath, match)
368 submatch = matchmod.subdirmatcher(subpath, match)
373 s = sub.status(rev2, match=submatch, ignored=listignored,
369 s = sub.status(rev2, match=submatch, ignored=listignored,
374 clean=listclean, unknown=listunknown,
370 clean=listclean, unknown=listunknown,
375 listsubrepos=True)
371 listsubrepos=True)
376 for rfiles, sfiles in zip(r, s):
372 for rfiles, sfiles in zip(r, s):
377 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
373 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
378
374
379 for l in r:
375 for l in r:
380 l.sort()
376 l.sort()
381
377
382 return r
378 return r
383
379
384
380
385 def makememctx(repo, parents, text, user, date, branch, files, store,
381 def makememctx(repo, parents, text, user, date, branch, files, store,
386 editor=None, extra=None):
382 editor=None, extra=None):
387 def getfilectx(repo, memctx, path):
383 def getfilectx(repo, memctx, path):
388 data, mode, copied = store.getfile(path)
384 data, mode, copied = store.getfile(path)
389 if data is None:
385 if data is None:
390 return None
386 return None
391 islink, isexec = mode
387 islink, isexec = mode
392 return memfilectx(repo, path, data, islink=islink, isexec=isexec,
388 return memfilectx(repo, path, data, islink=islink, isexec=isexec,
393 copied=copied, memctx=memctx)
389 copied=copied, memctx=memctx)
394 if extra is None:
390 if extra is None:
395 extra = {}
391 extra = {}
396 if branch:
392 if branch:
397 extra['branch'] = encoding.fromlocal(branch)
393 extra['branch'] = encoding.fromlocal(branch)
398 ctx = memctx(repo, parents, text, files, getfilectx, user,
394 ctx = memctx(repo, parents, text, files, getfilectx, user,
399 date, extra, editor)
395 date, extra, editor)
400 return ctx
396 return ctx
401
397
402 class changectx(basectx):
398 class changectx(basectx):
403 """A changecontext object makes access to data related to a particular
399 """A changecontext object makes access to data related to a particular
404 changeset convenient. It represents a read-only context already present in
400 changeset convenient. It represents a read-only context already present in
405 the repo."""
401 the repo."""
406 def __init__(self, repo, changeid=''):
402 def __init__(self, repo, changeid=''):
407 """changeid is a revision number, node, or tag"""
403 """changeid is a revision number, node, or tag"""
408
404
409 # since basectx.__new__ already took care of copying the object, we
405 # since basectx.__new__ already took care of copying the object, we
410 # don't need to do anything in __init__, so we just exit here
406 # don't need to do anything in __init__, so we just exit here
411 if isinstance(changeid, basectx):
407 if isinstance(changeid, basectx):
412 return
408 return
413
409
414 if changeid == '':
410 if changeid == '':
415 changeid = '.'
411 changeid = '.'
416 self._repo = repo
412 self._repo = repo
417
413
418 try:
414 try:
419 if isinstance(changeid, int):
415 if isinstance(changeid, int):
420 self._node = repo.changelog.node(changeid)
416 self._node = repo.changelog.node(changeid)
421 self._rev = changeid
417 self._rev = changeid
422 return
418 return
423 if isinstance(changeid, long):
419 if isinstance(changeid, long):
424 changeid = str(changeid)
420 changeid = str(changeid)
425 if changeid == 'null':
421 if changeid == 'null':
426 self._node = nullid
422 self._node = nullid
427 self._rev = nullrev
423 self._rev = nullrev
428 return
424 return
429 if changeid == 'tip':
425 if changeid == 'tip':
430 self._node = repo.changelog.tip()
426 self._node = repo.changelog.tip()
431 self._rev = repo.changelog.rev(self._node)
427 self._rev = repo.changelog.rev(self._node)
432 return
428 return
433 if changeid == '.' or changeid == repo.dirstate.p1():
429 if changeid == '.' or changeid == repo.dirstate.p1():
434 # this is a hack to delay/avoid loading obsmarkers
430 # this is a hack to delay/avoid loading obsmarkers
435 # when we know that '.' won't be hidden
431 # when we know that '.' won't be hidden
436 self._node = repo.dirstate.p1()
432 self._node = repo.dirstate.p1()
437 self._rev = repo.unfiltered().changelog.rev(self._node)
433 self._rev = repo.unfiltered().changelog.rev(self._node)
438 return
434 return
439 if len(changeid) == 20:
435 if len(changeid) == 20:
440 try:
436 try:
441 self._node = changeid
437 self._node = changeid
442 self._rev = repo.changelog.rev(changeid)
438 self._rev = repo.changelog.rev(changeid)
443 return
439 return
444 except error.FilteredRepoLookupError:
440 except error.FilteredRepoLookupError:
445 raise
441 raise
446 except LookupError:
442 except LookupError:
447 pass
443 pass
448
444
449 try:
445 try:
450 r = int(changeid)
446 r = int(changeid)
451 if str(r) != changeid:
447 if str(r) != changeid:
452 raise ValueError
448 raise ValueError
453 l = len(repo.changelog)
449 l = len(repo.changelog)
454 if r < 0:
450 if r < 0:
455 r += l
451 r += l
456 if r < 0 or r >= l:
452 if r < 0 or r >= l:
457 raise ValueError
453 raise ValueError
458 self._rev = r
454 self._rev = r
459 self._node = repo.changelog.node(r)
455 self._node = repo.changelog.node(r)
460 return
456 return
461 except error.FilteredIndexError:
457 except error.FilteredIndexError:
462 raise
458 raise
463 except (ValueError, OverflowError, IndexError):
459 except (ValueError, OverflowError, IndexError):
464 pass
460 pass
465
461
466 if len(changeid) == 40:
462 if len(changeid) == 40:
467 try:
463 try:
468 self._node = bin(changeid)
464 self._node = bin(changeid)
469 self._rev = repo.changelog.rev(self._node)
465 self._rev = repo.changelog.rev(self._node)
470 return
466 return
471 except error.FilteredLookupError:
467 except error.FilteredLookupError:
472 raise
468 raise
473 except (TypeError, LookupError):
469 except (TypeError, LookupError):
474 pass
470 pass
475
471
476 # lookup bookmarks through the name interface
472 # lookup bookmarks through the name interface
477 try:
473 try:
478 self._node = repo.names.singlenode(repo, changeid)
474 self._node = repo.names.singlenode(repo, changeid)
479 self._rev = repo.changelog.rev(self._node)
475 self._rev = repo.changelog.rev(self._node)
480 return
476 return
481 except KeyError:
477 except KeyError:
482 pass
478 pass
483 except error.FilteredRepoLookupError:
479 except error.FilteredRepoLookupError:
484 raise
480 raise
485 except error.RepoLookupError:
481 except error.RepoLookupError:
486 pass
482 pass
487
483
488 self._node = repo.unfiltered().changelog._partialmatch(changeid)
484 self._node = repo.unfiltered().changelog._partialmatch(changeid)
489 if self._node is not None:
485 if self._node is not None:
490 self._rev = repo.changelog.rev(self._node)
486 self._rev = repo.changelog.rev(self._node)
491 return
487 return
492
488
493 # lookup failed
489 # lookup failed
494 # check if it might have come from damaged dirstate
490 # check if it might have come from damaged dirstate
495 #
491 #
496 # XXX we could avoid the unfiltered if we had a recognizable
492 # XXX we could avoid the unfiltered if we had a recognizable
497 # exception for filtered changeset access
493 # exception for filtered changeset access
498 if changeid in repo.unfiltered().dirstate.parents():
494 if changeid in repo.unfiltered().dirstate.parents():
499 msg = _("working directory has unknown parent '%s'!")
495 msg = _("working directory has unknown parent '%s'!")
500 raise error.Abort(msg % short(changeid))
496 raise error.Abort(msg % short(changeid))
501 try:
497 try:
502 if len(changeid) == 20 and nonascii(changeid):
498 if len(changeid) == 20 and nonascii(changeid):
503 changeid = hex(changeid)
499 changeid = hex(changeid)
504 except TypeError:
500 except TypeError:
505 pass
501 pass
506 except (error.FilteredIndexError, error.FilteredLookupError,
502 except (error.FilteredIndexError, error.FilteredLookupError,
507 error.FilteredRepoLookupError):
503 error.FilteredRepoLookupError):
508 if repo.filtername.startswith('visible'):
504 if repo.filtername.startswith('visible'):
509 msg = _("hidden revision '%s'") % changeid
505 msg = _("hidden revision '%s'") % changeid
510 hint = _('use --hidden to access hidden revisions')
506 hint = _('use --hidden to access hidden revisions')
511 raise error.FilteredRepoLookupError(msg, hint=hint)
507 raise error.FilteredRepoLookupError(msg, hint=hint)
512 msg = _("filtered revision '%s' (not in '%s' subset)")
508 msg = _("filtered revision '%s' (not in '%s' subset)")
513 msg %= (changeid, repo.filtername)
509 msg %= (changeid, repo.filtername)
514 raise error.FilteredRepoLookupError(msg)
510 raise error.FilteredRepoLookupError(msg)
515 except IndexError:
511 except IndexError:
516 pass
512 pass
517 raise error.RepoLookupError(
513 raise error.RepoLookupError(
518 _("unknown revision '%s'") % changeid)
514 _("unknown revision '%s'") % changeid)
519
515
520 def __hash__(self):
516 def __hash__(self):
521 try:
517 try:
522 return hash(self._rev)
518 return hash(self._rev)
523 except AttributeError:
519 except AttributeError:
524 return id(self)
520 return id(self)
525
521
526 def __nonzero__(self):
522 def __nonzero__(self):
527 return self._rev != nullrev
523 return self._rev != nullrev
528
524
529 @propertycache
525 @propertycache
530 def _changeset(self):
526 def _changeset(self):
531 return self._repo.changelog.changelogrevision(self.rev())
527 return self._repo.changelog.changelogrevision(self.rev())
532
528
533 @propertycache
529 @propertycache
534 def _manifest(self):
530 def _manifest(self):
535 return self._manifestctx.read()
531 return self._manifestctx.read()
536
532
537 @propertycache
533 @propertycache
538 def _manifestctx(self):
534 def _manifestctx(self):
539 return self._repo.manifestlog[self._changeset.manifest]
535 return self._repo.manifestlog[self._changeset.manifest]
540
536
541 @propertycache
537 @propertycache
542 def _manifestdelta(self):
538 def _manifestdelta(self):
543 return self._manifestctx.readdelta()
539 return self._manifestctx.readdelta()
544
540
545 @propertycache
541 @propertycache
546 def _parents(self):
542 def _parents(self):
547 repo = self._repo
543 repo = self._repo
548 p1, p2 = repo.changelog.parentrevs(self._rev)
544 p1, p2 = repo.changelog.parentrevs(self._rev)
549 if p2 == nullrev:
545 if p2 == nullrev:
550 return [changectx(repo, p1)]
546 return [changectx(repo, p1)]
551 return [changectx(repo, p1), changectx(repo, p2)]
547 return [changectx(repo, p1), changectx(repo, p2)]
552
548
553 def changeset(self):
549 def changeset(self):
554 c = self._changeset
550 c = self._changeset
555 return (
551 return (
556 c.manifest,
552 c.manifest,
557 c.user,
553 c.user,
558 c.date,
554 c.date,
559 c.files,
555 c.files,
560 c.description,
556 c.description,
561 c.extra,
557 c.extra,
562 )
558 )
563 def manifestnode(self):
559 def manifestnode(self):
564 return self._changeset.manifest
560 return self._changeset.manifest
565
561
566 def user(self):
562 def user(self):
567 return self._changeset.user
563 return self._changeset.user
568 def date(self):
564 def date(self):
569 return self._changeset.date
565 return self._changeset.date
570 def files(self):
566 def files(self):
571 return self._changeset.files
567 return self._changeset.files
572 def description(self):
568 def description(self):
573 return self._changeset.description
569 return self._changeset.description
574 def branch(self):
570 def branch(self):
575 return encoding.tolocal(self._changeset.extra.get("branch"))
571 return encoding.tolocal(self._changeset.extra.get("branch"))
576 def closesbranch(self):
572 def closesbranch(self):
577 return 'close' in self._changeset.extra
573 return 'close' in self._changeset.extra
578 def extra(self):
574 def extra(self):
579 return self._changeset.extra
575 return self._changeset.extra
580 def tags(self):
576 def tags(self):
581 return self._repo.nodetags(self._node)
577 return self._repo.nodetags(self._node)
582 def bookmarks(self):
578 def bookmarks(self):
583 return self._repo.nodebookmarks(self._node)
579 return self._repo.nodebookmarks(self._node)
584 def phase(self):
580 def phase(self):
585 return self._repo._phasecache.phase(self._repo, self._rev)
581 return self._repo._phasecache.phase(self._repo, self._rev)
586 def hidden(self):
582 def hidden(self):
587 return self._rev in repoview.filterrevs(self._repo, 'visible')
583 return self._rev in repoview.filterrevs(self._repo, 'visible')
588
584
589 def children(self):
585 def children(self):
590 """return contexts for each child changeset"""
586 """return contexts for each child changeset"""
591 c = self._repo.changelog.children(self._node)
587 c = self._repo.changelog.children(self._node)
592 return [changectx(self._repo, x) for x in c]
588 return [changectx(self._repo, x) for x in c]
593
589
594 def ancestors(self):
590 def ancestors(self):
595 for a in self._repo.changelog.ancestors([self._rev]):
591 for a in self._repo.changelog.ancestors([self._rev]):
596 yield changectx(self._repo, a)
592 yield changectx(self._repo, a)
597
593
598 def descendants(self):
594 def descendants(self):
599 for d in self._repo.changelog.descendants([self._rev]):
595 for d in self._repo.changelog.descendants([self._rev]):
600 yield changectx(self._repo, d)
596 yield changectx(self._repo, d)
601
597
602 def filectx(self, path, fileid=None, filelog=None):
598 def filectx(self, path, fileid=None, filelog=None):
603 """get a file context from this changeset"""
599 """get a file context from this changeset"""
604 if fileid is None:
600 if fileid is None:
605 fileid = self.filenode(path)
601 fileid = self.filenode(path)
606 return filectx(self._repo, path, fileid=fileid,
602 return filectx(self._repo, path, fileid=fileid,
607 changectx=self, filelog=filelog)
603 changectx=self, filelog=filelog)
608
604
609 def ancestor(self, c2, warn=False):
605 def ancestor(self, c2, warn=False):
610 """return the "best" ancestor context of self and c2
606 """return the "best" ancestor context of self and c2
611
607
612 If there are multiple candidates, it will show a message and check
608 If there are multiple candidates, it will show a message and check
613 merge.preferancestor configuration before falling back to the
609 merge.preferancestor configuration before falling back to the
614 revlog ancestor."""
610 revlog ancestor."""
615 # deal with workingctxs
611 # deal with workingctxs
616 n2 = c2._node
612 n2 = c2._node
617 if n2 is None:
613 if n2 is None:
618 n2 = c2._parents[0]._node
614 n2 = c2._parents[0]._node
619 cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
615 cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
620 if not cahs:
616 if not cahs:
621 anc = nullid
617 anc = nullid
622 elif len(cahs) == 1:
618 elif len(cahs) == 1:
623 anc = cahs[0]
619 anc = cahs[0]
624 else:
620 else:
625 # experimental config: merge.preferancestor
621 # experimental config: merge.preferancestor
626 for r in self._repo.ui.configlist('merge', 'preferancestor', ['*']):
622 for r in self._repo.ui.configlist('merge', 'preferancestor', ['*']):
627 try:
623 try:
628 ctx = changectx(self._repo, r)
624 ctx = changectx(self._repo, r)
629 except error.RepoLookupError:
625 except error.RepoLookupError:
630 continue
626 continue
631 anc = ctx.node()
627 anc = ctx.node()
632 if anc in cahs:
628 if anc in cahs:
633 break
629 break
634 else:
630 else:
635 anc = self._repo.changelog.ancestor(self._node, n2)
631 anc = self._repo.changelog.ancestor(self._node, n2)
636 if warn:
632 if warn:
637 self._repo.ui.status(
633 self._repo.ui.status(
638 (_("note: using %s as ancestor of %s and %s\n") %
634 (_("note: using %s as ancestor of %s and %s\n") %
639 (short(anc), short(self._node), short(n2))) +
635 (short(anc), short(self._node), short(n2))) +
640 ''.join(_(" alternatively, use --config "
636 ''.join(_(" alternatively, use --config "
641 "merge.preferancestor=%s\n") %
637 "merge.preferancestor=%s\n") %
642 short(n) for n in sorted(cahs) if n != anc))
638 short(n) for n in sorted(cahs) if n != anc))
643 return changectx(self._repo, anc)
639 return changectx(self._repo, anc)
644
640
645 def descendant(self, other):
641 def descendant(self, other):
646 """True if other is descendant of this changeset"""
642 """True if other is descendant of this changeset"""
647 return self._repo.changelog.descendant(self._rev, other._rev)
643 return self._repo.changelog.descendant(self._rev, other._rev)
648
644
649 def walk(self, match):
645 def walk(self, match):
650 '''Generates matching file names.'''
646 '''Generates matching file names.'''
651
647
652 # Wrap match.bad method to have message with nodeid
648 # Wrap match.bad method to have message with nodeid
653 def bad(fn, msg):
649 def bad(fn, msg):
654 # The manifest doesn't know about subrepos, so don't complain about
650 # The manifest doesn't know about subrepos, so don't complain about
655 # paths into valid subrepos.
651 # paths into valid subrepos.
656 if any(fn == s or fn.startswith(s + '/')
652 if any(fn == s or fn.startswith(s + '/')
657 for s in self.substate):
653 for s in self.substate):
658 return
654 return
659 match.bad(fn, _('no such file in rev %s') % self)
655 match.bad(fn, _('no such file in rev %s') % self)
660
656
661 m = matchmod.badmatch(match, bad)
657 m = matchmod.badmatch(match, bad)
662 return self._manifest.walk(m)
658 return self._manifest.walk(m)
663
659
664 def matches(self, match):
660 def matches(self, match):
665 return self.walk(match)
661 return self.walk(match)
666
662
class basefilectx(object):
    """A filecontext object represents the common logic for its children:
    filectx: read-only access to a filerevision that is already present
             in the repo,
    workingfilectx: a filecontext that represents files from the working
                    directory,
    memfilectx: a filecontext that represents files in-memory."""
    def __new__(cls, repo, path, *args, **kwargs):
        # Plain object construction; subclasses may override to redirect
        # instantiation, so keep the hook explicit here.
        return super(basefilectx, cls).__new__(cls)
676
672
677 @propertycache
673 @propertycache
678 def _filelog(self):
674 def _filelog(self):
679 return self._repo.file(self._path)
675 return self._repo.file(self._path)
680
676
681 @propertycache
677 @propertycache
682 def _changeid(self):
678 def _changeid(self):
683 if '_changeid' in self.__dict__:
679 if '_changeid' in self.__dict__:
684 return self._changeid
680 return self._changeid
685 elif '_changectx' in self.__dict__:
681 elif '_changectx' in self.__dict__:
686 return self._changectx.rev()
682 return self._changectx.rev()
687 elif '_descendantrev' in self.__dict__:
683 elif '_descendantrev' in self.__dict__:
688 # this file context was created from a revision with a known
684 # this file context was created from a revision with a known
689 # descendant, we can (lazily) correct for linkrev aliases
685 # descendant, we can (lazily) correct for linkrev aliases
690 return self._adjustlinkrev(self._descendantrev)
686 return self._adjustlinkrev(self._descendantrev)
691 else:
687 else:
692 return self._filelog.linkrev(self._filerev)
688 return self._filelog.linkrev(self._filerev)
693
689
694 @propertycache
690 @propertycache
695 def _filenode(self):
691 def _filenode(self):
696 if '_fileid' in self.__dict__:
692 if '_fileid' in self.__dict__:
697 return self._filelog.lookup(self._fileid)
693 return self._filelog.lookup(self._fileid)
698 else:
694 else:
699 return self._changectx.filenode(self._path)
695 return self._changectx.filenode(self._path)
700
696
701 @propertycache
697 @propertycache
702 def _filerev(self):
698 def _filerev(self):
703 return self._filelog.rev(self._filenode)
699 return self._filelog.rev(self._filenode)
704
700
705 @propertycache
701 @propertycache
706 def _repopath(self):
702 def _repopath(self):
707 return self._path
703 return self._path
708
704
709 def __nonzero__(self):
705 def __nonzero__(self):
710 try:
706 try:
711 self._filenode
707 self._filenode
712 return True
708 return True
713 except error.LookupError:
709 except error.LookupError:
714 # file is missing
710 # file is missing
715 return False
711 return False
716
712
717 def __str__(self):
713 def __str__(self):
718 try:
714 try:
719 return "%s@%s" % (self.path(), self._changectx)
715 return "%s@%s" % (self.path(), self._changectx)
720 except error.LookupError:
716 except error.LookupError:
721 return "%s@???" % self.path()
717 return "%s@???" % self.path()
722
718
723 def __repr__(self):
719 def __repr__(self):
724 return "<%s %s>" % (type(self).__name__, str(self))
720 return "<%s %s>" % (type(self).__name__, str(self))
725
721
726 def __hash__(self):
722 def __hash__(self):
727 try:
723 try:
728 return hash((self._path, self._filenode))
724 return hash((self._path, self._filenode))
729 except AttributeError:
725 except AttributeError:
730 return id(self)
726 return id(self)
731
727
732 def __eq__(self, other):
728 def __eq__(self, other):
733 try:
729 try:
734 return (type(self) == type(other) and self._path == other._path
730 return (type(self) == type(other) and self._path == other._path
735 and self._filenode == other._filenode)
731 and self._filenode == other._filenode)
736 except AttributeError:
732 except AttributeError:
737 return False
733 return False
738
734
739 def __ne__(self, other):
735 def __ne__(self, other):
740 return not (self == other)
736 return not (self == other)
741
737
742 def filerev(self):
738 def filerev(self):
743 return self._filerev
739 return self._filerev
744 def filenode(self):
740 def filenode(self):
745 return self._filenode
741 return self._filenode
746 def flags(self):
742 def flags(self):
747 return self._changectx.flags(self._path)
743 return self._changectx.flags(self._path)
748 def filelog(self):
744 def filelog(self):
749 return self._filelog
745 return self._filelog
750 def rev(self):
746 def rev(self):
751 return self._changeid
747 return self._changeid
752 def linkrev(self):
748 def linkrev(self):
753 return self._filelog.linkrev(self._filerev)
749 return self._filelog.linkrev(self._filerev)
754 def node(self):
750 def node(self):
755 return self._changectx.node()
751 return self._changectx.node()
756 def hex(self):
752 def hex(self):
757 return self._changectx.hex()
753 return self._changectx.hex()
758 def user(self):
754 def user(self):
759 return self._changectx.user()
755 return self._changectx.user()
760 def date(self):
756 def date(self):
761 return self._changectx.date()
757 return self._changectx.date()
762 def files(self):
758 def files(self):
763 return self._changectx.files()
759 return self._changectx.files()
764 def description(self):
760 def description(self):
765 return self._changectx.description()
761 return self._changectx.description()
766 def branch(self):
762 def branch(self):
767 return self._changectx.branch()
763 return self._changectx.branch()
768 def extra(self):
764 def extra(self):
769 return self._changectx.extra()
765 return self._changectx.extra()
770 def phase(self):
766 def phase(self):
771 return self._changectx.phase()
767 return self._changectx.phase()
772 def phasestr(self):
768 def phasestr(self):
773 return self._changectx.phasestr()
769 return self._changectx.phasestr()
774 def manifest(self):
770 def manifest(self):
775 return self._changectx.manifest()
771 return self._changectx.manifest()
776 def changectx(self):
772 def changectx(self):
777 return self._changectx
773 return self._changectx
778 def repo(self):
774 def repo(self):
779 return self._repo
775 return self._repo
780
776
781 def path(self):
777 def path(self):
782 return self._path
778 return self._path
783
779
784 def isbinary(self):
780 def isbinary(self):
785 try:
781 try:
786 return util.binary(self.data())
782 return util.binary(self.data())
787 except IOError:
783 except IOError:
788 return False
784 return False
789 def isexec(self):
785 def isexec(self):
790 return 'x' in self.flags()
786 return 'x' in self.flags()
791 def islink(self):
787 def islink(self):
792 return 'l' in self.flags()
788 return 'l' in self.flags()
793
789
794 def isabsent(self):
790 def isabsent(self):
795 """whether this filectx represents a file not in self._changectx
791 """whether this filectx represents a file not in self._changectx
796
792
797 This is mainly for merge code to detect change/delete conflicts. This is
793 This is mainly for merge code to detect change/delete conflicts. This is
798 expected to be True for all subclasses of basectx."""
794 expected to be True for all subclasses of basectx."""
799 return False
795 return False
800
796
801 _customcmp = False
797 _customcmp = False
802 def cmp(self, fctx):
798 def cmp(self, fctx):
803 """compare with other file context
799 """compare with other file context
804
800
805 returns True if different than fctx.
801 returns True if different than fctx.
806 """
802 """
807 if fctx._customcmp:
803 if fctx._customcmp:
808 return fctx.cmp(self)
804 return fctx.cmp(self)
809
805
810 if (fctx._filenode is None
806 if (fctx._filenode is None
811 and (self._repo._encodefilterpats
807 and (self._repo._encodefilterpats
812 # if file data starts with '\1\n', empty metadata block is
808 # if file data starts with '\1\n', empty metadata block is
813 # prepended, which adds 4 bytes to filelog.size().
809 # prepended, which adds 4 bytes to filelog.size().
814 or self.size() - 4 == fctx.size())
810 or self.size() - 4 == fctx.size())
815 or self.size() == fctx.size()):
811 or self.size() == fctx.size()):
816 return self._filelog.cmp(self._filenode, fctx.data())
812 return self._filelog.cmp(self._filenode, fctx.data())
817
813
818 return True
814 return True
819
815
820 def _adjustlinkrev(self, srcrev, inclusive=False):
816 def _adjustlinkrev(self, srcrev, inclusive=False):
821 """return the first ancestor of <srcrev> introducing <fnode>
817 """return the first ancestor of <srcrev> introducing <fnode>
822
818
823 If the linkrev of the file revision does not point to an ancestor of
819 If the linkrev of the file revision does not point to an ancestor of
824 srcrev, we'll walk down the ancestors until we find one introducing
820 srcrev, we'll walk down the ancestors until we find one introducing
825 this file revision.
821 this file revision.
826
822
827 :srcrev: the changeset revision we search ancestors from
823 :srcrev: the changeset revision we search ancestors from
828 :inclusive: if true, the src revision will also be checked
824 :inclusive: if true, the src revision will also be checked
829 """
825 """
830 repo = self._repo
826 repo = self._repo
831 cl = repo.unfiltered().changelog
827 cl = repo.unfiltered().changelog
832 mfl = repo.manifestlog
828 mfl = repo.manifestlog
833 # fetch the linkrev
829 # fetch the linkrev
834 lkr = self.linkrev()
830 lkr = self.linkrev()
835 # hack to reuse ancestor computation when searching for renames
831 # hack to reuse ancestor computation when searching for renames
836 memberanc = getattr(self, '_ancestrycontext', None)
832 memberanc = getattr(self, '_ancestrycontext', None)
837 iteranc = None
833 iteranc = None
838 if srcrev is None:
834 if srcrev is None:
839 # wctx case, used by workingfilectx during mergecopy
835 # wctx case, used by workingfilectx during mergecopy
840 revs = [p.rev() for p in self._repo[None].parents()]
836 revs = [p.rev() for p in self._repo[None].parents()]
841 inclusive = True # we skipped the real (revless) source
837 inclusive = True # we skipped the real (revless) source
842 else:
838 else:
843 revs = [srcrev]
839 revs = [srcrev]
844 if memberanc is None:
840 if memberanc is None:
845 memberanc = iteranc = cl.ancestors(revs, lkr,
841 memberanc = iteranc = cl.ancestors(revs, lkr,
846 inclusive=inclusive)
842 inclusive=inclusive)
847 # check if this linkrev is an ancestor of srcrev
843 # check if this linkrev is an ancestor of srcrev
848 if lkr not in memberanc:
844 if lkr not in memberanc:
849 if iteranc is None:
845 if iteranc is None:
850 iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
846 iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
851 fnode = self._filenode
847 fnode = self._filenode
852 path = self._path
848 path = self._path
853 for a in iteranc:
849 for a in iteranc:
854 ac = cl.read(a) # get changeset data (we avoid object creation)
850 ac = cl.read(a) # get changeset data (we avoid object creation)
855 if path in ac[3]: # checking the 'files' field.
851 if path in ac[3]: # checking the 'files' field.
856 # The file has been touched, check if the content is
852 # The file has been touched, check if the content is
857 # similar to the one we search for.
853 # similar to the one we search for.
858 if fnode == mfl[ac[0]].readfast().get(path):
854 if fnode == mfl[ac[0]].readfast().get(path):
859 return a
855 return a
860 # In theory, we should never get out of that loop without a result.
856 # In theory, we should never get out of that loop without a result.
861 # But if manifest uses a buggy file revision (not children of the
857 # But if manifest uses a buggy file revision (not children of the
862 # one it replaces) we could. Such a buggy situation will likely
858 # one it replaces) we could. Such a buggy situation will likely
863 # result is crash somewhere else at to some point.
859 # result is crash somewhere else at to some point.
864 return lkr
860 return lkr
865
861
866 def introrev(self):
862 def introrev(self):
867 """return the rev of the changeset which introduced this file revision
863 """return the rev of the changeset which introduced this file revision
868
864
869 This method is different from linkrev because it take into account the
865 This method is different from linkrev because it take into account the
870 changeset the filectx was created from. It ensures the returned
866 changeset the filectx was created from. It ensures the returned
871 revision is one of its ancestors. This prevents bugs from
867 revision is one of its ancestors. This prevents bugs from
872 'linkrev-shadowing' when a file revision is used by multiple
868 'linkrev-shadowing' when a file revision is used by multiple
873 changesets.
869 changesets.
874 """
870 """
875 lkr = self.linkrev()
871 lkr = self.linkrev()
876 attrs = vars(self)
872 attrs = vars(self)
877 noctx = not ('_changeid' in attrs or '_changectx' in attrs)
873 noctx = not ('_changeid' in attrs or '_changectx' in attrs)
878 if noctx or self.rev() == lkr:
874 if noctx or self.rev() == lkr:
879 return self.linkrev()
875 return self.linkrev()
880 return self._adjustlinkrev(self.rev(), inclusive=True)
876 return self._adjustlinkrev(self.rev(), inclusive=True)
881
877
882 def _parentfilectx(self, path, fileid, filelog):
878 def _parentfilectx(self, path, fileid, filelog):
883 """create parent filectx keeping ancestry info for _adjustlinkrev()"""
879 """create parent filectx keeping ancestry info for _adjustlinkrev()"""
884 fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
880 fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
885 if '_changeid' in vars(self) or '_changectx' in vars(self):
881 if '_changeid' in vars(self) or '_changectx' in vars(self):
886 # If self is associated with a changeset (probably explicitly
882 # If self is associated with a changeset (probably explicitly
887 # fed), ensure the created filectx is associated with a
883 # fed), ensure the created filectx is associated with a
888 # changeset that is an ancestor of self.changectx.
884 # changeset that is an ancestor of self.changectx.
889 # This lets us later use _adjustlinkrev to get a correct link.
885 # This lets us later use _adjustlinkrev to get a correct link.
890 fctx._descendantrev = self.rev()
886 fctx._descendantrev = self.rev()
891 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
887 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
892 elif '_descendantrev' in vars(self):
888 elif '_descendantrev' in vars(self):
893 # Otherwise propagate _descendantrev if we have one associated.
889 # Otherwise propagate _descendantrev if we have one associated.
894 fctx._descendantrev = self._descendantrev
890 fctx._descendantrev = self._descendantrev
895 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
891 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
896 return fctx
892 return fctx
897
893
898 def parents(self):
894 def parents(self):
899 _path = self._path
895 _path = self._path
900 fl = self._filelog
896 fl = self._filelog
901 parents = self._filelog.parents(self._filenode)
897 parents = self._filelog.parents(self._filenode)
902 pl = [(_path, node, fl) for node in parents if node != nullid]
898 pl = [(_path, node, fl) for node in parents if node != nullid]
903
899
904 r = fl.renamed(self._filenode)
900 r = fl.renamed(self._filenode)
905 if r:
901 if r:
906 # - In the simple rename case, both parent are nullid, pl is empty.
902 # - In the simple rename case, both parent are nullid, pl is empty.
907 # - In case of merge, only one of the parent is null id and should
903 # - In case of merge, only one of the parent is null id and should
908 # be replaced with the rename information. This parent is -always-
904 # be replaced with the rename information. This parent is -always-
909 # the first one.
905 # the first one.
910 #
906 #
911 # As null id have always been filtered out in the previous list
907 # As null id have always been filtered out in the previous list
912 # comprehension, inserting to 0 will always result in "replacing
908 # comprehension, inserting to 0 will always result in "replacing
913 # first nullid parent with rename information.
909 # first nullid parent with rename information.
914 pl.insert(0, (r[0], r[1], self._repo.file(r[0])))
910 pl.insert(0, (r[0], r[1], self._repo.file(r[0])))
915
911
916 return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]
912 return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]
917
913
918 def p1(self):
914 def p1(self):
919 return self.parents()[0]
915 return self.parents()[0]
920
916
921 def p2(self):
917 def p2(self):
922 p = self.parents()
918 p = self.parents()
923 if len(p) == 2:
919 if len(p) == 2:
924 return p[1]
920 return p[1]
925 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
921 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
926
922
927 def annotate(self, follow=False, linenumber=False, diffopts=None):
923 def annotate(self, follow=False, linenumber=False, diffopts=None):
928 '''returns a list of tuples of ((ctx, number), line) for each line
924 '''returns a list of tuples of ((ctx, number), line) for each line
929 in the file, where ctx is the filectx of the node where
925 in the file, where ctx is the filectx of the node where
930 that line was last changed; if linenumber parameter is true, number is
926 that line was last changed; if linenumber parameter is true, number is
931 the line number at the first appearance in the managed file, otherwise,
927 the line number at the first appearance in the managed file, otherwise,
932 number has a fixed value of False.
928 number has a fixed value of False.
933 '''
929 '''
934
930
935 def lines(text):
931 def lines(text):
936 if text.endswith("\n"):
932 if text.endswith("\n"):
937 return text.count("\n")
933 return text.count("\n")
938 return text.count("\n") + int(bool(text))
934 return text.count("\n") + int(bool(text))
939
935
940 if linenumber:
936 if linenumber:
941 def decorate(text, rev):
937 def decorate(text, rev):
942 return ([(rev, i) for i in xrange(1, lines(text) + 1)], text)
938 return ([(rev, i) for i in xrange(1, lines(text) + 1)], text)
943 else:
939 else:
944 def decorate(text, rev):
940 def decorate(text, rev):
945 return ([(rev, False)] * lines(text), text)
941 return ([(rev, False)] * lines(text), text)
946
942
947 def pair(parent, child):
943 def pair(parent, child):
948 blocks = mdiff.allblocks(parent[1], child[1], opts=diffopts)
944 blocks = mdiff.allblocks(parent[1], child[1], opts=diffopts)
949 for (a1, a2, b1, b2), t in blocks:
945 for (a1, a2, b1, b2), t in blocks:
950 # Changed blocks ('!') or blocks made only of blank lines ('~')
946 # Changed blocks ('!') or blocks made only of blank lines ('~')
951 # belong to the child.
947 # belong to the child.
952 if t == '=':
948 if t == '=':
953 child[0][b1:b2] = parent[0][a1:a2]
949 child[0][b1:b2] = parent[0][a1:a2]
954 return child
950 return child
955
951
956 getlog = util.lrucachefunc(lambda x: self._repo.file(x))
952 getlog = util.lrucachefunc(lambda x: self._repo.file(x))
957
953
958 def parents(f):
954 def parents(f):
959 # Cut _descendantrev here to mitigate the penalty of lazy linkrev
955 # Cut _descendantrev here to mitigate the penalty of lazy linkrev
960 # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
956 # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
961 # from the topmost introrev (= srcrev) down to p.linkrev() if it
957 # from the topmost introrev (= srcrev) down to p.linkrev() if it
962 # isn't an ancestor of the srcrev.
958 # isn't an ancestor of the srcrev.
963 f._changeid
959 f._changeid
964 pl = f.parents()
960 pl = f.parents()
965
961
966 # Don't return renamed parents if we aren't following.
962 # Don't return renamed parents if we aren't following.
967 if not follow:
963 if not follow:
968 pl = [p for p in pl if p.path() == f.path()]
964 pl = [p for p in pl if p.path() == f.path()]
969
965
970 # renamed filectx won't have a filelog yet, so set it
966 # renamed filectx won't have a filelog yet, so set it
971 # from the cache to save time
967 # from the cache to save time
972 for p in pl:
968 for p in pl:
973 if not '_filelog' in p.__dict__:
969 if not '_filelog' in p.__dict__:
974 p._filelog = getlog(p.path())
970 p._filelog = getlog(p.path())
975
971
976 return pl
972 return pl
977
973
978 # use linkrev to find the first changeset where self appeared
974 # use linkrev to find the first changeset where self appeared
979 base = self
975 base = self
980 introrev = self.introrev()
976 introrev = self.introrev()
981 if self.rev() != introrev:
977 if self.rev() != introrev:
982 base = self.filectx(self.filenode(), changeid=introrev)
978 base = self.filectx(self.filenode(), changeid=introrev)
983 if getattr(base, '_ancestrycontext', None) is None:
979 if getattr(base, '_ancestrycontext', None) is None:
984 cl = self._repo.changelog
980 cl = self._repo.changelog
985 if introrev is None:
981 if introrev is None:
986 # wctx is not inclusive, but works because _ancestrycontext
982 # wctx is not inclusive, but works because _ancestrycontext
987 # is used to test filelog revisions
983 # is used to test filelog revisions
988 ac = cl.ancestors([p.rev() for p in base.parents()],
984 ac = cl.ancestors([p.rev() for p in base.parents()],
989 inclusive=True)
985 inclusive=True)
990 else:
986 else:
991 ac = cl.ancestors([introrev], inclusive=True)
987 ac = cl.ancestors([introrev], inclusive=True)
992 base._ancestrycontext = ac
988 base._ancestrycontext = ac
993
989
994 # This algorithm would prefer to be recursive, but Python is a
990 # This algorithm would prefer to be recursive, but Python is a
995 # bit recursion-hostile. Instead we do an iterative
991 # bit recursion-hostile. Instead we do an iterative
996 # depth-first search.
992 # depth-first search.
997
993
998 # 1st DFS pre-calculates pcache and needed
994 # 1st DFS pre-calculates pcache and needed
999 visit = [base]
995 visit = [base]
1000 pcache = {}
996 pcache = {}
1001 needed = {base: 1}
997 needed = {base: 1}
1002 while visit:
998 while visit:
1003 f = visit.pop()
999 f = visit.pop()
1004 if f in pcache:
1000 if f in pcache:
1005 continue
1001 continue
1006 pl = parents(f)
1002 pl = parents(f)
1007 pcache[f] = pl
1003 pcache[f] = pl
1008 for p in pl:
1004 for p in pl:
1009 needed[p] = needed.get(p, 0) + 1
1005 needed[p] = needed.get(p, 0) + 1
1010 if p not in pcache:
1006 if p not in pcache:
1011 visit.append(p)
1007 visit.append(p)
1012
1008
1013 # 2nd DFS does the actual annotate
1009 # 2nd DFS does the actual annotate
1014 visit[:] = [base]
1010 visit[:] = [base]
1015 hist = {}
1011 hist = {}
1016 while visit:
1012 while visit:
1017 f = visit[-1]
1013 f = visit[-1]
1018 if f in hist:
1014 if f in hist:
1019 visit.pop()
1015 visit.pop()
1020 continue
1016 continue
1021
1017
1022 ready = True
1018 ready = True
1023 pl = pcache[f]
1019 pl = pcache[f]
1024 for p in pl:
1020 for p in pl:
1025 if p not in hist:
1021 if p not in hist:
1026 ready = False
1022 ready = False
1027 visit.append(p)
1023 visit.append(p)
1028 if ready:
1024 if ready:
1029 visit.pop()
1025 visit.pop()
1030 curr = decorate(f.data(), f)
1026 curr = decorate(f.data(), f)
1031 for p in pl:
1027 for p in pl:
1032 curr = pair(hist[p], curr)
1028 curr = pair(hist[p], curr)
1033 if needed[p] == 1:
1029 if needed[p] == 1:
1034 del hist[p]
1030 del hist[p]
1035 del needed[p]
1031 del needed[p]
1036 else:
1032 else:
1037 needed[p] -= 1
1033 needed[p] -= 1
1038
1034
1039 hist[f] = curr
1035 hist[f] = curr
1040 del pcache[f]
1036 del pcache[f]
1041
1037
1042 return zip(hist[base][0], hist[base][1].splitlines(True))
1038 return zip(hist[base][0], hist[base][1].splitlines(True))
1043
1039
def ancestors(self, followfirst=False):
    """Yield the ancestor file contexts of this file context.

    Candidates are ordered so that the pending ancestor with the
    greatest (linkrev, filenode) key is emitted first.  With
    ``followfirst=True`` only first parents are walked.
    """
    pending = {}
    cut = 1 if followfirst else None
    ctx = self
    while True:
        for parent in ctx.parents()[:cut]:
            pending[(parent.linkrev(), parent.filenode())] = parent
        if not pending:
            break
        ctx = pending.pop(max(pending))
        yield ctx
1059
1055
class filectx(basefilectx):
    """A filecontext object makes access to data related to a particular
    filerevision convenient."""
    def __init__(self, repo, path, changeid=None, fileid=None,
                 filelog=None, changectx=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        self._repo = repo
        self._path = path

        # at least one way of pinning down the file revision must be given
        assert (changeid is not None
                or fileid is not None
                or changectx is not None), \
                ("bad args: changeid=%r, fileid=%r, changectx=%r"
                 % (changeid, fileid, changectx))

        # seed only the attributes the caller supplied; anything missing
        # is computed lazily by the propertycache accessors
        if filelog is not None:
            self._filelog = filelog

        if changeid is not None:
            self._changeid = changeid
        if changectx is not None:
            self._changectx = changectx
        if fileid is not None:
            self._fileid = fileid

    @propertycache
    def _changectx(self):
        # Lazily build the owning changectx from self._changeid.
        try:
            return changectx(self._repo, self._changeid)
        except error.FilteredRepoLookupError:
            # Linkrev may point to any revision in the repository. When the
            # repository is filtered this may lead to `filectx` trying to build
            # `changectx` for filtered revision. In such case we fallback to
            # creating `changectx` on the unfiltered version of the reposition.
            # This fallback should not be an issue because `changectx` from
            # `filectx` are not used in complex operations that care about
            # filtering.
            #
            # This fallback is a cheap and dirty fix that prevent several
            # crashes. It does not ensure the behavior is correct. However the
            # behavior was not correct before filtering either and "incorrect
            # behavior" is seen as better as "crash"
            #
            # Linkrevs have several serious troubles with filtering that are
            # complicated to solve. Proper handling of the issue here should be
            # considered when solving linkrev issue are on the table.
            return changectx(self._repo.unfiltered(), self._changeid)

    def filectx(self, fileid, changeid=None):
        '''opens an arbitrary revision of the file without
        opening a new filelog'''
        return filectx(self._repo, self._path, fileid=fileid,
                       filelog=self._filelog, changeid=changeid)

    def data(self):
        """Return this file revision's content, honoring the censor policy.

        A censored node yields "" when censor.policy is "ignore";
        otherwise the read aborts with a hint.
        """
        try:
            return self._filelog.read(self._filenode)
        except error.CensoredNodeError:
            if self._repo.ui.config("censor", "policy", "abort") == "ignore":
                return ""
            raise error.Abort(_("censored node: %s") % short(self._filenode),
                              hint=_("set censor.policy to ignore errors"))

    def size(self):
        # size of this file revision as recorded in the filelog
        return self._filelog.size(self._filerev)

    def renamed(self):
        """check if file was actually renamed in this changeset revision

        If rename logged in file revision, we report copy for changeset only
        if file revisions linkrev points back to the changeset in question
        or both changeset parents contain different file revisions.
        """

        renamed = self._filelog.renamed(self._filenode)
        if not renamed:
            return renamed

        if self.rev() == self.linkrev():
            return renamed

        name = self.path()
        fnode = self._filenode
        for p in self._changectx.parents():
            try:
                # a parent already carrying this exact file revision means
                # the rename happened in an ancestor, not in this changeset
                if fnode == p.filenode(name):
                    return None
            except error.LookupError:
                pass
        return renamed

    def children(self):
        # hard for renames
        c = self._filelog.children(self._filenode)
        return [filectx(self._repo, self._path, fileid=x,
                        filelog=self._filelog) for x in c]
1157
1153
class committablectx(basectx):
    """A committablectx object provides common functionality for a context that
    wants the ability to commit, e.g. workingctx or memctx."""
    def __init__(self, repo, text="", user=None, date=None, extra=None,
                 changes=None):
        """Initialize an uncommitted context.

        text - commit message; user/date default lazily to the repo's
        username and the current time when not given.  changes, when
        supplied, pre-seeds the cached status.  extra is copied and a
        'branch' entry is filled in from the dirstate if absent.
        """
        self._repo = repo
        self._rev = None
        self._node = None
        self._text = text
        if date:
            self._date = util.parsedate(date)
        if user:
            self._user = user
        if changes:
            self._status = changes

        self._extra = {}
        if extra:
            self._extra = extra.copy()
        if 'branch' not in self._extra:
            try:
                branch = encoding.fromlocal(self._repo.dirstate.branch())
            except UnicodeDecodeError:
                raise error.Abort(_('branch name not in UTF-8!'))
            self._extra['branch'] = branch
        if self._extra['branch'] == '':
            self._extra['branch'] = 'default'

    def __str__(self):
        return str(self._parents[0]) + "+"

    def __nonzero__(self):
        return True

    def _buildflagfunc(self):
        # Create a fallback function for getting file flags when the
        # filesystem doesn't support them

        copiesget = self._repo.dirstate.copies().get
        parents = self.parents()
        if len(parents) < 2:
            # when we have one parent, it's easy: copy from parent
            man = parents[0].manifest()
            def func(f):
                f = copiesget(f, f)
                return man.flags(f)
        else:
            # merges are tricky: we try to reconstruct the unstored
            # result from the merge (issue1802)
            p1, p2 = parents
            pa = p1.ancestor(p2)
            m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()

            def func(f):
                f = copiesget(f, f) # may be wrong for merges with copies
                fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
                if fl1 == fl2:
                    return fl1
                if fl1 == fla:
                    return fl2
                if fl2 == fla:
                    return fl1
                return '' # punt for conflicts

        return func

    @propertycache
    def _flagfunc(self):
        return self._repo.dirstate.flagfunc(self._buildflagfunc)

    @propertycache
    def _manifest(self):
        """generate a manifest corresponding to the values in self._status

        This reuse the file nodeid from parent, but we append an extra letter
        when modified. Modified files get an extra 'm' while added files get
        an extra 'a'. This is used by manifests merge to see that files
        are different and by update logic to avoid deleting newly added files.
        """
        parents = self.parents()

        man1 = parents[0].manifest()
        man = man1.copy()
        if len(parents) > 1:
            man2 = self.p2().manifest()
            def getman(f):
                if f in man1:
                    return man1
                return man2
        else:
            getman = lambda f: man1

        copied = self._repo.dirstate.copies()
        ff = self._flagfunc
        for i, l in (("a", self._status.added), ("m", self._status.modified)):
            for f in l:
                # copies take their base nodeid from the copy source
                orig = copied.get(f, f)
                man[f] = getman(orig).get(orig, nullid) + i
                try:
                    man.setflag(f, ff(f))
                except OSError:
                    pass

        for f in self._status.deleted + self._status.removed:
            if f in man:
                del man[f]

        return man

    @propertycache
    def _status(self):
        # default: compute status against the working directory
        return self._repo.status()

    @propertycache
    def _user(self):
        return self._repo.ui.username()

    @propertycache
    def _date(self):
        return util.makedate()

    def subrev(self, subpath):
        # uncommitted contexts have no recorded subrepo revision
        return None

    def manifestnode(self):
        return None
    def user(self):
        return self._user or self._repo.ui.username()
    def date(self):
        return self._date
    def description(self):
        return self._text
    def files(self):
        return sorted(self._status.modified + self._status.added +
                      self._status.removed)

    def modified(self):
        return self._status.modified
    def added(self):
        return self._status.added
    def removed(self):
        return self._status.removed
    def deleted(self):
        return self._status.deleted
    def branch(self):
        return encoding.tolocal(self._extra['branch'])
    def closesbranch(self):
        return 'close' in self._extra
    def extra(self):
        return self._extra

    def tags(self):
        return []

    def bookmarks(self):
        # an uncommitted context inherits its parents' bookmarks
        b = []
        for p in self.parents():
            b.extend(p.bookmarks())
        return b

    def phase(self):
        phase = phases.draft # default phase to draft
        for p in self.parents():
            phase = max(phase, p.phase())
        return phase

    def hidden(self):
        return False

    def children(self):
        return []

    def flags(self, path):
        # prefer the cached manifest's flags when it has been built
        if '_manifest' in self.__dict__:
            try:
                return self._manifest.flags(path)
            except KeyError:
                return ''

        try:
            return self._flagfunc(path)
        except OSError:
            return ''

    def ancestor(self, c2):
        """return the "best" ancestor context of self and c2"""
        return self._parents[0].ancestor(c2) # punt on two parents for now

    def walk(self, match):
        '''Generates matching file names.'''
        return sorted(self._repo.dirstate.walk(match, sorted(self.substate),
                                               True, False))

    def matches(self, match):
        return sorted(self._repo.dirstate.matches(match))

    def ancestors(self):
        # yield the parents first, then every changelog ancestor of them
        for p in self._parents:
            yield p
        for a in self._repo.changelog.ancestors(
                [p.rev() for p in self._parents]):
            yield changectx(self._repo, a)

    def markcommitted(self, node):
        """Perform post-commit cleanup necessary after committing this ctx

        Specifically, this updates backing stores this working context
        wraps to reflect the fact that the changes reflected by this
        workingctx have been committed. For example, it marks
        modified and added files as normal in the dirstate.

        """

        self._repo.dirstate.beginparentchange()
        for f in self.modified() + self.added():
            self._repo.dirstate.normal(f)
        for f in self.removed():
            self._repo.dirstate.drop(f)
        self._repo.dirstate.setparents(node)
        self._repo.dirstate.endparentchange()

        # write changes out explicitly, because nesting wlock at
        # runtime may prevent 'wlock.release()' in 'repo.commit()'
        # from immediately doing so for subsequent changing files
        self._repo.dirstate.write(self._repo.currenttransaction())
1383
1379
1384 class workingctx(committablectx):
1380 class workingctx(committablectx):
1385 """A workingctx object makes access to data related to
1381 """A workingctx object makes access to data related to
1386 the current working directory convenient.
1382 the current working directory convenient.
1387 date - any valid date string or (unixtime, offset), or None.
1383 date - any valid date string or (unixtime, offset), or None.
1388 user - username string, or None.
1384 user - username string, or None.
1389 extra - a dictionary of extra values, or None.
1385 extra - a dictionary of extra values, or None.
1390 changes - a list of file lists as returned by localrepo.status()
1386 changes - a list of file lists as returned by localrepo.status()
1391 or None to use the repository status.
1387 or None to use the repository status.
1392 """
1388 """
def __init__(self, repo, text="", user=None, date=None, extra=None,
             changes=None):
    """Create a context for the uncommitted working directory.

    All arguments are forwarded unchanged to committablectx; this
    subclass adds no construction-time state of its own.
    """
    super(workingctx, self).__init__(repo, text, user, date, extra, changes)
1396
1392
def __iter__(self):
    """Iterate over tracked file names, skipping entries whose
    dirstate status is 'r' (scheduled for removal)."""
    ds = self._repo.dirstate
    for name in ds:
        if ds[name] != 'r':
            yield name
1402
1398
def __contains__(self, key):
    """A file is "in" the working context unless the dirstate reports
    it untracked ('?') or removed ('r')."""
    status = self._repo.dirstate[key]
    return status != '?' and status != 'r'
1405
1401
def hex(self):
    # the working directory has no committed node; report the static
    # working-directory placeholder id (wdirid) in hex form
    return hex(wdirid)
1408
1404
@propertycache
def _parents(self):
    """Working directory parents as changectxs; a null second parent
    is omitted so the list has one entry outside of merges."""
    nodes = self._repo.dirstate.parents()
    if nodes[1] == nullid:
        nodes = nodes[:-1]
    return [changectx(self._repo, node) for node in nodes]
1415
1411
def filectx(self, path, filelog=None):
    """get a file context from the working directory

    path - repository-relative file name
    filelog - optional pre-opened filelog to reuse
    """
    return workingfilectx(self._repo, path, workingctx=self,
                          filelog=filelog)
1420
1416
def dirty(self, missing=False, merge=True, branch=True):
    """check whether a working directory is modified"""
    # a dirty subrepo makes the whole working directory dirty
    for sub in sorted(self.substate):
        if self.sub(sub).dirty():
            return True
    # otherwise inspect the working directory itself; note the truthy
    # operand itself (e.g. the list of modified files) is returned
    return ((merge and self.p2()) or
            (branch and self.branch() != self.p1().branch()) or
            self.modified() or self.added() or self.removed() or
            (missing and self.deleted()))
1432
1428
def add(self, list, prefix=""):
    """Schedule the files in *list* for addition, returning rejects.

    Each candidate is checked for name portability, existence, size
    (a RAM warning above 10MB) and type — only regular files and
    symlinks are supported.  Files already tracked are warned about;
    files marked removed are resurrected via normallookup instead of
    being re-added.
    """
    join = lambda f: os.path.join(prefix, f)
    with self._repo.wlock():
        ui, ds = self._repo.ui, self._repo.dirstate
        rejected = []
        lstat = self._repo.wvfs.lstat
        for f in list:
            # reject non-portable names (e.g. Windows-reserved names)
            scmutil.checkportable(ui, join(f))
            try:
                st = lstat(f)
            except OSError:
                ui.warn(_("%s does not exist!\n") % join(f))
                rejected.append(f)
                continue
            if st.st_size > 10000000:
                ui.warn(_("%s: up to %d MB of RAM may be required "
                          "to manage this file\n"
                          "(use 'hg revert %s' to cancel the "
                          "pending addition)\n")
                        % (f, 3 * st.st_size // 1000000, join(f)))
            if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
                ui.warn(_("%s not added: only files and symlinks "
                          "supported currently\n") % join(f))
                rejected.append(f)
            elif ds[f] in 'amn':
                # already added, merged or tracked normally
                ui.warn(_("%s already tracked!\n") % join(f))
            elif ds[f] == 'r':
                # previously removed: bring it back instead of re-adding
                ds.normallookup(f)
            else:
                ds.add(f)
        return rejected
1464
1460
def forget(self, files, prefix=""):
    """Stop tracking each file in *files*; return those we could not.

    Unknown files are warned about and reported back.  Files merely
    scheduled for addition are dropped outright, anything else is
    marked removed.
    """
    def join(f):
        return os.path.join(prefix, f)
    with self._repo.wlock():
        ds = self._repo.dirstate
        rejected = []
        for f in files:
            if f not in ds:
                self._repo.ui.warn(_("%s not tracked!\n") % join(f))
                rejected.append(f)
            elif ds[f] == 'a':
                ds.drop(f)
            else:
                ds.remove(f)
        return rejected
1478
1474
def undelete(self, list):
    """Restore each file in *list* that is marked removed ('r').

    The content comes from whichever parent has the file (first
    parent preferred) and the dirstate entry is reset to normal.
    Files not marked removed are warned about and skipped.
    """
    pctxs = self.parents()
    with self._repo.wlock():
        for f in list:
            if self._repo.dirstate[f] != 'r':
                self._repo.ui.warn(_("%s not removed!\n") % f)
            else:
                # use a real conditional expression rather than the
                # fragile "cond and a or b" idiom, which would fall
                # through to the second parent whenever the first
                # parent's filectx evaluated falsy
                fctx = pctxs[0][f] if f in pctxs[0] else pctxs[1][f]
                t = fctx.data()
                self._repo.wwrite(f, t, fctx.flags())
                self._repo.dirstate.normal(f)
1490
1486
def copy(self, source, dest):
    """Record in the dirstate that *dest* is a copy of *source*.

    *dest* must exist in the working directory as a regular file or
    symlink; otherwise a warning is emitted and nothing is recorded.
    """
    try:
        st = self._repo.wvfs.lstat(dest)
    except OSError as err:
        # only a missing destination is tolerated; other errors propagate
        if err.errno != errno.ENOENT:
            raise
        self._repo.ui.warn(_("%s does not exist!\n") % dest)
        return
    mode = st.st_mode
    if not (stat.S_ISREG(mode) or stat.S_ISLNK(mode)):
        self._repo.ui.warn(_("copy failed: %s is not a file or a "
                             "symbolic link\n") % dest)
        return
    with self._repo.wlock():
        dirstate = self._repo.dirstate
        state = dirstate[dest]
        if state in '?':
            dirstate.add(dest)
        elif state in 'r':
            dirstate.normallookup(dest)
        dirstate.copy(source, dest)
1509
1505
def match(self, pats=None, include=None, exclude=None, default='glob',
          listsubrepos=False, badfn=None):
    """Build a matcher for the working directory.

    ``pats=None`` (the default) means "no patterns" and is normalized
    to an empty list internally; a None sentinel avoids the
    mutable-default-argument pitfall of the previous ``pats=[]``.
    """
    if pats is None:
        pats = []
    r = self._repo

    # Only a case insensitive filesystem needs magic to translate user input
    # to actual case in the filesystem.
    if not util.fscasesensitive(r.root):
        return matchmod.icasefsmatcher(r.root, r.getcwd(), pats, include,
                                       exclude, default, r.auditor, self,
                                       listsubrepos=listsubrepos,
                                       badfn=badfn)
    return matchmod.match(r.root, r.getcwd(), pats,
                          include, exclude, default,
                          auditor=r.auditor, ctx=self,
                          listsubrepos=listsubrepos, badfn=badfn)
1525
1521
def _filtersuspectsymlink(self, files):
    """Drop files flagged as symlinks whose content cannot be a link.

    On filesystems without symlink support, placeholder files may end
    up with arbitrary contents (user error, or dereferencing by NFS
    or Samba servers); such entries are filtered out so they are not
    reported as modified symlinks.
    """
    if not files or self._repo.dirstate._checklink:
        return files

    def _looksbogus(name):
        # an empty, huge, multi-line or binary payload is no link target
        payload = self[name].data()
        return (payload == '' or len(payload) >= 1024
                or '\n' in payload or util.binary(payload))

    sane = []
    for name in files:
        if self.flags(name) == 'l' and _looksbogus(name):
            self._repo.ui.debug('ignoring suspect symlink placeholder'
                                ' "%s"\n' % name)
            continue
        sane.append(name)
    return sane
1544
1540
def _checklookup(self, files):
    """Recheck files whose dirstate status was ambiguous ("lookup").

    Returns (modified, fixup): files that really differ from the
    first parent, and files that turned out to be clean.  Clean files
    are opportunistically re-marked normal in the dirstate; that
    update is skipped entirely if the wlock cannot be taken without
    waiting.
    """
    # check for any possibly clean files
    if not files:
        return [], []

    modified = []
    fixup = []
    pctx = self._parents[0]
    # do a full compare of any files that might have changed
    for f in sorted(files):
        if (f not in pctx or self.flags(f) != pctx.flags(f)
            or pctx[f].cmp(self[f])):
            modified.append(f)
        else:
            fixup.append(f)

    # update dirstate for files that are actually clean
    if fixup:
        try:
            # updating the dirstate is optional
            # so we don't wait on the lock
            # wlock can invalidate the dirstate, so cache normal _after_
            # taking the lock
            with self._repo.wlock(False):
                normal = self._repo.dirstate.normal
                for f in fixup:
                    normal(f)
                # write changes out explicitly, because nesting
                # wlock at runtime may prevent 'wlock.release()'
                # after this block from doing so for subsequent
                # changing files
                self._repo.dirstate.write(self._repo.currenttransaction())
        except error.LockError:
            pass
    return modified, fixup
1580
1576
1581 def _manifestmatches(self, match, s):
1577 def _manifestmatches(self, match, s):
1582 """Slow path for workingctx
1578 """Slow path for workingctx
1583
1579
1584 The fast path is when we compare the working directory to its parent
1580 The fast path is when we compare the working directory to its parent
1585 which means this function is comparing with a non-parent; therefore we
1581 which means this function is comparing with a non-parent; therefore we
1586 need to build a manifest and return what matches.
1582 need to build a manifest and return what matches.
1587 """
1583 """
1588 mf = self._repo['.']._manifestmatches(match, s)
1584 mf = self._repo['.']._manifestmatches(match, s)
1589 for f in s.modified + s.added:
1585 for f in s.modified + s.added:
1590 mf[f] = _newnode
1586 mf[f] = newnodeid
1591 mf.setflag(f, self.flags(f))
1587 mf.setflag(f, self.flags(f))
1592 for f in s.removed:
1588 for f in s.removed:
1593 if f in mf:
1589 if f in mf:
1594 del mf[f]
1590 del mf[f]
1595 return mf
1591 return mf
1596
1592
1597 def _dirstatestatus(self, match=None, ignored=False, clean=False,
1593 def _dirstatestatus(self, match=None, ignored=False, clean=False,
1598 unknown=False):
1594 unknown=False):
1599 '''Gets the status from the dirstate -- internal use only.'''
1595 '''Gets the status from the dirstate -- internal use only.'''
1600 listignored, listclean, listunknown = ignored, clean, unknown
1596 listignored, listclean, listunknown = ignored, clean, unknown
1601 match = match or matchmod.always(self._repo.root, self._repo.getcwd())
1597 match = match or matchmod.always(self._repo.root, self._repo.getcwd())
1602 subrepos = []
1598 subrepos = []
1603 if '.hgsub' in self:
1599 if '.hgsub' in self:
1604 subrepos = sorted(self.substate)
1600 subrepos = sorted(self.substate)
1605 cmp, s = self._repo.dirstate.status(match, subrepos, listignored,
1601 cmp, s = self._repo.dirstate.status(match, subrepos, listignored,
1606 listclean, listunknown)
1602 listclean, listunknown)
1607
1603
1608 # check for any possibly clean files
1604 # check for any possibly clean files
1609 if cmp:
1605 if cmp:
1610 modified2, fixup = self._checklookup(cmp)
1606 modified2, fixup = self._checklookup(cmp)
1611 s.modified.extend(modified2)
1607 s.modified.extend(modified2)
1612
1608
1613 # update dirstate for files that are actually clean
1609 # update dirstate for files that are actually clean
1614 if fixup and listclean:
1610 if fixup and listclean:
1615 s.clean.extend(fixup)
1611 s.clean.extend(fixup)
1616
1612
1617 if match.always():
1613 if match.always():
1618 # cache for performance
1614 # cache for performance
1619 if s.unknown or s.ignored or s.clean:
1615 if s.unknown or s.ignored or s.clean:
1620 # "_status" is cached with list*=False in the normal route
1616 # "_status" is cached with list*=False in the normal route
1621 self._status = scmutil.status(s.modified, s.added, s.removed,
1617 self._status = scmutil.status(s.modified, s.added, s.removed,
1622 s.deleted, [], [], [])
1618 s.deleted, [], [], [])
1623 else:
1619 else:
1624 self._status = s
1620 self._status = s
1625
1621
1626 return s
1622 return s
1627
1623
1628 def _buildstatus(self, other, s, match, listignored, listclean,
1624 def _buildstatus(self, other, s, match, listignored, listclean,
1629 listunknown):
1625 listunknown):
1630 """build a status with respect to another context
1626 """build a status with respect to another context
1631
1627
1632 This includes logic for maintaining the fast path of status when
1628 This includes logic for maintaining the fast path of status when
1633 comparing the working directory against its parent, which is to skip
1629 comparing the working directory against its parent, which is to skip
1634 building a new manifest if self (working directory) is not comparing
1630 building a new manifest if self (working directory) is not comparing
1635 against its parent (repo['.']).
1631 against its parent (repo['.']).
1636 """
1632 """
1637 s = self._dirstatestatus(match, listignored, listclean, listunknown)
1633 s = self._dirstatestatus(match, listignored, listclean, listunknown)
1638 # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
1634 # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
1639 # might have accidentally ended up with the entire contents of the file
1635 # might have accidentally ended up with the entire contents of the file
1640 # they are supposed to be linking to.
1636 # they are supposed to be linking to.
1641 s.modified[:] = self._filtersuspectsymlink(s.modified)
1637 s.modified[:] = self._filtersuspectsymlink(s.modified)
1642 if other != self._repo['.']:
1638 if other != self._repo['.']:
1643 s = super(workingctx, self)._buildstatus(other, s, match,
1639 s = super(workingctx, self)._buildstatus(other, s, match,
1644 listignored, listclean,
1640 listignored, listclean,
1645 listunknown)
1641 listunknown)
1646 return s
1642 return s
1647
1643
1648 def _matchstatus(self, other, match):
1644 def _matchstatus(self, other, match):
1649 """override the match method with a filter for directory patterns
1645 """override the match method with a filter for directory patterns
1650
1646
1651 We use inheritance to customize the match.bad method only in cases of
1647 We use inheritance to customize the match.bad method only in cases of
1652 workingctx since it belongs only to the working directory when
1648 workingctx since it belongs only to the working directory when
1653 comparing against the parent changeset.
1649 comparing against the parent changeset.
1654
1650
1655 If we aren't comparing against the working directory's parent, then we
1651 If we aren't comparing against the working directory's parent, then we
1656 just use the default match object sent to us.
1652 just use the default match object sent to us.
1657 """
1653 """
1658 superself = super(workingctx, self)
1654 superself = super(workingctx, self)
1659 match = superself._matchstatus(other, match)
1655 match = superself._matchstatus(other, match)
1660 if other != self._repo['.']:
1656 if other != self._repo['.']:
1661 def bad(f, msg):
1657 def bad(f, msg):
1662 # 'f' may be a directory pattern from 'match.files()',
1658 # 'f' may be a directory pattern from 'match.files()',
1663 # so 'f not in ctx1' is not enough
1659 # so 'f not in ctx1' is not enough
1664 if f not in other and not other.hasdir(f):
1660 if f not in other and not other.hasdir(f):
1665 self._repo.ui.warn('%s: %s\n' %
1661 self._repo.ui.warn('%s: %s\n' %
1666 (self._repo.dirstate.pathto(f), msg))
1662 (self._repo.dirstate.pathto(f), msg))
1667 match.bad = bad
1663 match.bad = bad
1668 return match
1664 return match
1669
1665
1670 class committablefilectx(basefilectx):
1666 class committablefilectx(basefilectx):
1671 """A committablefilectx provides common functionality for a file context
1667 """A committablefilectx provides common functionality for a file context
1672 that wants the ability to commit, e.g. workingfilectx or memfilectx."""
1668 that wants the ability to commit, e.g. workingfilectx or memfilectx."""
1673 def __init__(self, repo, path, filelog=None, ctx=None):
1669 def __init__(self, repo, path, filelog=None, ctx=None):
1674 self._repo = repo
1670 self._repo = repo
1675 self._path = path
1671 self._path = path
1676 self._changeid = None
1672 self._changeid = None
1677 self._filerev = self._filenode = None
1673 self._filerev = self._filenode = None
1678
1674
1679 if filelog is not None:
1675 if filelog is not None:
1680 self._filelog = filelog
1676 self._filelog = filelog
1681 if ctx:
1677 if ctx:
1682 self._changectx = ctx
1678 self._changectx = ctx
1683
1679
1684 def __nonzero__(self):
1680 def __nonzero__(self):
1685 return True
1681 return True
1686
1682
1687 def linkrev(self):
1683 def linkrev(self):
1688 # linked to self._changectx no matter if file is modified or not
1684 # linked to self._changectx no matter if file is modified or not
1689 return self.rev()
1685 return self.rev()
1690
1686
1691 def parents(self):
1687 def parents(self):
1692 '''return parent filectxs, following copies if necessary'''
1688 '''return parent filectxs, following copies if necessary'''
1693 def filenode(ctx, path):
1689 def filenode(ctx, path):
1694 return ctx._manifest.get(path, nullid)
1690 return ctx._manifest.get(path, nullid)
1695
1691
1696 path = self._path
1692 path = self._path
1697 fl = self._filelog
1693 fl = self._filelog
1698 pcl = self._changectx._parents
1694 pcl = self._changectx._parents
1699 renamed = self.renamed()
1695 renamed = self.renamed()
1700
1696
1701 if renamed:
1697 if renamed:
1702 pl = [renamed + (None,)]
1698 pl = [renamed + (None,)]
1703 else:
1699 else:
1704 pl = [(path, filenode(pcl[0], path), fl)]
1700 pl = [(path, filenode(pcl[0], path), fl)]
1705
1701
1706 for pc in pcl[1:]:
1702 for pc in pcl[1:]:
1707 pl.append((path, filenode(pc, path), fl))
1703 pl.append((path, filenode(pc, path), fl))
1708
1704
1709 return [self._parentfilectx(p, fileid=n, filelog=l)
1705 return [self._parentfilectx(p, fileid=n, filelog=l)
1710 for p, n, l in pl if n != nullid]
1706 for p, n, l in pl if n != nullid]
1711
1707
1712 def children(self):
1708 def children(self):
1713 return []
1709 return []
1714
1710
1715 class workingfilectx(committablefilectx):
1711 class workingfilectx(committablefilectx):
1716 """A workingfilectx object makes access to data related to a particular
1712 """A workingfilectx object makes access to data related to a particular
1717 file in the working directory convenient."""
1713 file in the working directory convenient."""
1718 def __init__(self, repo, path, filelog=None, workingctx=None):
1714 def __init__(self, repo, path, filelog=None, workingctx=None):
1719 super(workingfilectx, self).__init__(repo, path, filelog, workingctx)
1715 super(workingfilectx, self).__init__(repo, path, filelog, workingctx)
1720
1716
1721 @propertycache
1717 @propertycache
1722 def _changectx(self):
1718 def _changectx(self):
1723 return workingctx(self._repo)
1719 return workingctx(self._repo)
1724
1720
1725 def data(self):
1721 def data(self):
1726 return self._repo.wread(self._path)
1722 return self._repo.wread(self._path)
1727 def renamed(self):
1723 def renamed(self):
1728 rp = self._repo.dirstate.copied(self._path)
1724 rp = self._repo.dirstate.copied(self._path)
1729 if not rp:
1725 if not rp:
1730 return None
1726 return None
1731 return rp, self._changectx._parents[0]._manifest.get(rp, nullid)
1727 return rp, self._changectx._parents[0]._manifest.get(rp, nullid)
1732
1728
1733 def size(self):
1729 def size(self):
1734 return self._repo.wvfs.lstat(self._path).st_size
1730 return self._repo.wvfs.lstat(self._path).st_size
1735 def date(self):
1731 def date(self):
1736 t, tz = self._changectx.date()
1732 t, tz = self._changectx.date()
1737 try:
1733 try:
1738 return (self._repo.wvfs.lstat(self._path).st_mtime, tz)
1734 return (self._repo.wvfs.lstat(self._path).st_mtime, tz)
1739 except OSError as err:
1735 except OSError as err:
1740 if err.errno != errno.ENOENT:
1736 if err.errno != errno.ENOENT:
1741 raise
1737 raise
1742 return (t, tz)
1738 return (t, tz)
1743
1739
1744 def cmp(self, fctx):
1740 def cmp(self, fctx):
1745 """compare with other file context
1741 """compare with other file context
1746
1742
1747 returns True if different than fctx.
1743 returns True if different than fctx.
1748 """
1744 """
1749 # fctx should be a filectx (not a workingfilectx)
1745 # fctx should be a filectx (not a workingfilectx)
1750 # invert comparison to reuse the same code path
1746 # invert comparison to reuse the same code path
1751 return fctx.cmp(self)
1747 return fctx.cmp(self)
1752
1748
1753 def remove(self, ignoremissing=False):
1749 def remove(self, ignoremissing=False):
1754 """wraps unlink for a repo's working directory"""
1750 """wraps unlink for a repo's working directory"""
1755 util.unlinkpath(self._repo.wjoin(self._path), ignoremissing)
1751 util.unlinkpath(self._repo.wjoin(self._path), ignoremissing)
1756
1752
1757 def write(self, data, flags):
1753 def write(self, data, flags):
1758 """wraps repo.wwrite"""
1754 """wraps repo.wwrite"""
1759 self._repo.wwrite(self._path, data, flags)
1755 self._repo.wwrite(self._path, data, flags)
1760
1756
1761 class workingcommitctx(workingctx):
1757 class workingcommitctx(workingctx):
1762 """A workingcommitctx object makes access to data related to
1758 """A workingcommitctx object makes access to data related to
1763 the revision being committed convenient.
1759 the revision being committed convenient.
1764
1760
1765 This hides changes in the working directory, if they aren't
1761 This hides changes in the working directory, if they aren't
1766 committed in this context.
1762 committed in this context.
1767 """
1763 """
1768 def __init__(self, repo, changes,
1764 def __init__(self, repo, changes,
1769 text="", user=None, date=None, extra=None):
1765 text="", user=None, date=None, extra=None):
1770 super(workingctx, self).__init__(repo, text, user, date, extra,
1766 super(workingctx, self).__init__(repo, text, user, date, extra,
1771 changes)
1767 changes)
1772
1768
1773 def _dirstatestatus(self, match=None, ignored=False, clean=False,
1769 def _dirstatestatus(self, match=None, ignored=False, clean=False,
1774 unknown=False):
1770 unknown=False):
1775 """Return matched files only in ``self._status``
1771 """Return matched files only in ``self._status``
1776
1772
1777 Uncommitted files appear "clean" via this context, even if
1773 Uncommitted files appear "clean" via this context, even if
1778 they aren't actually so in the working directory.
1774 they aren't actually so in the working directory.
1779 """
1775 """
1780 match = match or matchmod.always(self._repo.root, self._repo.getcwd())
1776 match = match or matchmod.always(self._repo.root, self._repo.getcwd())
1781 if clean:
1777 if clean:
1782 clean = [f for f in self._manifest if f not in self._changedset]
1778 clean = [f for f in self._manifest if f not in self._changedset]
1783 else:
1779 else:
1784 clean = []
1780 clean = []
1785 return scmutil.status([f for f in self._status.modified if match(f)],
1781 return scmutil.status([f for f in self._status.modified if match(f)],
1786 [f for f in self._status.added if match(f)],
1782 [f for f in self._status.added if match(f)],
1787 [f for f in self._status.removed if match(f)],
1783 [f for f in self._status.removed if match(f)],
1788 [], [], [], clean)
1784 [], [], [], clean)
1789
1785
1790 @propertycache
1786 @propertycache
1791 def _changedset(self):
1787 def _changedset(self):
1792 """Return the set of files changed in this context
1788 """Return the set of files changed in this context
1793 """
1789 """
1794 changed = set(self._status.modified)
1790 changed = set(self._status.modified)
1795 changed.update(self._status.added)
1791 changed.update(self._status.added)
1796 changed.update(self._status.removed)
1792 changed.update(self._status.removed)
1797 return changed
1793 return changed
1798
1794
1799 def makecachingfilectxfn(func):
1795 def makecachingfilectxfn(func):
1800 """Create a filectxfn that caches based on the path.
1796 """Create a filectxfn that caches based on the path.
1801
1797
1802 We can't use util.cachefunc because it uses all arguments as the cache
1798 We can't use util.cachefunc because it uses all arguments as the cache
1803 key and this creates a cycle since the arguments include the repo and
1799 key and this creates a cycle since the arguments include the repo and
1804 memctx.
1800 memctx.
1805 """
1801 """
1806 cache = {}
1802 cache = {}
1807
1803
1808 def getfilectx(repo, memctx, path):
1804 def getfilectx(repo, memctx, path):
1809 if path not in cache:
1805 if path not in cache:
1810 cache[path] = func(repo, memctx, path)
1806 cache[path] = func(repo, memctx, path)
1811 return cache[path]
1807 return cache[path]
1812
1808
1813 return getfilectx
1809 return getfilectx
1814
1810
1815 class memctx(committablectx):
1811 class memctx(committablectx):
1816 """Use memctx to perform in-memory commits via localrepo.commitctx().
1812 """Use memctx to perform in-memory commits via localrepo.commitctx().
1817
1813
1818 Revision information is supplied at initialization time while
1814 Revision information is supplied at initialization time while
1819 related files data and is made available through a callback
1815 related files data and is made available through a callback
1820 mechanism. 'repo' is the current localrepo, 'parents' is a
1816 mechanism. 'repo' is the current localrepo, 'parents' is a
1821 sequence of two parent revisions identifiers (pass None for every
1817 sequence of two parent revisions identifiers (pass None for every
1822 missing parent), 'text' is the commit message and 'files' lists
1818 missing parent), 'text' is the commit message and 'files' lists
1823 names of files touched by the revision (normalized and relative to
1819 names of files touched by the revision (normalized and relative to
1824 repository root).
1820 repository root).
1825
1821
1826 filectxfn(repo, memctx, path) is a callable receiving the
1822 filectxfn(repo, memctx, path) is a callable receiving the
1827 repository, the current memctx object and the normalized path of
1823 repository, the current memctx object and the normalized path of
1828 requested file, relative to repository root. It is fired by the
1824 requested file, relative to repository root. It is fired by the
1829 commit function for every file in 'files', but calls order is
1825 commit function for every file in 'files', but calls order is
1830 undefined. If the file is available in the revision being
1826 undefined. If the file is available in the revision being
1831 committed (updated or added), filectxfn returns a memfilectx
1827 committed (updated or added), filectxfn returns a memfilectx
1832 object. If the file was removed, filectxfn raises an
1828 object. If the file was removed, filectxfn raises an
1833 IOError. Moved files are represented by marking the source file
1829 IOError. Moved files are represented by marking the source file
1834 removed and the new file added with copy information (see
1830 removed and the new file added with copy information (see
1835 memfilectx).
1831 memfilectx).
1836
1832
1837 user receives the committer name and defaults to current
1833 user receives the committer name and defaults to current
1838 repository username, date is the commit date in any format
1834 repository username, date is the commit date in any format
1839 supported by util.parsedate() and defaults to current date, extra
1835 supported by util.parsedate() and defaults to current date, extra
1840 is a dictionary of metadata or is left empty.
1836 is a dictionary of metadata or is left empty.
1841 """
1837 """
1842
1838
1843 # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
1839 # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
1844 # Extensions that need to retain compatibility across Mercurial 3.1 can use
1840 # Extensions that need to retain compatibility across Mercurial 3.1 can use
1845 # this field to determine what to do in filectxfn.
1841 # this field to determine what to do in filectxfn.
1846 _returnnoneformissingfiles = True
1842 _returnnoneformissingfiles = True
1847
1843
1848 def __init__(self, repo, parents, text, files, filectxfn, user=None,
1844 def __init__(self, repo, parents, text, files, filectxfn, user=None,
1849 date=None, extra=None, editor=False):
1845 date=None, extra=None, editor=False):
1850 super(memctx, self).__init__(repo, text, user, date, extra)
1846 super(memctx, self).__init__(repo, text, user, date, extra)
1851 self._rev = None
1847 self._rev = None
1852 self._node = None
1848 self._node = None
1853 parents = [(p or nullid) for p in parents]
1849 parents = [(p or nullid) for p in parents]
1854 p1, p2 = parents
1850 p1, p2 = parents
1855 self._parents = [changectx(self._repo, p) for p in (p1, p2)]
1851 self._parents = [changectx(self._repo, p) for p in (p1, p2)]
1856 files = sorted(set(files))
1852 files = sorted(set(files))
1857 self._files = files
1853 self._files = files
1858 self.substate = {}
1854 self.substate = {}
1859
1855
1860 # if store is not callable, wrap it in a function
1856 # if store is not callable, wrap it in a function
1861 if not callable(filectxfn):
1857 if not callable(filectxfn):
1862 def getfilectx(repo, memctx, path):
1858 def getfilectx(repo, memctx, path):
1863 fctx = filectxfn[path]
1859 fctx = filectxfn[path]
1864 # this is weird but apparently we only keep track of one parent
1860 # this is weird but apparently we only keep track of one parent
1865 # (why not only store that instead of a tuple?)
1861 # (why not only store that instead of a tuple?)
1866 copied = fctx.renamed()
1862 copied = fctx.renamed()
1867 if copied:
1863 if copied:
1868 copied = copied[0]
1864 copied = copied[0]
1869 return memfilectx(repo, path, fctx.data(),
1865 return memfilectx(repo, path, fctx.data(),
1870 islink=fctx.islink(), isexec=fctx.isexec(),
1866 islink=fctx.islink(), isexec=fctx.isexec(),
1871 copied=copied, memctx=memctx)
1867 copied=copied, memctx=memctx)
1872 self._filectxfn = getfilectx
1868 self._filectxfn = getfilectx
1873 else:
1869 else:
1874 # memoizing increases performance for e.g. vcs convert scenarios.
1870 # memoizing increases performance for e.g. vcs convert scenarios.
1875 self._filectxfn = makecachingfilectxfn(filectxfn)
1871 self._filectxfn = makecachingfilectxfn(filectxfn)
1876
1872
1877 if extra:
1873 if extra:
1878 self._extra = extra.copy()
1874 self._extra = extra.copy()
1879 else:
1875 else:
1880 self._extra = {}
1876 self._extra = {}
1881
1877
1882 if self._extra.get('branch', '') == '':
1878 if self._extra.get('branch', '') == '':
1883 self._extra['branch'] = 'default'
1879 self._extra['branch'] = 'default'
1884
1880
1885 if editor:
1881 if editor:
1886 self._text = editor(self._repo, self, [])
1882 self._text = editor(self._repo, self, [])
1887 self._repo.savecommitmessage(self._text)
1883 self._repo.savecommitmessage(self._text)
1888
1884
1889 def filectx(self, path, filelog=None):
1885 def filectx(self, path, filelog=None):
1890 """get a file context from the working directory
1886 """get a file context from the working directory
1891
1887
1892 Returns None if file doesn't exist and should be removed."""
1888 Returns None if file doesn't exist and should be removed."""
1893 return self._filectxfn(self._repo, self, path)
1889 return self._filectxfn(self._repo, self, path)
1894
1890
1895 def commit(self):
1891 def commit(self):
1896 """commit context to the repo"""
1892 """commit context to the repo"""
1897 return self._repo.commitctx(self)
1893 return self._repo.commitctx(self)
1898
1894
1899 @propertycache
1895 @propertycache
1900 def _manifest(self):
1896 def _manifest(self):
1901 """generate a manifest based on the return values of filectxfn"""
1897 """generate a manifest based on the return values of filectxfn"""
1902
1898
1903 # keep this simple for now; just worry about p1
1899 # keep this simple for now; just worry about p1
1904 pctx = self._parents[0]
1900 pctx = self._parents[0]
1905 man = pctx.manifest().copy()
1901 man = pctx.manifest().copy()
1906
1902
1907 for f in self._status.modified:
1903 for f in self._status.modified:
1908 p1node = nullid
1904 p1node = nullid
1909 p2node = nullid
1905 p2node = nullid
1910 p = pctx[f].parents() # if file isn't in pctx, check p2?
1906 p = pctx[f].parents() # if file isn't in pctx, check p2?
1911 if len(p) > 0:
1907 if len(p) > 0:
1912 p1node = p[0].filenode()
1908 p1node = p[0].filenode()
1913 if len(p) > 1:
1909 if len(p) > 1:
1914 p2node = p[1].filenode()
1910 p2node = p[1].filenode()
1915 man[f] = revlog.hash(self[f].data(), p1node, p2node)
1911 man[f] = revlog.hash(self[f].data(), p1node, p2node)
1916
1912
1917 for f in self._status.added:
1913 for f in self._status.added:
1918 man[f] = revlog.hash(self[f].data(), nullid, nullid)
1914 man[f] = revlog.hash(self[f].data(), nullid, nullid)
1919
1915
1920 for f in self._status.removed:
1916 for f in self._status.removed:
1921 if f in man:
1917 if f in man:
1922 del man[f]
1918 del man[f]
1923
1919
1924 return man
1920 return man
1925
1921
1926 @propertycache
1922 @propertycache
1927 def _status(self):
1923 def _status(self):
1928 """Calculate exact status from ``files`` specified at construction
1924 """Calculate exact status from ``files`` specified at construction
1929 """
1925 """
1930 man1 = self.p1().manifest()
1926 man1 = self.p1().manifest()
1931 p2 = self._parents[1]
1927 p2 = self._parents[1]
1932 # "1 < len(self._parents)" can't be used for checking
1928 # "1 < len(self._parents)" can't be used for checking
1933 # existence of the 2nd parent, because "memctx._parents" is
1929 # existence of the 2nd parent, because "memctx._parents" is
1934 # explicitly initialized by the list, of which length is 2.
1930 # explicitly initialized by the list, of which length is 2.
1935 if p2.node() != nullid:
1931 if p2.node() != nullid:
1936 man2 = p2.manifest()
1932 man2 = p2.manifest()
1937 managing = lambda f: f in man1 or f in man2
1933 managing = lambda f: f in man1 or f in man2
1938 else:
1934 else:
1939 managing = lambda f: f in man1
1935 managing = lambda f: f in man1
1940
1936
1941 modified, added, removed = [], [], []
1937 modified, added, removed = [], [], []
1942 for f in self._files:
1938 for f in self._files:
1943 if not managing(f):
1939 if not managing(f):
1944 added.append(f)
1940 added.append(f)
1945 elif self[f]:
1941 elif self[f]:
1946 modified.append(f)
1942 modified.append(f)
1947 else:
1943 else:
1948 removed.append(f)
1944 removed.append(f)
1949
1945
1950 return scmutil.status(modified, added, removed, [], [], [], [])
1946 return scmutil.status(modified, added, removed, [], [], [], [])
1951
1947
1952 class memfilectx(committablefilectx):
1948 class memfilectx(committablefilectx):
1953 """memfilectx represents an in-memory file to commit.
1949 """memfilectx represents an in-memory file to commit.
1954
1950
1955 See memctx and committablefilectx for more details.
1951 See memctx and committablefilectx for more details.
1956 """
1952 """
1957 def __init__(self, repo, path, data, islink=False,
1953 def __init__(self, repo, path, data, islink=False,
1958 isexec=False, copied=None, memctx=None):
1954 isexec=False, copied=None, memctx=None):
1959 """
1955 """
1960 path is the normalized file path relative to repository root.
1956 path is the normalized file path relative to repository root.
1961 data is the file content as a string.
1957 data is the file content as a string.
1962 islink is True if the file is a symbolic link.
1958 islink is True if the file is a symbolic link.
1963 isexec is True if the file is executable.
1959 isexec is True if the file is executable.
1964 copied is the source file path if current file was copied in the
1960 copied is the source file path if current file was copied in the
1965 revision being committed, or None."""
1961 revision being committed, or None."""
1966 super(memfilectx, self).__init__(repo, path, None, memctx)
1962 super(memfilectx, self).__init__(repo, path, None, memctx)
1967 self._data = data
1963 self._data = data
1968 self._flags = (islink and 'l' or '') + (isexec and 'x' or '')
1964 self._flags = (islink and 'l' or '') + (isexec and 'x' or '')
1969 self._copied = None
1965 self._copied = None
1970 if copied:
1966 if copied:
1971 self._copied = (copied, nullid)
1967 self._copied = (copied, nullid)
1972
1968
1973 def data(self):
1969 def data(self):
1974 return self._data
1970 return self._data
1975 def size(self):
1971 def size(self):
1976 return len(self.data())
1972 return len(self.data())
1977 def flags(self):
1973 def flags(self):
1978 return self._flags
1974 return self._flags
1979 def renamed(self):
1975 def renamed(self):
1980 return self._copied
1976 return self._copied
1981
1977
1982 def remove(self, ignoremissing=False):
1978 def remove(self, ignoremissing=False):
1983 """wraps unlink for a repo's working directory"""
1979 """wraps unlink for a repo's working directory"""
1984 # need to figure out what to do here
1980 # need to figure out what to do here
1985 del self._changectx[self._path]
1981 del self._changectx[self._path]
1986
1982
1987 def write(self, data, flags):
1983 def write(self, data, flags):
1988 """wraps repo.wwrite"""
1984 """wraps repo.wwrite"""
1989 self._data = data
1985 self._data = data
@@ -1,714 +1,714
1 # copies.py - copy detection for Mercurial
1 # copies.py - copy detection for Mercurial
2 #
2 #
3 # Copyright 2008 Matt Mackall <mpm@selenic.com>
3 # Copyright 2008 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import heapq
10 import heapq
11
11
12 from . import (
12 from . import (
13 node,
13 node,
14 pathutil,
14 pathutil,
15 scmutil,
15 scmutil,
16 util,
16 util,
17 )
17 )
18
18
19 def _findlimit(repo, a, b):
19 def _findlimit(repo, a, b):
20 """
20 """
21 Find the last revision that needs to be checked to ensure that a full
21 Find the last revision that needs to be checked to ensure that a full
22 transitive closure for file copies can be properly calculated.
22 transitive closure for file copies can be properly calculated.
23 Generally, this means finding the earliest revision number that's an
23 Generally, this means finding the earliest revision number that's an
24 ancestor of a or b but not both, except when a or b is a direct descendent
24 ancestor of a or b but not both, except when a or b is a direct descendent
25 of the other, in which case we can return the minimum revnum of a and b.
25 of the other, in which case we can return the minimum revnum of a and b.
26 None if no such revision exists.
26 None if no such revision exists.
27 """
27 """
28
28
29 # basic idea:
29 # basic idea:
30 # - mark a and b with different sides
30 # - mark a and b with different sides
31 # - if a parent's children are all on the same side, the parent is
31 # - if a parent's children are all on the same side, the parent is
32 # on that side, otherwise it is on no side
32 # on that side, otherwise it is on no side
33 # - walk the graph in topological order with the help of a heap;
33 # - walk the graph in topological order with the help of a heap;
34 # - add unseen parents to side map
34 # - add unseen parents to side map
35 # - clear side of any parent that has children on different sides
35 # - clear side of any parent that has children on different sides
36 # - track number of interesting revs that might still be on a side
36 # - track number of interesting revs that might still be on a side
37 # - track the lowest interesting rev seen
37 # - track the lowest interesting rev seen
38 # - quit when interesting revs is zero
38 # - quit when interesting revs is zero
39
39
40 cl = repo.changelog
40 cl = repo.changelog
41 working = len(cl) # pseudo rev for the working directory
41 working = len(cl) # pseudo rev for the working directory
42 if a is None:
42 if a is None:
43 a = working
43 a = working
44 if b is None:
44 if b is None:
45 b = working
45 b = working
46
46
47 side = {a: -1, b: 1}
47 side = {a: -1, b: 1}
48 visit = [-a, -b]
48 visit = [-a, -b]
49 heapq.heapify(visit)
49 heapq.heapify(visit)
50 interesting = len(visit)
50 interesting = len(visit)
51 hascommonancestor = False
51 hascommonancestor = False
52 limit = working
52 limit = working
53
53
54 while interesting:
54 while interesting:
55 r = -heapq.heappop(visit)
55 r = -heapq.heappop(visit)
56 if r == working:
56 if r == working:
57 parents = [cl.rev(p) for p in repo.dirstate.parents()]
57 parents = [cl.rev(p) for p in repo.dirstate.parents()]
58 else:
58 else:
59 parents = cl.parentrevs(r)
59 parents = cl.parentrevs(r)
60 for p in parents:
60 for p in parents:
61 if p < 0:
61 if p < 0:
62 continue
62 continue
63 if p not in side:
63 if p not in side:
64 # first time we see p; add it to visit
64 # first time we see p; add it to visit
65 side[p] = side[r]
65 side[p] = side[r]
66 if side[p]:
66 if side[p]:
67 interesting += 1
67 interesting += 1
68 heapq.heappush(visit, -p)
68 heapq.heappush(visit, -p)
69 elif side[p] and side[p] != side[r]:
69 elif side[p] and side[p] != side[r]:
70 # p was interesting but now we know better
70 # p was interesting but now we know better
71 side[p] = 0
71 side[p] = 0
72 interesting -= 1
72 interesting -= 1
73 hascommonancestor = True
73 hascommonancestor = True
74 if side[r]:
74 if side[r]:
75 limit = r # lowest rev visited
75 limit = r # lowest rev visited
76 interesting -= 1
76 interesting -= 1
77
77
78 if not hascommonancestor:
78 if not hascommonancestor:
79 return None
79 return None
80
80
81 # Consider the following flow (see test-commit-amend.t under issue4405):
81 # Consider the following flow (see test-commit-amend.t under issue4405):
82 # 1/ File 'a0' committed
82 # 1/ File 'a0' committed
83 # 2/ File renamed from 'a0' to 'a1' in a new commit (call it 'a1')
83 # 2/ File renamed from 'a0' to 'a1' in a new commit (call it 'a1')
84 # 3/ Move back to first commit
84 # 3/ Move back to first commit
85 # 4/ Create a new commit via revert to contents of 'a1' (call it 'a1-amend')
85 # 4/ Create a new commit via revert to contents of 'a1' (call it 'a1-amend')
86 # 5/ Rename file from 'a1' to 'a2' and commit --amend 'a1-msg'
86 # 5/ Rename file from 'a1' to 'a2' and commit --amend 'a1-msg'
87 #
87 #
88 # During the amend in step five, we will be in this state:
88 # During the amend in step five, we will be in this state:
89 #
89 #
90 # @ 3 temporary amend commit for a1-amend
90 # @ 3 temporary amend commit for a1-amend
91 # |
91 # |
92 # o 2 a1-amend
92 # o 2 a1-amend
93 # |
93 # |
94 # | o 1 a1
94 # | o 1 a1
95 # |/
95 # |/
96 # o 0 a0
96 # o 0 a0
97 #
97 #
98 # When _findlimit is called, a and b are revs 3 and 0, so limit will be 2,
98 # When _findlimit is called, a and b are revs 3 and 0, so limit will be 2,
99 # yet the filelog has the copy information in rev 1 and we will not look
99 # yet the filelog has the copy information in rev 1 and we will not look
100 # back far enough unless we also look at the a and b as candidates.
100 # back far enough unless we also look at the a and b as candidates.
101 # This only occurs when a is a descendent of b or visa-versa.
101 # This only occurs when a is a descendent of b or visa-versa.
102 return min(limit, a, b)
102 return min(limit, a, b)
103
103
104 def _chain(src, dst, a, b):
104 def _chain(src, dst, a, b):
105 '''chain two sets of copies a->b'''
105 '''chain two sets of copies a->b'''
106 t = a.copy()
106 t = a.copy()
107 for k, v in b.iteritems():
107 for k, v in b.iteritems():
108 if v in t:
108 if v in t:
109 # found a chain
109 # found a chain
110 if t[v] != k:
110 if t[v] != k:
111 # file wasn't renamed back to itself
111 # file wasn't renamed back to itself
112 t[k] = t[v]
112 t[k] = t[v]
113 if v not in dst:
113 if v not in dst:
114 # chain was a rename, not a copy
114 # chain was a rename, not a copy
115 del t[v]
115 del t[v]
116 if v in src:
116 if v in src:
117 # file is a copy of an existing file
117 # file is a copy of an existing file
118 t[k] = v
118 t[k] = v
119
119
120 # remove criss-crossed copies
120 # remove criss-crossed copies
121 for k, v in t.items():
121 for k, v in t.items():
122 if k in src and v in dst:
122 if k in src and v in dst:
123 del t[k]
123 del t[k]
124
124
125 return t
125 return t
126
126
127 def _tracefile(fctx, am, limit=-1):
127 def _tracefile(fctx, am, limit=-1):
128 '''return file context that is the ancestor of fctx present in ancestor
128 '''return file context that is the ancestor of fctx present in ancestor
129 manifest am, stopping after the first ancestor lower than limit'''
129 manifest am, stopping after the first ancestor lower than limit'''
130
130
131 for f in fctx.ancestors():
131 for f in fctx.ancestors():
132 if am.get(f.path(), None) == f.filenode():
132 if am.get(f.path(), None) == f.filenode():
133 return f
133 return f
134 if limit >= 0 and f.linkrev() < limit and f.rev() < limit:
134 if limit >= 0 and f.linkrev() < limit and f.rev() < limit:
135 return None
135 return None
136
136
137 def _dirstatecopies(d):
137 def _dirstatecopies(d):
138 ds = d._repo.dirstate
138 ds = d._repo.dirstate
139 c = ds.copies().copy()
139 c = ds.copies().copy()
140 for k in c.keys():
140 for k in c.keys():
141 if ds[k] not in 'anm':
141 if ds[k] not in 'anm':
142 del c[k]
142 del c[k]
143 return c
143 return c
144
144
145 def _computeforwardmissing(a, b, match=None):
145 def _computeforwardmissing(a, b, match=None):
146 """Computes which files are in b but not a.
146 """Computes which files are in b but not a.
147 This is its own function so extensions can easily wrap this call to see what
147 This is its own function so extensions can easily wrap this call to see what
148 files _forwardcopies is about to process.
148 files _forwardcopies is about to process.
149 """
149 """
150 ma = a.manifest()
150 ma = a.manifest()
151 mb = b.manifest()
151 mb = b.manifest()
152 if match:
152 if match:
153 ma = ma.matches(match)
153 ma = ma.matches(match)
154 mb = mb.matches(match)
154 mb = mb.matches(match)
155 return mb.filesnotin(ma)
155 return mb.filesnotin(ma)
156
156
157 def _forwardcopies(a, b, match=None):
157 def _forwardcopies(a, b, match=None):
158 '''find {dst@b: src@a} copy mapping where a is an ancestor of b'''
158 '''find {dst@b: src@a} copy mapping where a is an ancestor of b'''
159
159
160 # check for working copy
160 # check for working copy
161 w = None
161 w = None
162 if b.rev() is None:
162 if b.rev() is None:
163 w = b
163 w = b
164 b = w.p1()
164 b = w.p1()
165 if a == b:
165 if a == b:
166 # short-circuit to avoid issues with merge states
166 # short-circuit to avoid issues with merge states
167 return _dirstatecopies(w)
167 return _dirstatecopies(w)
168
168
169 # files might have to be traced back to the fctx parent of the last
169 # files might have to be traced back to the fctx parent of the last
170 # one-side-only changeset, but not further back than that
170 # one-side-only changeset, but not further back than that
171 limit = _findlimit(a._repo, a.rev(), b.rev())
171 limit = _findlimit(a._repo, a.rev(), b.rev())
172 if limit is None:
172 if limit is None:
173 limit = -1
173 limit = -1
174 am = a.manifest()
174 am = a.manifest()
175
175
176 # find where new files came from
176 # find where new files came from
177 # we currently don't try to find where old files went, too expensive
177 # we currently don't try to find where old files went, too expensive
178 # this means we can miss a case like 'hg rm b; hg cp a b'
178 # this means we can miss a case like 'hg rm b; hg cp a b'
179 cm = {}
179 cm = {}
180
180
181 # Computing the forward missing is quite expensive on large manifests, since
181 # Computing the forward missing is quite expensive on large manifests, since
182 # it compares the entire manifests. We can optimize it in the common use
182 # it compares the entire manifests. We can optimize it in the common use
183 # case of computing what copies are in a commit versus its parent (like
183 # case of computing what copies are in a commit versus its parent (like
184 # during a rebase or histedit). Note, we exclude merge commits from this
184 # during a rebase or histedit). Note, we exclude merge commits from this
185 # optimization, since the ctx.files() for a merge commit is not correct for
185 # optimization, since the ctx.files() for a merge commit is not correct for
186 # this comparison.
186 # this comparison.
187 forwardmissingmatch = match
187 forwardmissingmatch = match
188 if not match and b.p1() == a and b.p2().node() == node.nullid:
188 if not match and b.p1() == a and b.p2().node() == node.nullid:
189 forwardmissingmatch = scmutil.matchfiles(a._repo, b.files())
189 forwardmissingmatch = scmutil.matchfiles(a._repo, b.files())
190 missing = _computeforwardmissing(a, b, match=forwardmissingmatch)
190 missing = _computeforwardmissing(a, b, match=forwardmissingmatch)
191
191
192 ancestrycontext = a._repo.changelog.ancestors([b.rev()], inclusive=True)
192 ancestrycontext = a._repo.changelog.ancestors([b.rev()], inclusive=True)
193 for f in missing:
193 for f in missing:
194 fctx = b[f]
194 fctx = b[f]
195 fctx._ancestrycontext = ancestrycontext
195 fctx._ancestrycontext = ancestrycontext
196 ofctx = _tracefile(fctx, am, limit)
196 ofctx = _tracefile(fctx, am, limit)
197 if ofctx:
197 if ofctx:
198 cm[f] = ofctx.path()
198 cm[f] = ofctx.path()
199
199
200 # combine copies from dirstate if necessary
200 # combine copies from dirstate if necessary
201 if w is not None:
201 if w is not None:
202 cm = _chain(a, w, cm, _dirstatecopies(w))
202 cm = _chain(a, w, cm, _dirstatecopies(w))
203
203
204 return cm
204 return cm
205
205
206 def _backwardrenames(a, b):
206 def _backwardrenames(a, b):
207 if a._repo.ui.configbool('experimental', 'disablecopytrace'):
207 if a._repo.ui.configbool('experimental', 'disablecopytrace'):
208 return {}
208 return {}
209
209
210 # Even though we're not taking copies into account, 1:n rename situations
210 # Even though we're not taking copies into account, 1:n rename situations
211 # can still exist (e.g. hg cp a b; hg mv a c). In those cases we
211 # can still exist (e.g. hg cp a b; hg mv a c). In those cases we
212 # arbitrarily pick one of the renames.
212 # arbitrarily pick one of the renames.
213 f = _forwardcopies(b, a)
213 f = _forwardcopies(b, a)
214 r = {}
214 r = {}
215 for k, v in sorted(f.iteritems()):
215 for k, v in sorted(f.iteritems()):
216 # remove copies
216 # remove copies
217 if v in a:
217 if v in a:
218 continue
218 continue
219 r[v] = k
219 r[v] = k
220 return r
220 return r
221
221
222 def pathcopies(x, y, match=None):
222 def pathcopies(x, y, match=None):
223 '''find {dst@y: src@x} copy mapping for directed compare'''
223 '''find {dst@y: src@x} copy mapping for directed compare'''
224 if x == y or not x or not y:
224 if x == y or not x or not y:
225 return {}
225 return {}
226 a = y.ancestor(x)
226 a = y.ancestor(x)
227 if a == x:
227 if a == x:
228 return _forwardcopies(x, y, match=match)
228 return _forwardcopies(x, y, match=match)
229 if a == y:
229 if a == y:
230 return _backwardrenames(x, y)
230 return _backwardrenames(x, y)
231 return _chain(x, y, _backwardrenames(x, a),
231 return _chain(x, y, _backwardrenames(x, a),
232 _forwardcopies(a, y, match=match))
232 _forwardcopies(a, y, match=match))
233
233
234 def _computenonoverlap(repo, c1, c2, addedinm1, addedinm2, baselabel=''):
234 def _computenonoverlap(repo, c1, c2, addedinm1, addedinm2, baselabel=''):
235 """Computes, based on addedinm1 and addedinm2, the files exclusive to c1
235 """Computes, based on addedinm1 and addedinm2, the files exclusive to c1
236 and c2. This is its own function so extensions can easily wrap this call
236 and c2. This is its own function so extensions can easily wrap this call
237 to see what files mergecopies is about to process.
237 to see what files mergecopies is about to process.
238
238
239 Even though c1 and c2 are not used in this function, they are useful in
239 Even though c1 and c2 are not used in this function, they are useful in
240 other extensions for being able to read the file nodes of the changed files.
240 other extensions for being able to read the file nodes of the changed files.
241
241
242 "baselabel" can be passed to help distinguish the multiple computations
242 "baselabel" can be passed to help distinguish the multiple computations
243 done in the graft case.
243 done in the graft case.
244 """
244 """
245 u1 = sorted(addedinm1 - addedinm2)
245 u1 = sorted(addedinm1 - addedinm2)
246 u2 = sorted(addedinm2 - addedinm1)
246 u2 = sorted(addedinm2 - addedinm1)
247
247
248 header = " unmatched files in %s"
248 header = " unmatched files in %s"
249 if baselabel:
249 if baselabel:
250 header += ' (from %s)' % baselabel
250 header += ' (from %s)' % baselabel
251 if u1:
251 if u1:
252 repo.ui.debug("%s:\n %s\n" % (header % 'local', "\n ".join(u1)))
252 repo.ui.debug("%s:\n %s\n" % (header % 'local', "\n ".join(u1)))
253 if u2:
253 if u2:
254 repo.ui.debug("%s:\n %s\n" % (header % 'other', "\n ".join(u2)))
254 repo.ui.debug("%s:\n %s\n" % (header % 'other', "\n ".join(u2)))
255 return u1, u2
255 return u1, u2
256
256
257 def _makegetfctx(ctx):
257 def _makegetfctx(ctx):
258 """return a 'getfctx' function suitable for _checkcopies usage
258 """return a 'getfctx' function suitable for _checkcopies usage
259
259
260 We have to re-setup the function building 'filectx' for each
260 We have to re-setup the function building 'filectx' for each
261 '_checkcopies' to ensure the linkrev adjustment is properly setup for
261 '_checkcopies' to ensure the linkrev adjustment is properly setup for
262 each. Linkrev adjustment is important to avoid bug in rename
262 each. Linkrev adjustment is important to avoid bug in rename
263 detection. Moreover, having a proper '_ancestrycontext' setup ensures
263 detection. Moreover, having a proper '_ancestrycontext' setup ensures
264 the performance impact of this adjustment is kept limited. Without it,
264 the performance impact of this adjustment is kept limited. Without it,
265 each file could do a full dag traversal making the time complexity of
265 each file could do a full dag traversal making the time complexity of
266 the operation explode (see issue4537).
266 the operation explode (see issue4537).
267
267
268 This function exists here mostly to limit the impact on stable. Feel
268 This function exists here mostly to limit the impact on stable. Feel
269 free to refactor on default.
269 free to refactor on default.
270 """
270 """
271 rev = ctx.rev()
271 rev = ctx.rev()
272 repo = ctx._repo
272 repo = ctx._repo
273 ac = getattr(ctx, '_ancestrycontext', None)
273 ac = getattr(ctx, '_ancestrycontext', None)
274 if ac is None:
274 if ac is None:
275 revs = [rev]
275 revs = [rev]
276 if rev is None:
276 if rev is None:
277 revs = [p.rev() for p in ctx.parents()]
277 revs = [p.rev() for p in ctx.parents()]
278 ac = repo.changelog.ancestors(revs, inclusive=True)
278 ac = repo.changelog.ancestors(revs, inclusive=True)
279 ctx._ancestrycontext = ac
279 ctx._ancestrycontext = ac
280 def makectx(f, n):
280 def makectx(f, n):
281 if len(n) != 20: # in a working context?
281 if len(n) != 20 or n in node.wdirnodes: # in a working context?
282 if ctx.rev() is None:
282 if ctx.rev() is None:
283 return ctx.filectx(f)
283 return ctx.filectx(f)
284 return repo[None][f]
284 return repo[None][f]
285 fctx = repo.filectx(f, fileid=n)
285 fctx = repo.filectx(f, fileid=n)
286 # setup only needed for filectx not create from a changectx
286 # setup only needed for filectx not create from a changectx
287 fctx._ancestrycontext = ac
287 fctx._ancestrycontext = ac
288 fctx._descendantrev = rev
288 fctx._descendantrev = rev
289 return fctx
289 return fctx
290 return util.lrucachefunc(makectx)
290 return util.lrucachefunc(makectx)
291
291
292 def _combinecopies(copyfrom, copyto, finalcopy, diverge, incompletediverge):
292 def _combinecopies(copyfrom, copyto, finalcopy, diverge, incompletediverge):
293 """combine partial copy paths"""
293 """combine partial copy paths"""
294 remainder = {}
294 remainder = {}
295 for f in copyfrom:
295 for f in copyfrom:
296 if f in copyto:
296 if f in copyto:
297 finalcopy[copyto[f]] = copyfrom[f]
297 finalcopy[copyto[f]] = copyfrom[f]
298 del copyto[f]
298 del copyto[f]
299 for f in incompletediverge:
299 for f in incompletediverge:
300 assert f not in diverge
300 assert f not in diverge
301 ic = incompletediverge[f]
301 ic = incompletediverge[f]
302 if ic[0] in copyto:
302 if ic[0] in copyto:
303 diverge[f] = [copyto[ic[0]], ic[1]]
303 diverge[f] = [copyto[ic[0]], ic[1]]
304 else:
304 else:
305 remainder[f] = ic
305 remainder[f] = ic
306 return remainder
306 return remainder
307
307
308 def mergecopies(repo, c1, c2, base):
308 def mergecopies(repo, c1, c2, base):
309 """
309 """
310 Find moves and copies between context c1 and c2 that are relevant
310 Find moves and copies between context c1 and c2 that are relevant
311 for merging. 'base' will be used as the merge base.
311 for merging. 'base' will be used as the merge base.
312
312
313 Returns four dicts: "copy", "movewithdir", "diverge", and
313 Returns four dicts: "copy", "movewithdir", "diverge", and
314 "renamedelete".
314 "renamedelete".
315
315
316 "copy" is a mapping from destination name -> source name,
316 "copy" is a mapping from destination name -> source name,
317 where source is in c1 and destination is in c2 or vice-versa.
317 where source is in c1 and destination is in c2 or vice-versa.
318
318
319 "movewithdir" is a mapping from source name -> destination name,
319 "movewithdir" is a mapping from source name -> destination name,
320 where the file at source present in one context but not the other
320 where the file at source present in one context but not the other
321 needs to be moved to destination by the merge process, because the
321 needs to be moved to destination by the merge process, because the
322 other context moved the directory it is in.
322 other context moved the directory it is in.
323
323
324 "diverge" is a mapping of source name -> list of destination names
324 "diverge" is a mapping of source name -> list of destination names
325 for divergent renames.
325 for divergent renames.
326
326
327 "renamedelete" is a mapping of source name -> list of destination
327 "renamedelete" is a mapping of source name -> list of destination
328 names for files deleted in c1 that were renamed in c2 or vice-versa.
328 names for files deleted in c1 that were renamed in c2 or vice-versa.
329 """
329 """
330 # avoid silly behavior for update from empty dir
330 # avoid silly behavior for update from empty dir
331 if not c1 or not c2 or c1 == c2:
331 if not c1 or not c2 or c1 == c2:
332 return {}, {}, {}, {}
332 return {}, {}, {}, {}
333
333
334 # avoid silly behavior for parent -> working dir
334 # avoid silly behavior for parent -> working dir
335 if c2.node() is None and c1.node() == repo.dirstate.p1():
335 if c2.node() is None and c1.node() == repo.dirstate.p1():
336 return repo.dirstate.copies(), {}, {}, {}
336 return repo.dirstate.copies(), {}, {}, {}
337
337
338 # Copy trace disabling is explicitly below the node == p1 logic above
338 # Copy trace disabling is explicitly below the node == p1 logic above
339 # because the logic above is required for a simple copy to be kept across a
339 # because the logic above is required for a simple copy to be kept across a
340 # rebase.
340 # rebase.
341 if repo.ui.configbool('experimental', 'disablecopytrace'):
341 if repo.ui.configbool('experimental', 'disablecopytrace'):
342 return {}, {}, {}, {}
342 return {}, {}, {}, {}
343
343
344 # In certain scenarios (e.g. graft, update or rebase), base can be
344 # In certain scenarios (e.g. graft, update or rebase), base can be
345 # overridden We still need to know a real common ancestor in this case We
345 # overridden We still need to know a real common ancestor in this case We
346 # can't just compute _c1.ancestor(_c2) and compare it to ca, because there
346 # can't just compute _c1.ancestor(_c2) and compare it to ca, because there
347 # can be multiple common ancestors, e.g. in case of bidmerge. Because our
347 # can be multiple common ancestors, e.g. in case of bidmerge. Because our
348 # caller may not know if the revision passed in lieu of the CA is a genuine
348 # caller may not know if the revision passed in lieu of the CA is a genuine
349 # common ancestor or not without explicitly checking it, it's better to
349 # common ancestor or not without explicitly checking it, it's better to
350 # determine that here.
350 # determine that here.
351 #
351 #
352 # base.descendant(wc) and base.descendant(base) are False, work around that
352 # base.descendant(wc) and base.descendant(base) are False, work around that
353 _c1 = c1.p1() if c1.rev() is None else c1
353 _c1 = c1.p1() if c1.rev() is None else c1
354 _c2 = c2.p1() if c2.rev() is None else c2
354 _c2 = c2.p1() if c2.rev() is None else c2
355 # an endpoint is "dirty" if it isn't a descendant of the merge base
355 # an endpoint is "dirty" if it isn't a descendant of the merge base
356 # if we have a dirty endpoint, we need to trigger graft logic, and also
356 # if we have a dirty endpoint, we need to trigger graft logic, and also
357 # keep track of which endpoint is dirty
357 # keep track of which endpoint is dirty
358 dirtyc1 = not (base == _c1 or base.descendant(_c1))
358 dirtyc1 = not (base == _c1 or base.descendant(_c1))
359 dirtyc2 = not (base== _c2 or base.descendant(_c2))
359 dirtyc2 = not (base== _c2 or base.descendant(_c2))
360 graft = dirtyc1 or dirtyc2
360 graft = dirtyc1 or dirtyc2
361 tca = base
361 tca = base
362 if graft:
362 if graft:
363 tca = _c1.ancestor(_c2)
363 tca = _c1.ancestor(_c2)
364
364
365 limit = _findlimit(repo, c1.rev(), c2.rev())
365 limit = _findlimit(repo, c1.rev(), c2.rev())
366 if limit is None:
366 if limit is None:
367 # no common ancestor, no copies
367 # no common ancestor, no copies
368 return {}, {}, {}, {}
368 return {}, {}, {}, {}
369 repo.ui.debug(" searching for copies back to rev %d\n" % limit)
369 repo.ui.debug(" searching for copies back to rev %d\n" % limit)
370
370
371 m1 = c1.manifest()
371 m1 = c1.manifest()
372 m2 = c2.manifest()
372 m2 = c2.manifest()
373 mb = base.manifest()
373 mb = base.manifest()
374
374
375 # gather data from _checkcopies:
375 # gather data from _checkcopies:
376 # - diverge = record all diverges in this dict
376 # - diverge = record all diverges in this dict
377 # - copy = record all non-divergent copies in this dict
377 # - copy = record all non-divergent copies in this dict
378 # - fullcopy = record all copies in this dict
378 # - fullcopy = record all copies in this dict
379 # - incomplete = record non-divergent partial copies here
379 # - incomplete = record non-divergent partial copies here
380 # - incompletediverge = record divergent partial copies here
380 # - incompletediverge = record divergent partial copies here
381 diverge = {} # divergence data is shared
381 diverge = {} # divergence data is shared
382 incompletediverge = {}
382 incompletediverge = {}
383 data1 = {'copy': {},
383 data1 = {'copy': {},
384 'fullcopy': {},
384 'fullcopy': {},
385 'incomplete': {},
385 'incomplete': {},
386 'diverge': diverge,
386 'diverge': diverge,
387 'incompletediverge': incompletediverge,
387 'incompletediverge': incompletediverge,
388 }
388 }
389 data2 = {'copy': {},
389 data2 = {'copy': {},
390 'fullcopy': {},
390 'fullcopy': {},
391 'incomplete': {},
391 'incomplete': {},
392 'diverge': diverge,
392 'diverge': diverge,
393 'incompletediverge': incompletediverge,
393 'incompletediverge': incompletediverge,
394 }
394 }
395
395
396 # find interesting file sets from manifests
396 # find interesting file sets from manifests
397 addedinm1 = m1.filesnotin(mb)
397 addedinm1 = m1.filesnotin(mb)
398 addedinm2 = m2.filesnotin(mb)
398 addedinm2 = m2.filesnotin(mb)
399 bothnew = sorted(addedinm1 & addedinm2)
399 bothnew = sorted(addedinm1 & addedinm2)
400 if tca == base:
400 if tca == base:
401 # unmatched file from base
401 # unmatched file from base
402 u1r, u2r = _computenonoverlap(repo, c1, c2, addedinm1, addedinm2)
402 u1r, u2r = _computenonoverlap(repo, c1, c2, addedinm1, addedinm2)
403 u1u, u2u = u1r, u2r
403 u1u, u2u = u1r, u2r
404 else:
404 else:
405 # unmatched file from base (DAG rotation in the graft case)
405 # unmatched file from base (DAG rotation in the graft case)
406 u1r, u2r = _computenonoverlap(repo, c1, c2, addedinm1, addedinm2,
406 u1r, u2r = _computenonoverlap(repo, c1, c2, addedinm1, addedinm2,
407 baselabel='base')
407 baselabel='base')
408 # unmatched file from topological common ancestors (no DAG rotation)
408 # unmatched file from topological common ancestors (no DAG rotation)
409 # need to recompute this for directory move handling when grafting
409 # need to recompute this for directory move handling when grafting
410 mta = tca.manifest()
410 mta = tca.manifest()
411 u1u, u2u = _computenonoverlap(repo, c1, c2, m1.filesnotin(mta),
411 u1u, u2u = _computenonoverlap(repo, c1, c2, m1.filesnotin(mta),
412 m2.filesnotin(mta),
412 m2.filesnotin(mta),
413 baselabel='topological common ancestor')
413 baselabel='topological common ancestor')
414
414
415 for f in u1u:
415 for f in u1u:
416 _checkcopies(c1, f, m1, m2, base, tca, dirtyc1, limit, data1)
416 _checkcopies(c1, f, m1, m2, base, tca, dirtyc1, limit, data1)
417
417
418 for f in u2u:
418 for f in u2u:
419 _checkcopies(c2, f, m2, m1, base, tca, dirtyc2, limit, data2)
419 _checkcopies(c2, f, m2, m1, base, tca, dirtyc2, limit, data2)
420
420
421 copy = dict(data1['copy'].items() + data2['copy'].items())
421 copy = dict(data1['copy'].items() + data2['copy'].items())
422 fullcopy = dict(data1['fullcopy'].items() + data2['fullcopy'].items())
422 fullcopy = dict(data1['fullcopy'].items() + data2['fullcopy'].items())
423
423
424 if dirtyc1:
424 if dirtyc1:
425 _combinecopies(data2['incomplete'], data1['incomplete'], copy, diverge,
425 _combinecopies(data2['incomplete'], data1['incomplete'], copy, diverge,
426 incompletediverge)
426 incompletediverge)
427 else:
427 else:
428 _combinecopies(data1['incomplete'], data2['incomplete'], copy, diverge,
428 _combinecopies(data1['incomplete'], data2['incomplete'], copy, diverge,
429 incompletediverge)
429 incompletediverge)
430
430
431 renamedelete = {}
431 renamedelete = {}
432 renamedeleteset = set()
432 renamedeleteset = set()
433 divergeset = set()
433 divergeset = set()
434 for of, fl in diverge.items():
434 for of, fl in diverge.items():
435 if len(fl) == 1 or of in c1 or of in c2:
435 if len(fl) == 1 or of in c1 or of in c2:
436 del diverge[of] # not actually divergent, or not a rename
436 del diverge[of] # not actually divergent, or not a rename
437 if of not in c1 and of not in c2:
437 if of not in c1 and of not in c2:
438 # renamed on one side, deleted on the other side, but filter
438 # renamed on one side, deleted on the other side, but filter
439 # out files that have been renamed and then deleted
439 # out files that have been renamed and then deleted
440 renamedelete[of] = [f for f in fl if f in c1 or f in c2]
440 renamedelete[of] = [f for f in fl if f in c1 or f in c2]
441 renamedeleteset.update(fl) # reverse map for below
441 renamedeleteset.update(fl) # reverse map for below
442 else:
442 else:
443 divergeset.update(fl) # reverse map for below
443 divergeset.update(fl) # reverse map for below
444
444
445 if bothnew:
445 if bothnew:
446 repo.ui.debug(" unmatched files new in both:\n %s\n"
446 repo.ui.debug(" unmatched files new in both:\n %s\n"
447 % "\n ".join(bothnew))
447 % "\n ".join(bothnew))
448 bothdiverge = {}
448 bothdiverge = {}
449 bothincompletediverge = {}
449 bothincompletediverge = {}
450 remainder = {}
450 remainder = {}
451 both1 = {'copy': {},
451 both1 = {'copy': {},
452 'fullcopy': {},
452 'fullcopy': {},
453 'incomplete': {},
453 'incomplete': {},
454 'diverge': bothdiverge,
454 'diverge': bothdiverge,
455 'incompletediverge': bothincompletediverge
455 'incompletediverge': bothincompletediverge
456 }
456 }
457 both2 = {'copy': {},
457 both2 = {'copy': {},
458 'fullcopy': {},
458 'fullcopy': {},
459 'incomplete': {},
459 'incomplete': {},
460 'diverge': bothdiverge,
460 'diverge': bothdiverge,
461 'incompletediverge': bothincompletediverge
461 'incompletediverge': bothincompletediverge
462 }
462 }
463 for f in bothnew:
463 for f in bothnew:
464 _checkcopies(c1, f, m1, m2, base, tca, dirtyc1, limit, both1)
464 _checkcopies(c1, f, m1, m2, base, tca, dirtyc1, limit, both1)
465 _checkcopies(c2, f, m2, m1, base, tca, dirtyc2, limit, both2)
465 _checkcopies(c2, f, m2, m1, base, tca, dirtyc2, limit, both2)
466 if dirtyc1:
466 if dirtyc1:
467 # incomplete copies may only be found on the "dirty" side for bothnew
467 # incomplete copies may only be found on the "dirty" side for bothnew
468 assert not both2['incomplete']
468 assert not both2['incomplete']
469 remainder = _combinecopies({}, both1['incomplete'], copy, bothdiverge,
469 remainder = _combinecopies({}, both1['incomplete'], copy, bothdiverge,
470 bothincompletediverge)
470 bothincompletediverge)
471 elif dirtyc2:
471 elif dirtyc2:
472 assert not both1['incomplete']
472 assert not both1['incomplete']
473 remainder = _combinecopies({}, both2['incomplete'], copy, bothdiverge,
473 remainder = _combinecopies({}, both2['incomplete'], copy, bothdiverge,
474 bothincompletediverge)
474 bothincompletediverge)
475 else:
475 else:
476 # incomplete copies and divergences can't happen outside grafts
476 # incomplete copies and divergences can't happen outside grafts
477 assert not both1['incomplete']
477 assert not both1['incomplete']
478 assert not both2['incomplete']
478 assert not both2['incomplete']
479 assert not bothincompletediverge
479 assert not bothincompletediverge
480 for f in remainder:
480 for f in remainder:
481 assert f not in bothdiverge
481 assert f not in bothdiverge
482 ic = remainder[f]
482 ic = remainder[f]
483 if ic[0] in (m1 if dirtyc1 else m2):
483 if ic[0] in (m1 if dirtyc1 else m2):
484 # backed-out rename on one side, but watch out for deleted files
484 # backed-out rename on one side, but watch out for deleted files
485 bothdiverge[f] = ic
485 bothdiverge[f] = ic
486 for of, fl in bothdiverge.items():
486 for of, fl in bothdiverge.items():
487 if len(fl) == 2 and fl[0] == fl[1]:
487 if len(fl) == 2 and fl[0] == fl[1]:
488 copy[fl[0]] = of # not actually divergent, just matching renames
488 copy[fl[0]] = of # not actually divergent, just matching renames
489
489
490 if fullcopy and repo.ui.debugflag:
490 if fullcopy and repo.ui.debugflag:
491 repo.ui.debug(" all copies found (* = to merge, ! = divergent, "
491 repo.ui.debug(" all copies found (* = to merge, ! = divergent, "
492 "% = renamed and deleted):\n")
492 "% = renamed and deleted):\n")
493 for f in sorted(fullcopy):
493 for f in sorted(fullcopy):
494 note = ""
494 note = ""
495 if f in copy:
495 if f in copy:
496 note += "*"
496 note += "*"
497 if f in divergeset:
497 if f in divergeset:
498 note += "!"
498 note += "!"
499 if f in renamedeleteset:
499 if f in renamedeleteset:
500 note += "%"
500 note += "%"
501 repo.ui.debug(" src: '%s' -> dst: '%s' %s\n" % (fullcopy[f], f,
501 repo.ui.debug(" src: '%s' -> dst: '%s' %s\n" % (fullcopy[f], f,
502 note))
502 note))
503 del divergeset
503 del divergeset
504
504
505 if not fullcopy:
505 if not fullcopy:
506 return copy, {}, diverge, renamedelete
506 return copy, {}, diverge, renamedelete
507
507
508 repo.ui.debug(" checking for directory renames\n")
508 repo.ui.debug(" checking for directory renames\n")
509
509
510 # generate a directory move map
510 # generate a directory move map
511 d1, d2 = c1.dirs(), c2.dirs()
511 d1, d2 = c1.dirs(), c2.dirs()
512 # Hack for adding '', which is not otherwise added, to d1 and d2
512 # Hack for adding '', which is not otherwise added, to d1 and d2
513 d1.addpath('/')
513 d1.addpath('/')
514 d2.addpath('/')
514 d2.addpath('/')
515 invalid = set()
515 invalid = set()
516 dirmove = {}
516 dirmove = {}
517
517
518 # examine each file copy for a potential directory move, which is
518 # examine each file copy for a potential directory move, which is
519 # when all the files in a directory are moved to a new directory
519 # when all the files in a directory are moved to a new directory
520 for dst, src in fullcopy.iteritems():
520 for dst, src in fullcopy.iteritems():
521 dsrc, ddst = pathutil.dirname(src), pathutil.dirname(dst)
521 dsrc, ddst = pathutil.dirname(src), pathutil.dirname(dst)
522 if dsrc in invalid:
522 if dsrc in invalid:
523 # already seen to be uninteresting
523 # already seen to be uninteresting
524 continue
524 continue
525 elif dsrc in d1 and ddst in d1:
525 elif dsrc in d1 and ddst in d1:
526 # directory wasn't entirely moved locally
526 # directory wasn't entirely moved locally
527 invalid.add(dsrc + "/")
527 invalid.add(dsrc + "/")
528 elif dsrc in d2 and ddst in d2:
528 elif dsrc in d2 and ddst in d2:
529 # directory wasn't entirely moved remotely
529 # directory wasn't entirely moved remotely
530 invalid.add(dsrc + "/")
530 invalid.add(dsrc + "/")
531 elif dsrc + "/" in dirmove and dirmove[dsrc + "/"] != ddst + "/":
531 elif dsrc + "/" in dirmove and dirmove[dsrc + "/"] != ddst + "/":
532 # files from the same directory moved to two different places
532 # files from the same directory moved to two different places
533 invalid.add(dsrc + "/")
533 invalid.add(dsrc + "/")
534 else:
534 else:
535 # looks good so far
535 # looks good so far
536 dirmove[dsrc + "/"] = ddst + "/"
536 dirmove[dsrc + "/"] = ddst + "/"
537
537
538 for i in invalid:
538 for i in invalid:
539 if i in dirmove:
539 if i in dirmove:
540 del dirmove[i]
540 del dirmove[i]
541 del d1, d2, invalid
541 del d1, d2, invalid
542
542
543 if not dirmove:
543 if not dirmove:
544 return copy, {}, diverge, renamedelete
544 return copy, {}, diverge, renamedelete
545
545
546 for d in dirmove:
546 for d in dirmove:
547 repo.ui.debug(" discovered dir src: '%s' -> dst: '%s'\n" %
547 repo.ui.debug(" discovered dir src: '%s' -> dst: '%s'\n" %
548 (d, dirmove[d]))
548 (d, dirmove[d]))
549
549
550 movewithdir = {}
550 movewithdir = {}
551 # check unaccounted nonoverlapping files against directory moves
551 # check unaccounted nonoverlapping files against directory moves
552 for f in u1r + u2r:
552 for f in u1r + u2r:
553 if f not in fullcopy:
553 if f not in fullcopy:
554 for d in dirmove:
554 for d in dirmove:
555 if f.startswith(d):
555 if f.startswith(d):
556 # new file added in a directory that was moved, move it
556 # new file added in a directory that was moved, move it
557 df = dirmove[d] + f[len(d):]
557 df = dirmove[d] + f[len(d):]
558 if df not in copy:
558 if df not in copy:
559 movewithdir[f] = df
559 movewithdir[f] = df
560 repo.ui.debug((" pending file src: '%s' -> "
560 repo.ui.debug((" pending file src: '%s' -> "
561 "dst: '%s'\n") % (f, df))
561 "dst: '%s'\n") % (f, df))
562 break
562 break
563
563
564 return copy, movewithdir, diverge, renamedelete
564 return copy, movewithdir, diverge, renamedelete
565
565
def _related(f1, f2, limit):
    """Return the filectx where f1's and f2's histories meet, or False.

    Walks both ancestor chains in lockstep, always advancing the side
    whose linkrev is larger, until the chains meet or provably diverge.
    workingfilectx objects have a linkrev of None, which would break the
    integer comparisons below, so those are stepped past up front (only
    the initial f1/f2 can be workingfilectx instances).

    Returns the matching filectx on success, False otherwise.
    """
    if f1 == f2:
        return f1  # same file context: trivially related

    anc1, anc2 = f1.ancestors(), f2.ancestors()
    try:
        rev1, rev2 = f1.linkrev(), f2.linkrev()

        # Step past working-copy contexts before comparing revisions.
        if rev1 is None:
            f1 = next(anc1)
        if rev2 is None:
            f2 = next(anc2)

        while True:
            rev1, rev2 = f1.linkrev(), f2.linkrev()
            if rev1 > rev2:
                f1 = next(anc1)
            elif rev2 > rev1:
                f2 = next(anc2)
            elif f1 == f2:
                return f1  # a match
            elif rev1 == rev2 or rev1 < limit or rev2 < limit:
                return False  # copy no longer relevant
    except StopIteration:
        # One ancestor chain was exhausted before a match was found.
        return False
599
599
def _checkcopies(ctx, f, m1, m2, base, tca, remotebase, limit, data):
    """
    check possible copies of f from m1 to m2

    ctx = starting context for f in m1
    f = the filename to check (as in m1)
    m1 = the source manifest
    m2 = the destination manifest
    base = the changectx used as a merge base
    tca = topological common ancestor for graft-like scenarios
    remotebase = True if base is outside tca::ctx, False otherwise
    limit = the rev number to not search beyond
    data = dictionary of dictionary to store copy data. (see mergecopies)

    note: limit is only an optimization, and there is no guarantee that
    irrelevant revisions will not be limited
    there is no easy way to make this algorithm stop in a guaranteed way
    once it "goes behind a certain revision".
    """

    mb = base.manifest()
    mta = tca.manifest()
    # Might be true if this call is about finding backward renames,
    # This happens in the case of grafts because the DAG is then rotated.
    # If the file exists in both the base and the source, we are not looking
    # for a rename on the source side, but on the part of the DAG that is
    # traversed backwards.
    #
    # In the case there is both backward and forward renames (before and after
    # the base) this is more complicated as we must detect a divergence.
    # We use 'backwards = False' in that case.
    backwards = not remotebase and base != tca and f in mb
    getfctx = _makegetfctx(ctx)

    if m1[f] == mb.get(f) and not remotebase:
        # Nothing to merge
        return

    of = None
    # filenames already visited along the rename chain (starts with f itself)
    seen = set([f])
    # Walk f's filelog ancestors; oc.path() changes at each rename point.
    for oc in getfctx(f, m1[f]).ancestors():
        ocr = oc.linkrev()
        of = oc.path()
        if of in seen:
            # check limit late - grab last rename before
            if ocr < limit:
                break
            continue
        seen.add(of)

        # remember for dir rename detection
        if backwards:
            data['fullcopy'][of] = f # grafting backwards through renames
        else:
            data['fullcopy'][f] = of
        if of not in m2:
            continue # no match, keep looking
        if m2[of] == mb.get(of):
            return # no merge needed, quit early
        c2 = getfctx(of, m2[of])
        # c2 might be a plain new file on added on destination side that is
        # unrelated to the droids we are looking for.
        cr = _related(oc, c2, tca.rev())
        if cr and (of == f or of == c2.path()): # non-divergent
            if backwards:
                data['copy'][of] = f
            elif of in mb:
                data['copy'][f] = of
            elif remotebase: # special case: a <- b <- a -> b "ping-pong" rename
                data['copy'][of] = f
                del data['fullcopy'][f]
                data['fullcopy'][of] = f
            else: # divergence w.r.t. graft CA on one side of topological CA
                for sf in seen:
                    if sf in mb:
                        assert sf not in data['diverge']
                        data['diverge'][sf] = [f, of]
                        break
            return

        if of in mta:
            if backwards or remotebase:
                # record an incomplete copy for the caller to match up
                # against the other side (see _combinecopies in mergecopies)
                data['incomplete'][of] = f
            else:
                for sf in seen:
                    if sf in mb:
                        if tca == base:
                            data['diverge'].setdefault(sf, []).append(f)
                        else:
                            data['incompletediverge'][sf] = [of, f]
                        return
691
691
def duplicatecopies(repo, rev, fromrev, skiprev=None):
    '''reproduce copies from fromrev to rev in the dirstate

    If skiprev is specified, it's a revision that should be used to
    filter copy records. Any copies that occur between fromrev and
    skiprev will not be duplicated, even if they appear in the set of
    copies between fromrev and rev.
    '''
    skipped = {}
    if skiprev is not None:
        if not repo.ui.configbool('experimental', 'disablecopytrace'):
            # disablecopytrace skips only this lookup, not the entire
            # function, because the pathcopies() call below is O(size of
            # the repo) during a rebase, while the rest of the function
            # is much faster (and is required for carrying copy metadata
            # across the rebase anyway).
            skipped = pathcopies(repo[fromrev], repo[skiprev])
    allcopies = pathcopies(repo[fromrev], repo[rev])
    for dst, src in allcopies.iteritems():
        if dst in skipped:
            continue
        # copies.pathcopies returns backward renames, so dst might not
        # actually be in the dirstate
        if repo.dirstate[dst] in "nma":
            repo.dirstate.copy(src, dst)
# node.py - basic nodeid manipulation for mercurial
#
# Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import binascii

# This ugly style has a noticeable effect in manifest parsing
hex = binascii.hexlify
bin = binascii.unhexlify

nullrev = -1
nullid = b"\0" * 20
nullhex = hex(nullid)

# Phony node value to stand-in for new files in some uses of
# manifests.
newnodeid = '!' * 20

# nodes that may appear in manifests but denote working-directory content
wdirnodes = {newnodeid}

# pseudo identifiers for working directory
# (they are experimental, so don't add too many dependencies on them)
wdirrev = 0x7fffffff
wdirid = b"\xff" * 20

def short(node):
    """Return the hex form of the first 6 bytes of node."""
    return hex(node[:6])
General Comments 0
You need to be logged in to leave comments. Login now