copies: extract an explicit `computechangesetcopies` method from context...
marmoute
r42935:3cffc7bb default
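This changeset moves the filelog-based copy computation out of `basectx._copies` and into the `copies` module. For orientation, here is a minimal sketch of what the extracted `computechangesetcopies(ctx)` helper in `mercurial/copies.py` plausibly looks like, reconstructed from the body removed from context.py below (`self` becomes the `ctx` parameter; the docstring wording is an assumption, since the copies.py side is not shown on this page):

```python
def computechangesetcopies(ctx):
    """return the copies data for a changeset

    Sketch reconstructed from the code removed from basectx._copies below.
    Returns a pair of dicts (p1copies, p2copies), each mapping a destination
    path to its source path relative to the corresponding parent.
    """
    p1copies = {}
    p2copies = {}
    p1 = ctx.p1()
    p2 = ctx.p2()
    narrowmatch = ctx._repo.narrowmatch()
    for dst in ctx.files():
        if not narrowmatch(dst) or dst not in ctx:
            continue
        copied = ctx[dst].renamed()
        if not copied:
            continue
        src, srcnode = copied
        if src in p1 and p1[src].filenode() == srcnode:
            p1copies[dst] = src
        elif src in p2 and p2[src].filenode() == srcnode:
            p2copies[dst] = src
    return p1copies, p2copies
```

Behavior is intended to be unchanged: `p1copies()` and `p2copies()` still read `self._copies`, and `changectx._copies` still prefers copy metadata stored in the changeset (per `experimental.copies.read-from`) before falling back to this filelog-based computation.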
@@ -1,2604 +1,2589 @@
 # context.py - changeset and file context objects for mercurial
 #
 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.

 from __future__ import absolute_import

 import errno
 import filecmp
 import os
 import stat

 from .i18n import _
 from .node import (
     addednodeid,
     hex,
     modifiednodeid,
     nullid,
     nullrev,
     short,
     wdirfilenodeids,
     wdirhex,
 )
 from . import (
+    copies,
     dagop,
     encoding,
     error,
     fileset,
     match as matchmod,
     obsolete as obsmod,
     patch,
     pathutil,
     phases,
     pycompat,
     repoview,
     scmutil,
     sparse,
     subrepo,
     subrepoutil,
     util,
 )
 from .utils import (
     dateutil,
     stringutil,
 )

 propertycache = util.propertycache

 class basectx(object):
     """A basectx object represents the common logic for its children:
     changectx: read-only context that is already present in the repo,
     workingctx: a context that represents the working directory and can
                 be committed,
     memctx: a context that represents changes in-memory and can also
             be committed."""

     def __init__(self, repo):
         self._repo = repo

     def __bytes__(self):
         return short(self.node())

     __str__ = encoding.strmethod(__bytes__)

     def __repr__(self):
         return r"<%s %s>" % (type(self).__name__, str(self))

     def __eq__(self, other):
         try:
             return type(self) == type(other) and self._rev == other._rev
         except AttributeError:
             return False

     def __ne__(self, other):
         return not (self == other)

     def __contains__(self, key):
         return key in self._manifest

     def __getitem__(self, key):
         return self.filectx(key)

     def __iter__(self):
         return iter(self._manifest)

     def _buildstatusmanifest(self, status):
         """Builds a manifest that includes the given status results, if this is
         a working copy context. For non-working copy contexts, it just returns
         the normal manifest."""
         return self.manifest()

     def _matchstatus(self, other, match):
         """This internal method provides a way for child objects to override the
         match operator.
         """
         return match

     def _buildstatus(self, other, s, match, listignored, listclean,
                      listunknown):
         """build a status with respect to another context"""
         # Load earliest manifest first for caching reasons. More specifically,
         # if you have revisions 1000 and 1001, 1001 is probably stored as a
         # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
         # 1000 and cache it so that when you read 1001, we just need to apply a
         # delta to what's in the cache. So that's one full reconstruction + one
         # delta application.
         mf2 = None
         if self.rev() is not None and self.rev() < other.rev():
             mf2 = self._buildstatusmanifest(s)
         mf1 = other._buildstatusmanifest(s)
         if mf2 is None:
             mf2 = self._buildstatusmanifest(s)

         modified, added = [], []
         removed = []
         clean = []
         deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
         deletedset = set(deleted)
         d = mf1.diff(mf2, match=match, clean=listclean)
         for fn, value in d.iteritems():
             if fn in deletedset:
                 continue
             if value is None:
                 clean.append(fn)
                 continue
             (node1, flag1), (node2, flag2) = value
             if node1 is None:
                 added.append(fn)
             elif node2 is None:
                 removed.append(fn)
             elif flag1 != flag2:
                 modified.append(fn)
             elif node2 not in wdirfilenodeids:
                 # When comparing files between two commits, we save time by
                 # not comparing the file contents when the nodeids differ.
                 # Note that this means we incorrectly report a reverted change
                 # to a file as a modification.
                 modified.append(fn)
             elif self[fn].cmp(other[fn]):
                 modified.append(fn)
             else:
                 clean.append(fn)

         if removed:
             # need to filter files if they are already reported as removed
             unknown = [fn for fn in unknown if fn not in mf1 and
                        (not match or match(fn))]
             ignored = [fn for fn in ignored if fn not in mf1 and
                        (not match or match(fn))]
             # if they're deleted, don't report them as removed
             removed = [fn for fn in removed if fn not in deletedset]

         return scmutil.status(modified, added, removed, deleted, unknown,
                               ignored, clean)

     @propertycache
     def substate(self):
         return subrepoutil.state(self, self._repo.ui)

     def subrev(self, subpath):
         return self.substate[subpath][1]

     def rev(self):
         return self._rev
     def node(self):
         return self._node
     def hex(self):
         return hex(self.node())
     def manifest(self):
         return self._manifest
     def manifestctx(self):
         return self._manifestctx
     def repo(self):
         return self._repo
     def phasestr(self):
         return phases.phasenames[self.phase()]
     def mutable(self):
         return self.phase() > phases.public

     def matchfileset(self, expr, badfn=None):
         return fileset.match(self, expr, badfn=badfn)

     def obsolete(self):
         """True if the changeset is obsolete"""
         return self.rev() in obsmod.getrevs(self._repo, 'obsolete')

     def extinct(self):
         """True if the changeset is extinct"""
         return self.rev() in obsmod.getrevs(self._repo, 'extinct')

     def orphan(self):
         """True if the changeset is not obsolete, but its ancestor is"""
         return self.rev() in obsmod.getrevs(self._repo, 'orphan')

     def phasedivergent(self):
         """True if the changeset tries to be a successor of a public changeset

         Only non-public and non-obsolete changesets may be phase-divergent.
         """
         return self.rev() in obsmod.getrevs(self._repo, 'phasedivergent')

     def contentdivergent(self):
         """Is a successor of a changeset with multiple possible successor sets

         Only non-public and non-obsolete changesets may be content-divergent.
         """
         return self.rev() in obsmod.getrevs(self._repo, 'contentdivergent')

     def isunstable(self):
         """True if the changeset is either orphan, phase-divergent or
         content-divergent"""
         return self.orphan() or self.phasedivergent() or self.contentdivergent()

     def instabilities(self):
         """return the list of instabilities affecting this changeset.

         Instabilities are returned as strings. Possible values are:
         - orphan,
         - phase-divergent,
         - content-divergent.
         """
         instabilities = []
         if self.orphan():
             instabilities.append('orphan')
         if self.phasedivergent():
             instabilities.append('phase-divergent')
         if self.contentdivergent():
             instabilities.append('content-divergent')
         return instabilities

     def parents(self):
         """return contexts for each parent changeset"""
         return self._parents

     def p1(self):
         return self._parents[0]

     def p2(self):
         parents = self._parents
         if len(parents) == 2:
             return parents[1]
         return self._repo[nullrev]

     def _fileinfo(self, path):
         if r'_manifest' in self.__dict__:
             try:
                 return self._manifest[path], self._manifest.flags(path)
             except KeyError:
                 raise error.ManifestLookupError(self._node, path,
                                                 _('not found in manifest'))
         if r'_manifestdelta' in self.__dict__ or path in self.files():
             if path in self._manifestdelta:
                 return (self._manifestdelta[path],
                         self._manifestdelta.flags(path))
         mfl = self._repo.manifestlog
         try:
             node, flag = mfl[self._changeset.manifest].find(path)
         except KeyError:
             raise error.ManifestLookupError(self._node, path,
                                             _('not found in manifest'))

         return node, flag

     def filenode(self, path):
         return self._fileinfo(path)[0]

     def flags(self, path):
         try:
             return self._fileinfo(path)[1]
         except error.LookupError:
             return ''

     @propertycache
     def _copies(self):
-        p1copies = {}
-        p2copies = {}
-        p1 = self.p1()
-        p2 = self.p2()
-        narrowmatch = self._repo.narrowmatch()
-        for dst in self.files():
-            if not narrowmatch(dst) or dst not in self:
-                continue
-            copied = self[dst].renamed()
-            if not copied:
-                continue
-            src, srcnode = copied
-            if src in p1 and p1[src].filenode() == srcnode:
-                p1copies[dst] = src
-            elif src in p2 and p2[src].filenode() == srcnode:
-                p2copies[dst] = src
-        return p1copies, p2copies
+        return copies.computechangesetcopies(self)
294 def p1copies(self):
279 def p1copies(self):
295 return self._copies[0]
280 return self._copies[0]
296 def p2copies(self):
281 def p2copies(self):
297 return self._copies[1]
282 return self._copies[1]
298
283
299 def sub(self, path, allowcreate=True):
284 def sub(self, path, allowcreate=True):
300 '''return a subrepo for the stored revision of path, never wdir()'''
285 '''return a subrepo for the stored revision of path, never wdir()'''
301 return subrepo.subrepo(self, path, allowcreate=allowcreate)
286 return subrepo.subrepo(self, path, allowcreate=allowcreate)
302
287
303 def nullsub(self, path, pctx):
288 def nullsub(self, path, pctx):
304 return subrepo.nullsubrepo(self, path, pctx)
289 return subrepo.nullsubrepo(self, path, pctx)
305
290
306 def workingsub(self, path):
291 def workingsub(self, path):
307 '''return a subrepo for the stored revision, or wdir if this is a wdir
292 '''return a subrepo for the stored revision, or wdir if this is a wdir
308 context.
293 context.
309 '''
294 '''
310 return subrepo.subrepo(self, path, allowwdir=True)
295 return subrepo.subrepo(self, path, allowwdir=True)
311
296
312 def match(self, pats=None, include=None, exclude=None, default='glob',
297 def match(self, pats=None, include=None, exclude=None, default='glob',
313 listsubrepos=False, badfn=None):
298 listsubrepos=False, badfn=None):
314 r = self._repo
299 r = self._repo
315 return matchmod.match(r.root, r.getcwd(), pats,
300 return matchmod.match(r.root, r.getcwd(), pats,
316 include, exclude, default,
301 include, exclude, default,
317 auditor=r.nofsauditor, ctx=self,
302 auditor=r.nofsauditor, ctx=self,
318 listsubrepos=listsubrepos, badfn=badfn)
303 listsubrepos=listsubrepos, badfn=badfn)
319
304
320 def diff(self, ctx2=None, match=None, changes=None, opts=None,
305 def diff(self, ctx2=None, match=None, changes=None, opts=None,
321 losedatafn=None, pathfn=None, copy=None,
306 losedatafn=None, pathfn=None, copy=None,
322 copysourcematch=None, hunksfilterfn=None):
307 copysourcematch=None, hunksfilterfn=None):
323 """Returns a diff generator for the given contexts and matcher"""
308 """Returns a diff generator for the given contexts and matcher"""
324 if ctx2 is None:
309 if ctx2 is None:
325 ctx2 = self.p1()
310 ctx2 = self.p1()
326 if ctx2 is not None:
311 if ctx2 is not None:
327 ctx2 = self._repo[ctx2]
312 ctx2 = self._repo[ctx2]
328 return patch.diff(self._repo, ctx2, self, match=match, changes=changes,
313 return patch.diff(self._repo, ctx2, self, match=match, changes=changes,
329 opts=opts, losedatafn=losedatafn, pathfn=pathfn,
314 opts=opts, losedatafn=losedatafn, pathfn=pathfn,
330 copy=copy, copysourcematch=copysourcematch,
315 copy=copy, copysourcematch=copysourcematch,
331 hunksfilterfn=hunksfilterfn)
316 hunksfilterfn=hunksfilterfn)
332
317
333 def dirs(self):
318 def dirs(self):
334 return self._manifest.dirs()
319 return self._manifest.dirs()
335
320
336 def hasdir(self, dir):
321 def hasdir(self, dir):
337 return self._manifest.hasdir(dir)
322 return self._manifest.hasdir(dir)
338
323
339 def status(self, other=None, match=None, listignored=False,
324 def status(self, other=None, match=None, listignored=False,
340 listclean=False, listunknown=False, listsubrepos=False):
325 listclean=False, listunknown=False, listsubrepos=False):
341 """return status of files between two nodes or node and working
326 """return status of files between two nodes or node and working
342 directory.
327 directory.
343
328
344 If other is None, compare this node with working directory.
329 If other is None, compare this node with working directory.
345
330
346 returns (modified, added, removed, deleted, unknown, ignored, clean)
331 returns (modified, added, removed, deleted, unknown, ignored, clean)
347 """
332 """
348
333
349 ctx1 = self
334 ctx1 = self
350 ctx2 = self._repo[other]
335 ctx2 = self._repo[other]
351
336
352 # This next code block is, admittedly, fragile logic that tests for
337 # This next code block is, admittedly, fragile logic that tests for
353 # reversing the contexts and wouldn't need to exist if it weren't for
338 # reversing the contexts and wouldn't need to exist if it weren't for
354 # the fast (and common) code path of comparing the working directory
339 # the fast (and common) code path of comparing the working directory
355 # with its first parent.
340 # with its first parent.
356 #
341 #
357 # What we're aiming for here is the ability to call:
342 # What we're aiming for here is the ability to call:
358 #
343 #
359 # workingctx.status(parentctx)
344 # workingctx.status(parentctx)
360 #
345 #
361 # If we always built the manifest for each context and compared those,
346 # If we always built the manifest for each context and compared those,
362 # then we'd be done. But the special case of the above call means we
347 # then we'd be done. But the special case of the above call means we
363 # just copy the manifest of the parent.
348 # just copy the manifest of the parent.
364 reversed = False
349 reversed = False
365 if (not isinstance(ctx1, changectx)
350 if (not isinstance(ctx1, changectx)
366 and isinstance(ctx2, changectx)):
351 and isinstance(ctx2, changectx)):
367 reversed = True
352 reversed = True
368 ctx1, ctx2 = ctx2, ctx1
353 ctx1, ctx2 = ctx2, ctx1
369
354
370 match = self._repo.narrowmatch(match)
355 match = self._repo.narrowmatch(match)
371 match = ctx2._matchstatus(ctx1, match)
356 match = ctx2._matchstatus(ctx1, match)
372 r = scmutil.status([], [], [], [], [], [], [])
357 r = scmutil.status([], [], [], [], [], [], [])
373 r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
358 r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
374 listunknown)
359 listunknown)
375
360
376 if reversed:
361 if reversed:
377 # Reverse added and removed. Clear deleted, unknown and ignored as
362 # Reverse added and removed. Clear deleted, unknown and ignored as
378 # these make no sense to reverse.
363 # these make no sense to reverse.
379 r = scmutil.status(r.modified, r.removed, r.added, [], [], [],
364 r = scmutil.status(r.modified, r.removed, r.added, [], [], [],
380 r.clean)
365 r.clean)
381
366
382 if listsubrepos:
367 if listsubrepos:
383 for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
368 for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
384 try:
369 try:
385 rev2 = ctx2.subrev(subpath)
370 rev2 = ctx2.subrev(subpath)
386 except KeyError:
371 except KeyError:
387 # A subrepo that existed in node1 was deleted between
372 # A subrepo that existed in node1 was deleted between
388 # node1 and node2 (inclusive). Thus, ctx2's substate
373 # node1 and node2 (inclusive). Thus, ctx2's substate
389 # won't contain that subpath. The best we can do ignore it.
374 # won't contain that subpath. The best we can do ignore it.
390 rev2 = None
375 rev2 = None
391 submatch = matchmod.subdirmatcher(subpath, match)
376 submatch = matchmod.subdirmatcher(subpath, match)
392 s = sub.status(rev2, match=submatch, ignored=listignored,
377 s = sub.status(rev2, match=submatch, ignored=listignored,
393 clean=listclean, unknown=listunknown,
378 clean=listclean, unknown=listunknown,
394 listsubrepos=True)
379 listsubrepos=True)
395 for rfiles, sfiles in zip(r, s):
380 for rfiles, sfiles in zip(r, s):
396 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
381 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
397
382
398 for l in r:
383 for l in r:
399 l.sort()
384 l.sort()
400
385
401 return r
386 return r
402
387
403 class changectx(basectx):
388 class changectx(basectx):
404 """A changecontext object makes access to data related to a particular
389 """A changecontext object makes access to data related to a particular
405 changeset convenient. It represents a read-only context already present in
390 changeset convenient. It represents a read-only context already present in
406 the repo."""
391 the repo."""
407 def __init__(self, repo, rev, node):
392 def __init__(self, repo, rev, node):
408 super(changectx, self).__init__(repo)
393 super(changectx, self).__init__(repo)
409 self._rev = rev
394 self._rev = rev
410 self._node = node
395 self._node = node
411
396
412 def __hash__(self):
397 def __hash__(self):
413 try:
398 try:
414 return hash(self._rev)
399 return hash(self._rev)
415 except AttributeError:
400 except AttributeError:
416 return id(self)
401 return id(self)
417
402
418 def __nonzero__(self):
403 def __nonzero__(self):
419 return self._rev != nullrev
404 return self._rev != nullrev
420
405
421 __bool__ = __nonzero__
406 __bool__ = __nonzero__
422
407
423 @propertycache
408 @propertycache
424 def _changeset(self):
409 def _changeset(self):
425 return self._repo.changelog.changelogrevision(self.rev())
410 return self._repo.changelog.changelogrevision(self.rev())
426
411
427 @propertycache
412 @propertycache
428 def _manifest(self):
413 def _manifest(self):
429 return self._manifestctx.read()
414 return self._manifestctx.read()
430
415
431 @property
416 @property
432 def _manifestctx(self):
417 def _manifestctx(self):
433 return self._repo.manifestlog[self._changeset.manifest]
418 return self._repo.manifestlog[self._changeset.manifest]
434
419
435 @propertycache
420 @propertycache
436 def _manifestdelta(self):
421 def _manifestdelta(self):
437 return self._manifestctx.readdelta()
422 return self._manifestctx.readdelta()
438
423
439 @propertycache
424 @propertycache
440 def _parents(self):
425 def _parents(self):
441 repo = self._repo
426 repo = self._repo
442 p1, p2 = repo.changelog.parentrevs(self._rev)
427 p1, p2 = repo.changelog.parentrevs(self._rev)
443 if p2 == nullrev:
428 if p2 == nullrev:
444 return [repo[p1]]
429 return [repo[p1]]
445 return [repo[p1], repo[p2]]
430 return [repo[p1], repo[p2]]
446
431
447 def changeset(self):
432 def changeset(self):
448 c = self._changeset
433 c = self._changeset
449 return (
434 return (
450 c.manifest,
435 c.manifest,
451 c.user,
436 c.user,
452 c.date,
437 c.date,
453 c.files,
438 c.files,
454 c.description,
439 c.description,
455 c.extra,
440 c.extra,
456 )
441 )
457 def manifestnode(self):
442 def manifestnode(self):
458 return self._changeset.manifest
443 return self._changeset.manifest
459
444
460 def user(self):
445 def user(self):
461 return self._changeset.user
446 return self._changeset.user
462 def date(self):
447 def date(self):
463 return self._changeset.date
448 return self._changeset.date
464 def files(self):
449 def files(self):
465 return self._changeset.files
450 return self._changeset.files
466 def filesmodified(self):
451 def filesmodified(self):
467 modified = set(self.files())
452 modified = set(self.files())
468 modified.difference_update(self.filesadded())
453 modified.difference_update(self.filesadded())
469 modified.difference_update(self.filesremoved())
454 modified.difference_update(self.filesremoved())
470 return sorted(modified)
455 return sorted(modified)
471 def filesadded(self):
456 def filesadded(self):
472 source = self._repo.ui.config('experimental', 'copies.read-from')
457 source = self._repo.ui.config('experimental', 'copies.read-from')
473 if (source == 'changeset-only' or
458 if (source == 'changeset-only' or
474 (source == 'compatibility' and
459 (source == 'compatibility' and
475 self._changeset.filesadded is not None)):
460 self._changeset.filesadded is not None)):
476 return self._changeset.filesadded or []
461 return self._changeset.filesadded or []
477
462
478 added = []
463 added = []
479 for f in self.files():
464 for f in self.files():
480 if not any(f in p for p in self.parents()):
465 if not any(f in p for p in self.parents()):
481 added.append(f)
466 added.append(f)
482 return added
467 return added
483 def filesremoved(self):
468 def filesremoved(self):
484 source = self._repo.ui.config('experimental', 'copies.read-from')
469 source = self._repo.ui.config('experimental', 'copies.read-from')
485 if (source == 'changeset-only' or
470 if (source == 'changeset-only' or
486 (source == 'compatibility' and
471 (source == 'compatibility' and
487 self._changeset.filesremoved is not None)):
472 self._changeset.filesremoved is not None)):
488 return self._changeset.filesremoved or []
473 return self._changeset.filesremoved or []
489
474
490 removed = []
475 removed = []
491 for f in self.files():
476 for f in self.files():
492 if f not in self:
477 if f not in self:
493 removed.append(f)
478 removed.append(f)
494 return removed
479 return removed
495
480
496 @propertycache
481 @propertycache
497 def _copies(self):
482 def _copies(self):
498 source = self._repo.ui.config('experimental', 'copies.read-from')
483 source = self._repo.ui.config('experimental', 'copies.read-from')
499 p1copies = self._changeset.p1copies
484 p1copies = self._changeset.p1copies
500 p2copies = self._changeset.p2copies
485 p2copies = self._changeset.p2copies
501 # If config says to get copy metadata only from changeset, then return
486 # If config says to get copy metadata only from changeset, then return
502 # that, defaulting to {} if there was no copy metadata.
487 # that, defaulting to {} if there was no copy metadata.
503 # In compatibility mode, we return copy data from the changeset if
488 # In compatibility mode, we return copy data from the changeset if
504 # it was recorded there, and otherwise we fall back to getting it from
489 # it was recorded there, and otherwise we fall back to getting it from
505 # the filelogs (below).
490 # the filelogs (below).
506 if (source == 'changeset-only' or
491 if (source == 'changeset-only' or
507 (source == 'compatibility' and p1copies is not None)):
492 (source == 'compatibility' and p1copies is not None)):
508 return p1copies or {}, p2copies or {}
493 return p1copies or {}, p2copies or {}
509
494
510 # Otherwise (config said to read only from filelog, or we are in
495 # Otherwise (config said to read only from filelog, or we are in
511 # compatiblity mode and there is not data in the changeset), we get
496 # compatiblity mode and there is not data in the changeset), we get
512 # the copy metadata from the filelogs.
497 # the copy metadata from the filelogs.
513 return super(changectx, self)._copies
498 return super(changectx, self)._copies
514 def description(self):
499 def description(self):
515 return self._changeset.description
500 return self._changeset.description
516 def branch(self):
501 def branch(self):
517 return encoding.tolocal(self._changeset.extra.get("branch"))
502 return encoding.tolocal(self._changeset.extra.get("branch"))
518 def closesbranch(self):
503 def closesbranch(self):
519 return 'close' in self._changeset.extra
504 return 'close' in self._changeset.extra
520 def extra(self):
505 def extra(self):
521 """Return a dict of extra information."""
506 """Return a dict of extra information."""
522 return self._changeset.extra
507 return self._changeset.extra
523 def tags(self):
508 def tags(self):
524 """Return a list of byte tag names"""
509 """Return a list of byte tag names"""
525 return self._repo.nodetags(self._node)
510 return self._repo.nodetags(self._node)
526 def bookmarks(self):
511 def bookmarks(self):
527 """Return a list of byte bookmark names."""
512 """Return a list of byte bookmark names."""
528 return self._repo.nodebookmarks(self._node)
513 return self._repo.nodebookmarks(self._node)
529 def phase(self):
514 def phase(self):
530 return self._repo._phasecache.phase(self._repo, self._rev)
515 return self._repo._phasecache.phase(self._repo, self._rev)
531 def hidden(self):
516 def hidden(self):
532 return self._rev in repoview.filterrevs(self._repo, 'visible')
517 return self._rev in repoview.filterrevs(self._repo, 'visible')
533
518
534 def isinmemory(self):
519 def isinmemory(self):
535 return False
520 return False
536
521
537 def children(self):
522 def children(self):
538 """return list of changectx contexts for each child changeset.
523 """return list of changectx contexts for each child changeset.
539
524
540 This returns only the immediate child changesets. Use descendants() to
525 This returns only the immediate child changesets. Use descendants() to
541 recursively walk children.
526 recursively walk children.
542 """
527 """
543 c = self._repo.changelog.children(self._node)
528 c = self._repo.changelog.children(self._node)
544 return [self._repo[x] for x in c]
529 return [self._repo[x] for x in c]
545
530
546 def ancestors(self):
531 def ancestors(self):
547 for a in self._repo.changelog.ancestors([self._rev]):
532 for a in self._repo.changelog.ancestors([self._rev]):
548 yield self._repo[a]
533 yield self._repo[a]
549
534
550 def descendants(self):
535 def descendants(self):
551 """Recursively yield all children of the changeset.
536 """Recursively yield all children of the changeset.
552
537
553 For just the immediate children, use children()
538 For just the immediate children, use children()
554 """
539 """
555 for d in self._repo.changelog.descendants([self._rev]):
540 for d in self._repo.changelog.descendants([self._rev]):
556 yield self._repo[d]
541 yield self._repo[d]
557
542
558 def filectx(self, path, fileid=None, filelog=None):
543 def filectx(self, path, fileid=None, filelog=None):
559 """get a file context from this changeset"""
544 """get a file context from this changeset"""
560 if fileid is None:
545 if fileid is None:
561 fileid = self.filenode(path)
546 fileid = self.filenode(path)
562 return filectx(self._repo, path, fileid=fileid,
547 return filectx(self._repo, path, fileid=fileid,
563 changectx=self, filelog=filelog)
548 changectx=self, filelog=filelog)
564
549
565 def ancestor(self, c2, warn=False):
550 def ancestor(self, c2, warn=False):
566 """return the "best" ancestor context of self and c2
551 """return the "best" ancestor context of self and c2
567
552
568 If there are multiple candidates, it will show a message and check
553 If there are multiple candidates, it will show a message and check
569 merge.preferancestor configuration before falling back to the
554 merge.preferancestor configuration before falling back to the
570 revlog ancestor."""
555 revlog ancestor."""
571 # deal with workingctxs
556 # deal with workingctxs
572 n2 = c2._node
557 n2 = c2._node
573 if n2 is None:
558 if n2 is None:
574 n2 = c2._parents[0]._node
559 n2 = c2._parents[0]._node
575 cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
560 cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
576 if not cahs:
561 if not cahs:
577 anc = nullid
562 anc = nullid
578 elif len(cahs) == 1:
563 elif len(cahs) == 1:
579 anc = cahs[0]
564 anc = cahs[0]
580 else:
565 else:
581 # experimental config: merge.preferancestor
566 # experimental config: merge.preferancestor
582 for r in self._repo.ui.configlist('merge', 'preferancestor'):
567 for r in self._repo.ui.configlist('merge', 'preferancestor'):
583 try:
568 try:
584 ctx = scmutil.revsymbol(self._repo, r)
569 ctx = scmutil.revsymbol(self._repo, r)
585 except error.RepoLookupError:
570 except error.RepoLookupError:
586 continue
571 continue
587 anc = ctx.node()
572 anc = ctx.node()
588 if anc in cahs:
573 if anc in cahs:
589 break
574 break
590 else:
575 else:
591 anc = self._repo.changelog.ancestor(self._node, n2)
576 anc = self._repo.changelog.ancestor(self._node, n2)
592 if warn:
577 if warn:
593 self._repo.ui.status(
578 self._repo.ui.status(
594 (_("note: using %s as ancestor of %s and %s\n") %
579 (_("note: using %s as ancestor of %s and %s\n") %
595 (short(anc), short(self._node), short(n2))) +
580 (short(anc), short(self._node), short(n2))) +
596 ''.join(_(" alternatively, use --config "
581 ''.join(_(" alternatively, use --config "
597 "merge.preferancestor=%s\n") %
582 "merge.preferancestor=%s\n") %
598 short(n) for n in sorted(cahs) if n != anc))
583 short(n) for n in sorted(cahs) if n != anc))
599 return self._repo[anc]
584 return self._repo[anc]
600
585
601 def isancestorof(self, other):
586 def isancestorof(self, other):
602 """True if this changeset is an ancestor of other"""
587 """True if this changeset is an ancestor of other"""
603 return self._repo.changelog.isancestorrev(self._rev, other._rev)
588 return self._repo.changelog.isancestorrev(self._rev, other._rev)
604
589
605 def walk(self, match):
590 def walk(self, match):
606 '''Generates matching file names.'''
591 '''Generates matching file names.'''
607
592
608 # Wrap match.bad method to have message with nodeid
593 # Wrap match.bad method to have message with nodeid
609 def bad(fn, msg):
594 def bad(fn, msg):
610 # The manifest doesn't know about subrepos, so don't complain about
595 # The manifest doesn't know about subrepos, so don't complain about
611 # paths into valid subrepos.
596 # paths into valid subrepos.
612 if any(fn == s or fn.startswith(s + '/')
597 if any(fn == s or fn.startswith(s + '/')
613 for s in self.substate):
598 for s in self.substate):
614 return
599 return
615 match.bad(fn, _('no such file in rev %s') % self)
600 match.bad(fn, _('no such file in rev %s') % self)
616
601
617 m = matchmod.badmatch(self._repo.narrowmatch(match), bad)
602 m = matchmod.badmatch(self._repo.narrowmatch(match), bad)
618 return self._manifest.walk(m)
603 return self._manifest.walk(m)
619
604
620 def matches(self, match):
605 def matches(self, match):
621 return self.walk(match)
606 return self.walk(match)
622
607
623 class basefilectx(object):
608 class basefilectx(object):
624 """A filecontext object represents the common logic for its children:
609 """A filecontext object represents the common logic for its children:
625 filectx: read-only access to a filerevision that is already present
610 filectx: read-only access to a filerevision that is already present
626 in the repo,
611 in the repo,
627 workingfilectx: a filecontext that represents files from the working
612 workingfilectx: a filecontext that represents files from the working
628 directory,
613 directory,
629 memfilectx: a filecontext that represents files in-memory,
614 memfilectx: a filecontext that represents files in-memory,
630 """
615 """
631 @propertycache
616 @propertycache
632 def _filelog(self):
617 def _filelog(self):
633 return self._repo.file(self._path)
618 return self._repo.file(self._path)
634
619
635 @propertycache
620 @propertycache
636 def _changeid(self):
621 def _changeid(self):
637 if r'_changectx' in self.__dict__:
622 if r'_changectx' in self.__dict__:
638 return self._changectx.rev()
623 return self._changectx.rev()
639 elif r'_descendantrev' in self.__dict__:
624 elif r'_descendantrev' in self.__dict__:
640 # this file context was created from a revision with a known
625 # this file context was created from a revision with a known
641 # descendant, we can (lazily) correct for linkrev aliases
626 # descendant, we can (lazily) correct for linkrev aliases
642 return self._adjustlinkrev(self._descendantrev)
627 return self._adjustlinkrev(self._descendantrev)
643 else:
628 else:
644 return self._filelog.linkrev(self._filerev)
629 return self._filelog.linkrev(self._filerev)
645
630
646 @propertycache
631 @propertycache
647 def _filenode(self):
632 def _filenode(self):
648 if r'_fileid' in self.__dict__:
633 if r'_fileid' in self.__dict__:
649 return self._filelog.lookup(self._fileid)
634 return self._filelog.lookup(self._fileid)
650 else:
635 else:
651 return self._changectx.filenode(self._path)
636 return self._changectx.filenode(self._path)
652
637
653 @propertycache
638 @propertycache
654 def _filerev(self):
639 def _filerev(self):
655 return self._filelog.rev(self._filenode)
640 return self._filelog.rev(self._filenode)
656
641
657 @propertycache
642 @propertycache
658 def _repopath(self):
643 def _repopath(self):
659 return self._path
644 return self._path
660
645
661 def __nonzero__(self):
646 def __nonzero__(self):
662 try:
647 try:
663 self._filenode
648 self._filenode
664 return True
649 return True
665 except error.LookupError:
650 except error.LookupError:
666 # file is missing
651 # file is missing
667 return False
652 return False
668
653
669 __bool__ = __nonzero__
654 __bool__ = __nonzero__
670
655
671 def __bytes__(self):
656 def __bytes__(self):
672 try:
657 try:
673 return "%s@%s" % (self.path(), self._changectx)
658 return "%s@%s" % (self.path(), self._changectx)
674 except error.LookupError:
659 except error.LookupError:
675 return "%s@???" % self.path()
660 return "%s@???" % self.path()
676
661
677 __str__ = encoding.strmethod(__bytes__)
662 __str__ = encoding.strmethod(__bytes__)
678
663
679 def __repr__(self):
664 def __repr__(self):
680 return r"<%s %s>" % (type(self).__name__, str(self))
665 return r"<%s %s>" % (type(self).__name__, str(self))
681
666
682 def __hash__(self):
667 def __hash__(self):
683 try:
668 try:
684 return hash((self._path, self._filenode))
669 return hash((self._path, self._filenode))
685 except AttributeError:
670 except AttributeError:
686 return id(self)
671 return id(self)
687
672
688 def __eq__(self, other):
673 def __eq__(self, other):
689 try:
674 try:
690 return (type(self) == type(other) and self._path == other._path
675 return (type(self) == type(other) and self._path == other._path
691 and self._filenode == other._filenode)
676 and self._filenode == other._filenode)
692 except AttributeError:
677 except AttributeError:
693 return False
678 return False
694
679
695 def __ne__(self, other):
680 def __ne__(self, other):
696 return not (self == other)
681 return not (self == other)
697
682
698 def filerev(self):
683 def filerev(self):
699 return self._filerev
684 return self._filerev
700 def filenode(self):
685 def filenode(self):
701 return self._filenode
686 return self._filenode
702 @propertycache
687 @propertycache
703 def _flags(self):
688 def _flags(self):
704 return self._changectx.flags(self._path)
689 return self._changectx.flags(self._path)
705 def flags(self):
690 def flags(self):
706 return self._flags
691 return self._flags
707 def filelog(self):
692 def filelog(self):
708 return self._filelog
693 return self._filelog
709 def rev(self):
694 def rev(self):
710 return self._changeid
695 return self._changeid
711 def linkrev(self):
696 def linkrev(self):
712 return self._filelog.linkrev(self._filerev)
697 return self._filelog.linkrev(self._filerev)
713 def node(self):
698 def node(self):
714 return self._changectx.node()
699 return self._changectx.node()
715 def hex(self):
700 def hex(self):
716 return self._changectx.hex()
701 return self._changectx.hex()
717 def user(self):
702 def user(self):
718 return self._changectx.user()
703 return self._changectx.user()
719 def date(self):
704 def date(self):
720 return self._changectx.date()
705 return self._changectx.date()
721 def files(self):
706 def files(self):
722 return self._changectx.files()
707 return self._changectx.files()
723 def description(self):
708 def description(self):
724 return self._changectx.description()
709 return self._changectx.description()
725 def branch(self):
710 def branch(self):
726 return self._changectx.branch()
711 return self._changectx.branch()
727 def extra(self):
712 def extra(self):
728 return self._changectx.extra()
713 return self._changectx.extra()
729 def phase(self):
714 def phase(self):
730 return self._changectx.phase()
715 return self._changectx.phase()
731 def phasestr(self):
716 def phasestr(self):
732 return self._changectx.phasestr()
717 return self._changectx.phasestr()
733 def obsolete(self):
718 def obsolete(self):
734 return self._changectx.obsolete()
719 return self._changectx.obsolete()
735 def instabilities(self):
720 def instabilities(self):
736 return self._changectx.instabilities()
721 return self._changectx.instabilities()
737 def manifest(self):
722 def manifest(self):
738 return self._changectx.manifest()
723 return self._changectx.manifest()
739 def changectx(self):
724 def changectx(self):
740 return self._changectx
725 return self._changectx
741 def renamed(self):
726 def renamed(self):
742 return self._copied
727 return self._copied
743 def copysource(self):
728 def copysource(self):
744 return self._copied and self._copied[0]
729 return self._copied and self._copied[0]
745 def repo(self):
730 def repo(self):
746 return self._repo
731 return self._repo
747 def size(self):
732 def size(self):
748 return len(self.data())
733 return len(self.data())
749
734
750 def path(self):
735 def path(self):
751 return self._path
736 return self._path
752
737
753 def isbinary(self):
738 def isbinary(self):
754 try:
739 try:
755 return stringutil.binary(self.data())
740 return stringutil.binary(self.data())
756 except IOError:
741 except IOError:
757 return False
742 return False
758 def isexec(self):
743 def isexec(self):
759 return 'x' in self.flags()
744 return 'x' in self.flags()
760 def islink(self):
745 def islink(self):
761 return 'l' in self.flags()
746 return 'l' in self.flags()
762
747
763 def isabsent(self):
748 def isabsent(self):
764 """whether this filectx represents a file not in self._changectx
749 """whether this filectx represents a file not in self._changectx
765
750
766 This is mainly for merge code to detect change/delete conflicts. This is
751 This is mainly for merge code to detect change/delete conflicts. This is
767 expected to be True for all subclasses of basectx."""
752 expected to be True for all subclasses of basectx."""
768 return False
753 return False
769
754
770 _customcmp = False
755 _customcmp = False
771 def cmp(self, fctx):
756 def cmp(self, fctx):
772 """compare with other file context
757 """compare with other file context
773
758
774 returns True if different than fctx.
759 returns True if different than fctx.
775 """
760 """
776 if fctx._customcmp:
761 if fctx._customcmp:
777 return fctx.cmp(self)
762 return fctx.cmp(self)
778
763
779 if self._filenode is None:
764 if self._filenode is None:
780 raise error.ProgrammingError(
765 raise error.ProgrammingError(
781 'filectx.cmp() must be reimplemented if not backed by revlog')
766 'filectx.cmp() must be reimplemented if not backed by revlog')
782
767
783 if fctx._filenode is None:
768 if fctx._filenode is None:
784 if self._repo._encodefilterpats:
769 if self._repo._encodefilterpats:
785 # can't rely on size() because wdir content may be decoded
770 # can't rely on size() because wdir content may be decoded
786 return self._filelog.cmp(self._filenode, fctx.data())
771 return self._filelog.cmp(self._filenode, fctx.data())
787 if self.size() - 4 == fctx.size():
772 if self.size() - 4 == fctx.size():
788 # size() can match:
773 # size() can match:
789 # if file data starts with '\1\n', empty metadata block is
774 # if file data starts with '\1\n', empty metadata block is
790 # prepended, which adds 4 bytes to filelog.size().
775 # prepended, which adds 4 bytes to filelog.size().
791 return self._filelog.cmp(self._filenode, fctx.data())
776 return self._filelog.cmp(self._filenode, fctx.data())
792 if self.size() == fctx.size():
777 if self.size() == fctx.size():
793 # size() matches: need to compare content
778 # size() matches: need to compare content
794 return self._filelog.cmp(self._filenode, fctx.data())
779 return self._filelog.cmp(self._filenode, fctx.data())
795
780
796 # size() differs
781 # size() differs
797 return True
782 return True
798
783
799 def _adjustlinkrev(self, srcrev, inclusive=False, stoprev=None):
784 def _adjustlinkrev(self, srcrev, inclusive=False, stoprev=None):
800 """return the first ancestor of <srcrev> introducing <fnode>
785 """return the first ancestor of <srcrev> introducing <fnode>
801
786
802 If the linkrev of the file revision does not point to an ancestor of
787 If the linkrev of the file revision does not point to an ancestor of
803 srcrev, we'll walk down the ancestors until we find one introducing
788 srcrev, we'll walk down the ancestors until we find one introducing
804 this file revision.
789 this file revision.
805
790
806 :srcrev: the changeset revision we search ancestors from
791 :srcrev: the changeset revision we search ancestors from
807 :inclusive: if true, the src revision will also be checked
792 :inclusive: if true, the src revision will also be checked
808 :stoprev: an optional revision to stop the walk at. If no introduction
793 :stoprev: an optional revision to stop the walk at. If no introduction
809 of this file content could be found before this floor
794 of this file content could be found before this floor
810 revision, the function will returns "None" and stops its
795 revision, the function will returns "None" and stops its
811 iteration.
796 iteration.
812 """
797 """
813 repo = self._repo
798 repo = self._repo
814 cl = repo.unfiltered().changelog
799 cl = repo.unfiltered().changelog
815 mfl = repo.manifestlog
800 mfl = repo.manifestlog
816 # fetch the linkrev
801 # fetch the linkrev
817 lkr = self.linkrev()
802 lkr = self.linkrev()
818 if srcrev == lkr:
803 if srcrev == lkr:
819 return lkr
804 return lkr
820 # hack to reuse ancestor computation when searching for renames
805 # hack to reuse ancestor computation when searching for renames
821 memberanc = getattr(self, '_ancestrycontext', None)
806 memberanc = getattr(self, '_ancestrycontext', None)
822 iteranc = None
807 iteranc = None
823 if srcrev is None:
808 if srcrev is None:
824 # wctx case, used by workingfilectx during mergecopy
809 # wctx case, used by workingfilectx during mergecopy
825 revs = [p.rev() for p in self._repo[None].parents()]
810 revs = [p.rev() for p in self._repo[None].parents()]
826 inclusive = True # we skipped the real (revless) source
811 inclusive = True # we skipped the real (revless) source
827 else:
812 else:
828 revs = [srcrev]
813 revs = [srcrev]
829 if memberanc is None:
814 if memberanc is None:
830 memberanc = iteranc = cl.ancestors(revs, lkr,
815 memberanc = iteranc = cl.ancestors(revs, lkr,
831 inclusive=inclusive)
816 inclusive=inclusive)
832 # check if this linkrev is an ancestor of srcrev
817 # check if this linkrev is an ancestor of srcrev
833 if lkr not in memberanc:
818 if lkr not in memberanc:
834 if iteranc is None:
819 if iteranc is None:
835 iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
820 iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
836 fnode = self._filenode
821 fnode = self._filenode
837 path = self._path
822 path = self._path
838 for a in iteranc:
823 for a in iteranc:
839 if stoprev is not None and a < stoprev:
824 if stoprev is not None and a < stoprev:
840 return None
825 return None
841 ac = cl.read(a) # get changeset data (we avoid object creation)
826 ac = cl.read(a) # get changeset data (we avoid object creation)
842 if path in ac[3]: # checking the 'files' field.
827 if path in ac[3]: # checking the 'files' field.
843 # The file has been touched, check if the content is
828 # The file has been touched, check if the content is
844 # similar to the one we search for.
829 # similar to the one we search for.
845 if fnode == mfl[ac[0]].readfast().get(path):
830 if fnode == mfl[ac[0]].readfast().get(path):
846 return a
831 return a
847 # In theory, we should never get out of that loop without a result.
832 # In theory, we should never get out of that loop without a result.
848 # But if manifest uses a buggy file revision (not children of the
833 # But if manifest uses a buggy file revision (not children of the
849 # one it replaces) we could. Such a buggy situation will likely
834 # one it replaces) we could. Such a buggy situation will likely
850 # result is crash somewhere else at to some point.
835 # result is crash somewhere else at to some point.
851 return lkr
836 return lkr
852
837
853 def isintroducedafter(self, changelogrev):
838 def isintroducedafter(self, changelogrev):
854 """True if a filectx has been introduced after a given floor revision
839 """True if a filectx has been introduced after a given floor revision
855 """
840 """
856 if self.linkrev() >= changelogrev:
841 if self.linkrev() >= changelogrev:
857 return True
842 return True
858 introrev = self._introrev(stoprev=changelogrev)
843 introrev = self._introrev(stoprev=changelogrev)
859 if introrev is None:
844 if introrev is None:
860 return False
845 return False
861 return introrev >= changelogrev
846 return introrev >= changelogrev
862
847
863 def introrev(self):
848 def introrev(self):
864 """return the rev of the changeset which introduced this file revision
849 """return the rev of the changeset which introduced this file revision
865
850
866 This method is different from linkrev because it take into account the
851 This method is different from linkrev because it take into account the
867 changeset the filectx was created from. It ensures the returned
852 changeset the filectx was created from. It ensures the returned
868 revision is one of its ancestors. This prevents bugs from
853 revision is one of its ancestors. This prevents bugs from
869 'linkrev-shadowing' when a file revision is used by multiple
854 'linkrev-shadowing' when a file revision is used by multiple
870 changesets.
855 changesets.
871 """
856 """
872 return self._introrev()
857 return self._introrev()
873
858
874 def _introrev(self, stoprev=None):
859 def _introrev(self, stoprev=None):
875 """
860 """
876 Same as `introrev` but, with an extra argument to limit changelog
861 Same as `introrev` but, with an extra argument to limit changelog
877 iteration range in some internal usecase.
862 iteration range in some internal usecase.
878
863
879 If `stoprev` is set, the `introrev` will not be searched past that
864 If `stoprev` is set, the `introrev` will not be searched past that
880 `stoprev` revision and "None" might be returned. This is useful to
865 `stoprev` revision and "None" might be returned. This is useful to
881 limit the iteration range.
866 limit the iteration range.
882 """
867 """
883 toprev = None
868 toprev = None
884 attrs = vars(self)
869 attrs = vars(self)
885 if r'_changeid' in attrs:
870 if r'_changeid' in attrs:
886 # We have a cached value already
871 # We have a cached value already
887 toprev = self._changeid
872 toprev = self._changeid
888 elif r'_changectx' in attrs:
873 elif r'_changectx' in attrs:
889 # We know which changelog entry we are coming from
874 # We know which changelog entry we are coming from
890 toprev = self._changectx.rev()
875 toprev = self._changectx.rev()
891
876
892 if toprev is not None:
877 if toprev is not None:
893 return self._adjustlinkrev(toprev, inclusive=True, stoprev=stoprev)
878 return self._adjustlinkrev(toprev, inclusive=True, stoprev=stoprev)
894 elif r'_descendantrev' in attrs:
879 elif r'_descendantrev' in attrs:
895 introrev = self._adjustlinkrev(self._descendantrev, stoprev=stoprev)
880 introrev = self._adjustlinkrev(self._descendantrev, stoprev=stoprev)
896 # be nice and cache the result of the computation
881 # be nice and cache the result of the computation
897 if introrev is not None:
882 if introrev is not None:
898 self._changeid = introrev
883 self._changeid = introrev
899 return introrev
884 return introrev
900 else:
885 else:
901 return self.linkrev()
886 return self.linkrev()
902
887
    def introfilectx(self):
        """Return filectx having identical contents, but pointing to the
        changeset revision where this filectx was introduced"""
        introrev = self.introrev()
        if self.rev() == introrev:
            return self
        return self.filectx(self.filenode(), changeid=introrev)

    def _parentfilectx(self, path, fileid, filelog):
        """create parent filectx keeping ancestry info for _adjustlinkrev()"""
        fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
        if r'_changeid' in vars(self) or r'_changectx' in vars(self):
            # If self is associated with a changeset (probably explicitly
            # fed), ensure the created filectx is associated with a
            # changeset that is an ancestor of self.changectx.
            # This lets us later use _adjustlinkrev to get a correct link.
            fctx._descendantrev = self.rev()
            fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
        elif r'_descendantrev' in vars(self):
            # Otherwise propagate _descendantrev if we have one associated.
            fctx._descendantrev = self._descendantrev
            fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
        return fctx

    def parents(self):
        _path = self._path
        fl = self._filelog
        parents = self._filelog.parents(self._filenode)
        pl = [(_path, node, fl) for node in parents if node != nullid]

        r = fl.renamed(self._filenode)
        if r:
            # - In the simple rename case, both parents are nullid and pl is
            #   empty.
            # - In case of merge, only one of the parents is nullid and it
            #   should be replaced with the rename information. This parent
            #   is -always- the first one.
            #
            # As nullid parents have always been filtered out by the list
            # comprehension above, inserting at index 0 always amounts to
            # "replacing the first nullid parent with rename information".
            pl.insert(0, (r[0], r[1], self._repo.file(r[0])))

        return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]

    def p1(self):
        return self.parents()[0]

    def p2(self):
        p = self.parents()
        if len(p) == 2:
            return p[1]
        return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)

    def annotate(self, follow=False, skiprevs=None, diffopts=None):
        """Returns a list of annotateline objects for each line in the file

        - line.fctx is the filectx of the node where that line was last changed
        - line.lineno is the line number at the first appearance in the managed
          file
        - line.text is the data on that line (including newline character)
        """
        getlog = util.lrucachefunc(lambda x: self._repo.file(x))

        def parents(f):
            # Cut _descendantrev here to mitigate the penalty of lazy linkrev
            # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
            # from the topmost introrev (= srcrev) down to p.linkrev() if it
            # isn't an ancestor of the srcrev.
            f._changeid
            pl = f.parents()

            # Don't return renamed parents if we aren't following.
            if not follow:
                pl = [p for p in pl if p.path() == f.path()]

            # renamed filectx won't have a filelog yet, so set it
            # from the cache to save time
            for p in pl:
                if not r'_filelog' in p.__dict__:
                    p._filelog = getlog(p.path())

            return pl

        # use linkrev to find the first changeset where self appeared
        base = self.introfilectx()
        if getattr(base, '_ancestrycontext', None) is None:
            cl = self._repo.changelog
            if base.rev() is None:
                # wctx is not inclusive, but works because _ancestrycontext
                # is used to test filelog revisions
                ac = cl.ancestors([p.rev() for p in base.parents()],
                                  inclusive=True)
            else:
                ac = cl.ancestors([base.rev()], inclusive=True)
            base._ancestrycontext = ac

        return dagop.annotate(base, parents, skiprevs=skiprevs,
                              diffopts=diffopts)
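
    # A minimal usage sketch of annotate() (illustrative only; `repo` and
    # the tracked file 'a.txt' are hypothetical, not defined here):
    #
    #   fctx = repo['.']['a.txt']
    #   for line in fctx.annotate(follow=True):
    #       # rev that last touched the line, its original lineno, its text
    #       print(line.fctx.rev(), line.lineno, line.text)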

    def ancestors(self, followfirst=False):
        visit = {}
        c = self
        if followfirst:
            cut = 1
        else:
            cut = None

        while True:
            for parent in c.parents()[:cut]:
                visit[(parent.linkrev(), parent.filenode())] = parent
            if not visit:
                break
            c = visit.pop(max(visit))
            yield c
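
    # Sketch of walking a file's history via ancestors() (assumes `fctx`
    # is some basefilectx obtained elsewhere; ancestors are yielded from
    # the most recent filelog revision backwards):
    #
    #   for afctx in fctx.ancestors(followfirst=True):
    #       print(afctx.rev(), afctx.path())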

    def decodeddata(self):
        """Returns `data()` after running repository decoding filters.

        This is often equivalent to how the data would be expressed on disk.
        """
        return self._repo.wwritedata(self.path(), self.data())

class filectx(basefilectx):
    """A filecontext object makes access to data related to a particular
    filerevision convenient."""
    def __init__(self, repo, path, changeid=None, fileid=None,
                 filelog=None, changectx=None):
        """changeid must be a revision number, if specified.
        fileid can be a file revision or node."""
        self._repo = repo
        self._path = path

        assert (changeid is not None
                or fileid is not None
                or changectx is not None), (
                    "bad args: changeid=%r, fileid=%r, changectx=%r"
                    % (changeid, fileid, changectx))

        if filelog is not None:
            self._filelog = filelog

        if changeid is not None:
            self._changeid = changeid
        if changectx is not None:
            self._changectx = changectx
        if fileid is not None:
            self._fileid = fileid
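
    # Construction sketch (a hedged example, not an API contract): either
    # a changeid (revision number) or a fileid (filelog revision or node)
    # is enough to pin down the file revision; `fnode` and `fl` below are
    # hypothetical placeholders:
    #
    #   fctx = filectx(repo, 'a.txt', changeid=5)
    #   fctx = filectx(repo, 'a.txt', fileid=fnode, filelog=fl)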

    @propertycache
    def _changectx(self):
        try:
            return self._repo[self._changeid]
        except error.FilteredRepoLookupError:
            # A linkrev may point to any revision in the repository. When
            # the repository is filtered this may lead to `filectx` trying
            # to build a `changectx` for a filtered revision. In such a case
            # we fall back to creating a `changectx` on the unfiltered
            # version of the repository. This fallback should not be an
            # issue because `changectx` from `filectx` are not used in
            # complex operations that care about filtering.
            #
            # This fallback is a cheap and dirty fix that prevents several
            # crashes. It does not ensure the behavior is correct. However,
            # the behavior was not correct before filtering either, and
            # "incorrect behavior" is seen as better than "crash".
            #
            # Linkrevs have several serious troubles with filtering that are
            # complicated to solve. Proper handling of the issue here should
            # be considered when solving the linkrev issues is on the table.
            return self._repo.unfiltered()[self._changeid]

    def filectx(self, fileid, changeid=None):
        '''opens an arbitrary revision of the file without
        opening a new filelog'''
        return filectx(self._repo, self._path, fileid=fileid,
                       filelog=self._filelog, changeid=changeid)

    def rawdata(self):
        return self._filelog.revision(self._filenode, raw=True)

    def rawflags(self):
        """low-level revlog flags"""
        return self._filelog.flags(self._filerev)

    def data(self):
        try:
            return self._filelog.read(self._filenode)
        except error.CensoredNodeError:
            if self._repo.ui.config("censor", "policy") == "ignore":
                return ""
            raise error.Abort(_("censored node: %s") % short(self._filenode),
                              hint=_("set censor.policy to ignore errors"))

    def size(self):
        return self._filelog.size(self._filerev)
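
    # Configuration sketch: with the hgrc snippet below (an example
    # configuration, not set by this module), data() above returns an
    # empty string for censored nodes instead of aborting:
    #
    #   [censor]
    #   policy = ignore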

    @propertycache
    def _copied(self):
        """check if file was actually renamed in this changeset revision

        If a rename is logged in the file revision, we report the copy for
        the changeset only if the file revision's linkrev points back to the
        changeset in question or both changeset parents contain different
        file revisions.
        """

        renamed = self._filelog.renamed(self._filenode)
        if not renamed:
            return None

        if self.rev() == self.linkrev():
            return renamed

        name = self.path()
        fnode = self._filenode
        for p in self._changectx.parents():
            try:
                if fnode == p.filenode(name):
                    return None
            except error.LookupError:
                pass
        return renamed

    def children(self):
        # hard for renames
        c = self._filelog.children(self._filenode)
        return [filectx(self._repo, self._path, fileid=x,
                        filelog=self._filelog) for x in c]

class committablectx(basectx):
    """A committablectx object provides common functionality for a context that
    wants the ability to commit, e.g. workingctx or memctx."""
    def __init__(self, repo, text="", user=None, date=None, extra=None,
                 changes=None, branch=None):
        super(committablectx, self).__init__(repo)
        self._rev = None
        self._node = None
        self._text = text
        if date:
            self._date = dateutil.parsedate(date)
        if user:
            self._user = user
        if changes:
            self._status = changes

        self._extra = {}
        if extra:
            self._extra = extra.copy()
        if branch is not None:
            self._extra['branch'] = encoding.fromlocal(branch)
        if not self._extra.get('branch'):
            self._extra['branch'] = 'default'

    def __bytes__(self):
        return bytes(self._parents[0]) + "+"

    __str__ = encoding.strmethod(__bytes__)

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    @propertycache
    def _status(self):
        return self._repo.status()

    @propertycache
    def _user(self):
        return self._repo.ui.username()

    @propertycache
    def _date(self):
        ui = self._repo.ui
        date = ui.configdate('devel', 'default-date')
        if date is None:
            date = dateutil.makedate()
        return date

    def subrev(self, subpath):
        return None

    def manifestnode(self):
        return None
    def user(self):
        return self._user or self._repo.ui.username()
    def date(self):
        return self._date
    def description(self):
        return self._text
    def files(self):
        return sorted(self._status.modified + self._status.added +
                      self._status.removed)
    def modified(self):
        return self._status.modified
    def added(self):
        return self._status.added
    def removed(self):
        return self._status.removed
    def deleted(self):
        return self._status.deleted
    filesmodified = modified
    filesadded = added
    filesremoved = removed

    def branch(self):
        return encoding.tolocal(self._extra['branch'])
    def closesbranch(self):
        return 'close' in self._extra
    def extra(self):
        return self._extra

    def isinmemory(self):
        return False

    def tags(self):
        return []

    def bookmarks(self):
        b = []
        for p in self.parents():
            b.extend(p.bookmarks())
        return b

    def phase(self):
        phase = phases.draft # default phase to draft
        for p in self.parents():
            phase = max(phase, p.phase())
        return phase

    def hidden(self):
        return False

    def children(self):
        return []

    def ancestor(self, c2):
        """return the "best" ancestor context of self and c2"""
        return self._parents[0].ancestor(c2) # punt on two parents for now

    def ancestors(self):
        for p in self._parents:
            yield p
        for a in self._repo.changelog.ancestors(
            [p.rev() for p in self._parents]):
            yield self._repo[a]

    def markcommitted(self, node):
        """Perform post-commit cleanup necessary after committing this ctx

        Specifically, this updates backing stores this working context
        wraps to reflect the fact that the changes reflected by this
        workingctx have been committed. For example, it marks
        modified and added files as normal in the dirstate.

        """

    def dirty(self, missing=False, merge=True, branch=True):
        return False

class workingctx(committablectx):
    """A workingctx object makes access to data related to
    the current working directory convenient.
    date - any valid date string or (unixtime, offset), or None.
    user - username string, or None.
    extra - a dictionary of extra values, or None.
    changes - a list of file lists as returned by localrepo.status()
              or None to use the repository status.
    """
    def __init__(self, repo, text="", user=None, date=None, extra=None,
                 changes=None):
        branch = None
        if not extra or 'branch' not in extra:
            try:
                branch = repo.dirstate.branch()
            except UnicodeDecodeError:
                raise error.Abort(_('branch name not in UTF-8!'))
        super(workingctx, self).__init__(repo, text, user, date, extra, changes,
                                         branch=branch)

    def __iter__(self):
        d = self._repo.dirstate
        for f in d:
            if d[f] != 'r':
                yield f

    def __contains__(self, key):
        return self._repo.dirstate[key] not in "?r"

    def hex(self):
        return wdirhex

    @propertycache
    def _parents(self):
        p = self._repo.dirstate.parents()
        if p[1] == nullid:
            p = p[:-1]
        # use unfiltered repo to delay/avoid loading obsmarkers
        unfi = self._repo.unfiltered()
        return [changectx(self._repo, unfi.changelog.rev(n), n) for n in p]

    def _fileinfo(self, path):
        # populate __dict__['_manifest'] as workingctx has no _manifestdelta
        self._manifest
        return super(workingctx, self)._fileinfo(path)

    def _buildflagfunc(self):
        # Create a fallback function for getting file flags when the
        # filesystem doesn't support them

        copiesget = self._repo.dirstate.copies().get
        parents = self.parents()
        if len(parents) < 2:
            # when we have one parent, it's easy: copy from parent
            man = parents[0].manifest()
            def func(f):
                f = copiesget(f, f)
                return man.flags(f)
        else:
            # merges are tricky: we try to reconstruct the unstored
            # result from the merge (issue1802)
            p1, p2 = parents
            pa = p1.ancestor(p2)
            m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()

            def func(f):
                f = copiesget(f, f) # may be wrong for merges with copies
                fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
                if fl1 == fl2:
                    return fl1
                if fl1 == fla:
                    return fl2
                if fl2 == fla:
                    return fl1
                return '' # punt for conflicts

        return func
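
    # Illustration of the three-way flag merge above (values are
    # hypothetical): with (fl1, fl2, fla) == ('x', 'x', '') both sides
    # agree, so 'x' wins; with ('x', '', '') only p1 changed the flag, so
    # 'x' wins; with ('x', 'l', '') the sides changed it differently, so
    # the function punts with ''.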

    @propertycache
    def _flagfunc(self):
        return self._repo.dirstate.flagfunc(self._buildflagfunc)

    def flags(self, path):
        if r'_manifest' in self.__dict__:
            try:
                return self._manifest.flags(path)
            except KeyError:
                return ''

        try:
            return self._flagfunc(path)
        except OSError:
            return ''

    def filectx(self, path, filelog=None):
        """get a file context from the working directory"""
        return workingfilectx(self._repo, path, workingctx=self,
                              filelog=filelog)

    def dirty(self, missing=False, merge=True, branch=True):
        "check whether a working directory is modified"
        # check subrepos first
        for s in sorted(self.substate):
            if self.sub(s).dirty(missing=missing):
                return True
        # check current working dir
        return ((merge and self.p2()) or
                (branch and self.branch() != self.p1().branch()) or
                self.modified() or self.added() or self.removed() or
                (missing and self.deleted()))

    def add(self, list, prefix=""):
        with self._repo.wlock():
            ui, ds = self._repo.ui, self._repo.dirstate
            uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
            rejected = []
            lstat = self._repo.wvfs.lstat
            for f in list:
                # ds.pathto() returns an absolute path when this is invoked
                # from the keyword extension. That gets flagged as
                # non-portable on Windows, since it contains the drive
                # letter and colon.
                scmutil.checkportable(ui, os.path.join(prefix, f))
                try:
                    st = lstat(f)
                except OSError:
                    ui.warn(_("%s does not exist!\n") % uipath(f))
                    rejected.append(f)
                    continue
                limit = ui.configbytes('ui', 'large-file-limit')
                if limit != 0 and st.st_size > limit:
                    ui.warn(_("%s: up to %d MB of RAM may be required "
                              "to manage this file\n"
                              "(use 'hg revert %s' to cancel the "
                              "pending addition)\n")
                            % (f, 3 * st.st_size // 1000000, uipath(f)))
                if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
                    ui.warn(_("%s not added: only files and symlinks "
                              "supported currently\n") % uipath(f))
                    rejected.append(f)
                elif ds[f] in 'amn':
                    ui.warn(_("%s already tracked!\n") % uipath(f))
                elif ds[f] == 'r':
                    ds.normallookup(f)
                else:
                    ds.add(f)
            return rejected

    def forget(self, files, prefix=""):
        with self._repo.wlock():
            ds = self._repo.dirstate
            uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
            rejected = []
            for f in files:
                if f not in ds:
                    self._repo.ui.warn(_("%s not tracked!\n") % uipath(f))
                    rejected.append(f)
                elif ds[f] != 'a':
                    ds.remove(f)
                else:
                    ds.drop(f)
            return rejected

    def copy(self, source, dest):
        try:
            st = self._repo.wvfs.lstat(dest)
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
            self._repo.ui.warn(_("%s does not exist!\n")
                               % self._repo.dirstate.pathto(dest))
            return
        if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
            self._repo.ui.warn(_("copy failed: %s is not a file or a "
                                 "symbolic link\n")
                               % self._repo.dirstate.pathto(dest))
        else:
            with self._repo.wlock():
                ds = self._repo.dirstate
                if ds[dest] in '?':
                    ds.add(dest)
                elif ds[dest] in 'r':
                    ds.normallookup(dest)
                ds.copy(source, dest)
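
    # Usage sketch (hedged: assumes `wctx = repo[None]` and that 'a.txt'
    # was already copied to 'b.txt' on disk): copy() records the copy in
    # the dirstate so the next commit carries the rename information:
    #
    #   wctx.copy('a.txt', 'b.txt')
    #   assert repo.dirstate.copied('b.txt') == 'a.txt'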

    def match(self, pats=None, include=None, exclude=None, default='glob',
              listsubrepos=False, badfn=None):
        r = self._repo

        # Only a case insensitive filesystem needs magic to translate user input
        # to actual case in the filesystem.
        icasefs = not util.fscasesensitive(r.root)
        return matchmod.match(r.root, r.getcwd(), pats, include, exclude,
                              default, auditor=r.auditor, ctx=self,
                              listsubrepos=listsubrepos, badfn=badfn,
                              icasefs=icasefs)

    def _filtersuspectsymlink(self, files):
        if not files or self._repo.dirstate._checklink:
            return files

        # Symlink placeholders may get non-symlink-like contents
        # via user error or dereferencing by NFS or Samba servers,
        # so we filter out any placeholders that don't look like a
        # symlink
        sane = []
        for f in files:
            if self.flags(f) == 'l':
                d = self[f].data()
                if (d == '' or len(d) >= 1024 or '\n' in d
                    or stringutil.binary(d)):
                    self._repo.ui.debug('ignoring suspect symlink placeholder'
                                        ' "%s"\n' % f)
                    continue
            sane.append(f)
        return sane

    def _checklookup(self, files):
        # check for any possibly clean files
        if not files:
            return [], [], []

        modified = []
        deleted = []
        fixup = []
        pctx = self._parents[0]
        # do a full compare of any files that might have changed
        for f in sorted(files):
            try:
                # This will return True for a file that got replaced by a
                # directory in the interim, but fixing that is pretty hard.
                if (f not in pctx or self.flags(f) != pctx.flags(f)
                    or pctx[f].cmp(self[f])):
                    modified.append(f)
                else:
                    fixup.append(f)
            except (IOError, OSError):
                # A file became inaccessible in between? Mark it as deleted,
                # matching dirstate behavior (issue5584).
                # The dirstate has more complex behavior around whether a
                # missing file matches a directory, etc, but we don't need to
                # bother with that: if f has made it to this point, we're sure
                # it's in the dirstate.
                deleted.append(f)

        return modified, deleted, fixup

    def _poststatusfixup(self, status, fixup):
        """update dirstate for files that are actually clean"""
        poststatus = self._repo.postdsstatus()
        if fixup or poststatus:
            try:
                oldid = self._repo.dirstate.identity()

                # updating the dirstate is optional
                # so we don't wait on the lock
                # wlock can invalidate the dirstate, so cache normal _after_
                # taking the lock
                with self._repo.wlock(False):
                    if self._repo.dirstate.identity() == oldid:
                        if fixup:
                            normal = self._repo.dirstate.normal
                            for f in fixup:
                                normal(f)
                            # write changes out explicitly, because nesting
                            # wlock at runtime may prevent 'wlock.release()'
                            # after this block from doing so for subsequent
                            # changing files
                            tr = self._repo.currenttransaction()
                            self._repo.dirstate.write(tr)

                        if poststatus:
                            for ps in poststatus:
                                ps(self, status)
                    else:
                        # in this case, writing changes out breaks
                        # consistency, because .hg/dirstate was
                        # already changed simultaneously after last
                        # caching (see also issue5584 for detail)
                        self._repo.ui.debug('skip updating dirstate: '
                                            'identity mismatch\n')
            except error.LockError:
                pass
            finally:
                # Even if the wlock couldn't be grabbed, clear out the list.
                self._repo.clearpostdsstatus()

    def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
        '''Gets the status from the dirstate -- internal use only.'''
        subrepos = []
        if '.hgsub' in self:
            subrepos = sorted(self.substate)
        cmp, s = self._repo.dirstate.status(match, subrepos, ignored=ignored,
                                            clean=clean, unknown=unknown)

        # check for any possibly clean files
        fixup = []
        if cmp:
            modified2, deleted2, fixup = self._checklookup(cmp)
            s.modified.extend(modified2)
            s.deleted.extend(deleted2)

        if fixup and clean:
            s.clean.extend(fixup)

        self._poststatusfixup(s, fixup)

        if match.always():
            # cache for performance
            if s.unknown or s.ignored or s.clean:
                # "_status" is cached with list*=False in the normal route
                self._status = scmutil.status(s.modified, s.added, s.removed,
                                              s.deleted, [], [], [])
            else:
                self._status = s

        return s

    @propertycache
    def _copies(self):
        p1copies = {}
        p2copies = {}
        parents = self._repo.dirstate.parents()
        p1manifest = self._repo[parents[0]].manifest()
        p2manifest = self._repo[parents[1]].manifest()
        narrowmatch = self._repo.narrowmatch()
        for dst, src in self._repo.dirstate.copies().items():
            if not narrowmatch(dst):
                continue
            if src in p1manifest:
                p1copies[dst] = src
            elif src in p2manifest:
                p2copies[dst] = src
        return p1copies, p2copies
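
    # Shape sketch of the result (hypothetical file names): with 'b'
    # copied from 'a' (present in p1) and 'd' copied from 'c' (present
    # only in p2), the property would evaluate to:
    #
    #   p1copies == {'b': 'a'}
    #   p2copies == {'d': 'c'}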

    @propertycache
    def _manifest(self):
        """generate a manifest corresponding to the values in self._status

        This reuses the file nodeids from the parent, but we use special
        node identifiers for added and modified files. This is used by
        manifests merge to see that files are different and by update logic
        to avoid deleting newly added files.
        """
        return self._buildstatusmanifest(self._status)

    def _buildstatusmanifest(self, status):
        """Builds a manifest that includes the given status results."""
        parents = self.parents()

        man = parents[0].manifest().copy()

        ff = self._flagfunc
        for i, l in ((addednodeid, status.added),
                     (modifiednodeid, status.modified)):
            for f in l:
                man[f] = i
                try:
                    man.setflag(f, ff(f))
                except OSError:
                    pass

        for f in status.deleted + status.removed:
            if f in man:
                del man[f]

        return man

    def _buildstatus(self, other, s, match, listignored, listclean,
                     listunknown):
        """build a status with respect to another context

        This includes logic for maintaining the fast path of status when
        comparing the working directory against its parent, which is to skip
        building a new manifest if self (working directory) is not comparing
        against its parent (repo['.']).
        """
        s = self._dirstatestatus(match, listignored, listclean, listunknown)
        # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
        # might have accidentally ended up with the entire contents of the file
        # they are supposed to be linking to.
        s.modified[:] = self._filtersuspectsymlink(s.modified)
        if other != self._repo['.']:
            s = super(workingctx, self)._buildstatus(other, s, match,
                                                     listignored, listclean,
                                                     listunknown)
        return s

    def _matchstatus(self, other, match):
        """override the match method with a filter for directory patterns

        We use inheritance to customize the match.bad method only in cases of
        workingctx since it belongs only to the working directory when
        comparing against the parent changeset.

        If we aren't comparing against the working directory's parent, then we
        just use the default match object sent to us.
        """
        if other != self._repo['.']:
            def bad(f, msg):
                # 'f' may be a directory pattern from 'match.files()',
                # so 'f not in ctx1' is not enough
                if f not in other and not other.hasdir(f):
                    self._repo.ui.warn('%s: %s\n' %
                                       (self._repo.dirstate.pathto(f), msg))
            match.bad = bad
        return match

    def walk(self, match):
        '''Generates matching file names.'''
        return sorted(self._repo.dirstate.walk(self._repo.narrowmatch(match),
                                               subrepos=sorted(self.substate),
                                               unknown=True, ignored=False))

    def matches(self, match):
        match = self._repo.narrowmatch(match)
        ds = self._repo.dirstate
        return sorted(f for f in ds.matches(match) if ds[f] != 'r')

    def markcommitted(self, node):
        with self._repo.dirstate.parentchange():
            for f in self.modified() + self.added():
                self._repo.dirstate.normal(f)
            for f in self.removed():
                self._repo.dirstate.drop(f)
            self._repo.dirstate.setparents(node)

        # write changes out explicitly, because nesting wlock at
        # runtime may prevent 'wlock.release()' in 'repo.commit()'
        # from immediately doing so for subsequent changing files
        self._repo.dirstate.write(self._repo.currenttransaction())

        sparse.aftercommit(self._repo, node)

class committablefilectx(basefilectx):
    """A committablefilectx provides common functionality for a file context
    that wants the ability to commit, e.g. workingfilectx or memfilectx."""
    def __init__(self, repo, path, filelog=None, ctx=None):
        self._repo = repo
        self._path = path
        self._changeid = None
        self._filerev = self._filenode = None

        if filelog is not None:
            self._filelog = filelog
        if ctx:
            self._changectx = ctx

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def linkrev(self):
        # linked to self._changectx no matter if file is modified or not
        return self.rev()

    def renamed(self):
        path = self.copysource()
        if not path:
            return None
        return path, self._changectx._parents[0]._manifest.get(path, nullid)

    def parents(self):
        '''return parent filectxs, following copies if necessary'''
        def filenode(ctx, path):
            return ctx._manifest.get(path, nullid)

        path = self._path
        fl = self._filelog
        pcl = self._changectx._parents
        renamed = self.renamed()

        if renamed:
            pl = [renamed + (None,)]
        else:
            pl = [(path, filenode(pcl[0], path), fl)]

        for pc in pcl[1:]:
            pl.append((path, filenode(pc, path), fl))

        return [self._parentfilectx(p, fileid=n, filelog=l)
                for p, n, l in pl if n != nullid]

    def children(self):
        return []

class workingfilectx(committablefilectx):
    """A workingfilectx object makes access to data related to a particular
    file in the working directory convenient."""
    def __init__(self, repo, path, filelog=None, workingctx=None):
        super(workingfilectx, self).__init__(repo, path, filelog, workingctx)

    @propertycache
    def _changectx(self):
        return workingctx(self._repo)

    def data(self):
        return self._repo.wread(self._path)
    def copysource(self):
        return self._repo.dirstate.copied(self._path)

    def size(self):
        return self._repo.wvfs.lstat(self._path).st_size
    def lstat(self):
        return self._repo.wvfs.lstat(self._path)
    def date(self):
        t, tz = self._changectx.date()
        try:
            return (self._repo.wvfs.lstat(self._path)[stat.ST_MTIME], tz)
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
            return (t, tz)

    def exists(self):
        return self._repo.wvfs.exists(self._path)

    def lexists(self):
        return self._repo.wvfs.lexists(self._path)

    def audit(self):
        return self._repo.wvfs.audit(self._path)

    def cmp(self, fctx):
        """compare with other file context

        returns True if different from fctx.
        """
        # fctx should be a filectx (not a workingfilectx)
        # invert comparison to reuse the same code path
        return fctx.cmp(self)
1793
1778
1794 def remove(self, ignoremissing=False):
1779 def remove(self, ignoremissing=False):
1795 """wraps unlink for a repo's working directory"""
1780 """wraps unlink for a repo's working directory"""
1796 rmdir = self._repo.ui.configbool('experimental', 'removeemptydirs')
1781 rmdir = self._repo.ui.configbool('experimental', 'removeemptydirs')
1797 self._repo.wvfs.unlinkpath(self._path, ignoremissing=ignoremissing,
1782 self._repo.wvfs.unlinkpath(self._path, ignoremissing=ignoremissing,
1798 rmdir=rmdir)
1783 rmdir=rmdir)
1799
1784
1800 def write(self, data, flags, backgroundclose=False, **kwargs):
1785 def write(self, data, flags, backgroundclose=False, **kwargs):
1801 """wraps repo.wwrite"""
1786 """wraps repo.wwrite"""
1802 return self._repo.wwrite(self._path, data, flags,
1787 return self._repo.wwrite(self._path, data, flags,
1803 backgroundclose=backgroundclose,
1788 backgroundclose=backgroundclose,
1804 **kwargs)
1789 **kwargs)
1805
1790
1806 def markcopied(self, src):
1791 def markcopied(self, src):
1807 """marks this file a copy of `src`"""
1792 """marks this file a copy of `src`"""
1808 self._repo.dirstate.copy(src, self._path)
1793 self._repo.dirstate.copy(src, self._path)
1809
1794
1810 def clearunknown(self):
1795 def clearunknown(self):
1811 """Removes conflicting items in the working directory so that
1796 """Removes conflicting items in the working directory so that
1812 ``write()`` can be called successfully.
1797 ``write()`` can be called successfully.
1813 """
1798 """
1814 wvfs = self._repo.wvfs
1799 wvfs = self._repo.wvfs
1815 f = self._path
1800 f = self._path
1816 wvfs.audit(f)
1801 wvfs.audit(f)
1817 if self._repo.ui.configbool('experimental', 'merge.checkpathconflicts'):
1802 if self._repo.ui.configbool('experimental', 'merge.checkpathconflicts'):
1818 # remove files under the directory as they should already be
1803 # remove files under the directory as they should already be
1819 # warned and backed up
1804 # warned and backed up
1820 if wvfs.isdir(f) and not wvfs.islink(f):
1805 if wvfs.isdir(f) and not wvfs.islink(f):
1821 wvfs.rmtree(f, forcibly=True)
1806 wvfs.rmtree(f, forcibly=True)
1822 for p in reversed(list(util.finddirs(f))):
1807 for p in reversed(list(util.finddirs(f))):
1823 if wvfs.isfileorlink(p):
1808 if wvfs.isfileorlink(p):
1824 wvfs.unlink(p)
1809 wvfs.unlink(p)
1825 break
1810 break
1826 else:
1811 else:
1827 # don't remove files if path conflicts are not processed
1812 # don't remove files if path conflicts are not processed
1828 if wvfs.isdir(f) and not wvfs.islink(f):
1813 if wvfs.isdir(f) and not wvfs.islink(f):
1829 wvfs.removedirs(f)
1814 wvfs.removedirs(f)
1830
1815
1831 def setflags(self, l, x):
1816 def setflags(self, l, x):
1832 self._repo.wvfs.setflags(self._path, l, x)
1817 self._repo.wvfs.setflags(self._path, l, x)
1833
1818
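# Hedged usage sketch (not part of the original module): reading file
# data and pending copy information from the working directory through
# ``workingfilectx``. ``repo`` is assumed to be a ``localrepo`` instance
# and ``path`` a repo-relative path; the function is illustrative only
# and is never called at import time.
def _exampleworkingfile(repo, path):
    fctx = workingfilectx(repo, path)
    if not fctx.lexists():
        return None
    # copysource() consults the dirstate and returns the source path of a
    # pending copy/rename, or a falsy value if there is none.
    return (fctx.data(), fctx.copysource() or None)
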
class overlayworkingctx(committablectx):
    """Wraps another mutable context with a write-back cache that can be
    converted into a commit context.

    self._cache[path] maps to a dict with keys: {
      'exists': bool?
      'date': date?
      'data': str?
      'flags': str?
      'copied': str? (path or None)
    }
    If `exists` is True, `flags` must be non-None and `date` is non-None. If it
    is `False`, the file was deleted.
    """

    def __init__(self, repo):
        super(overlayworkingctx, self).__init__(repo)
        self.clean()

    def setbase(self, wrappedctx):
        self._wrappedctx = wrappedctx
        self._parents = [wrappedctx]
        # Drop old manifest cache as it is now out of date.
        # This is necessary when, e.g., rebasing several nodes with one
        # ``overlayworkingctx`` (e.g. with --collapse).
        util.clearcachedproperty(self, '_manifest')

    def data(self, path):
        if self.isdirty(path):
            if self._cache[path]['exists']:
                if self._cache[path]['data'] is not None:
                    return self._cache[path]['data']
                else:
                    # Must fallback here, too, because we only set flags.
                    return self._wrappedctx[path].data()
            else:
                raise error.ProgrammingError("No such file or directory: %s" %
                                             path)
        else:
            return self._wrappedctx[path].data()

    @propertycache
    def _manifest(self):
        parents = self.parents()
        man = parents[0].manifest().copy()

        flag = self._flagfunc
        for path in self.added():
            man[path] = addednodeid
            man.setflag(path, flag(path))
        for path in self.modified():
            man[path] = modifiednodeid
            man.setflag(path, flag(path))
        for path in self.removed():
            del man[path]
        return man

    @propertycache
    def _flagfunc(self):
        def f(path):
            return self._cache[path]['flags']
        return f

    def files(self):
        return sorted(self.added() + self.modified() + self.removed())

    def modified(self):
        return [f for f in self._cache.keys() if self._cache[f]['exists'] and
                self._existsinparent(f)]

    def added(self):
        return [f for f in self._cache.keys() if self._cache[f]['exists'] and
                not self._existsinparent(f)]

    def removed(self):
        return [f for f in self._cache.keys() if
                not self._cache[f]['exists'] and self._existsinparent(f)]

    def p1copies(self):
        # The wrapped context lives on the context itself, not on the repo;
        # going through ``self._repo`` here would raise AttributeError.
        copies = self._wrappedctx.p1copies().copy()
        narrowmatch = self._repo.narrowmatch()
        for f in self._cache.keys():
            if not narrowmatch(f):
                continue
            copies.pop(f, None) # delete if it exists
            source = self._cache[f]['copied']
            if source:
                copies[f] = source
        return copies

    def p2copies(self):
        copies = self._wrappedctx.p2copies().copy()
        narrowmatch = self._repo.narrowmatch()
        for f in self._cache.keys():
            if not narrowmatch(f):
                continue
            copies.pop(f, None) # delete if it exists
            source = self._cache[f]['copied']
            if source:
                copies[f] = source
        return copies

    def isinmemory(self):
        return True

    def filedate(self, path):
        if self.isdirty(path):
            return self._cache[path]['date']
        else:
            return self._wrappedctx[path].date()

    def markcopied(self, path, origin):
        self._markdirty(path, exists=True, date=self.filedate(path),
                        flags=self.flags(path), copied=origin)

    def copydata(self, path):
        if self.isdirty(path):
            return self._cache[path]['copied']
        else:
            return None

    def flags(self, path):
        if self.isdirty(path):
            if self._cache[path]['exists']:
                return self._cache[path]['flags']
            else:
                # ``path`` is the method argument; this context object has no
                # ``_path`` attribute of its own.
                raise error.ProgrammingError("No such file or directory: %s" %
                                             path)
        else:
            return self._wrappedctx[path].flags()

    def __contains__(self, key):
        if key in self._cache:
            return self._cache[key]['exists']
        return key in self.p1()

    def _existsinparent(self, path):
        try:
            # ``commitctx`` raises a ``ManifestLookupError`` if a path does not
            # exist, unlike ``workingctx``, which returns a ``workingfilectx``
            # with an ``exists()`` function.
            self._wrappedctx[path]
            return True
        except error.ManifestLookupError:
            return False

    def _auditconflicts(self, path):
        """Replicates conflict checks done by wvfs.write().

        Since we never write to the filesystem and never call `applyupdates` in
        IMM, we'll never check that a path is actually writable -- e.g.,
        because it adds `a/foo`, but `a` is actually a file in the other
        commit.
        """
        def fail(path, component):
            # p1() is the base and we're receiving "writes" for p2()'s
            # files.
            if 'l' in self.p1()[component].flags():
                raise error.Abort("error: %s conflicts with symlink %s "
                                  "in %d." % (path, component,
                                              self.p1().rev()))
            else:
                raise error.Abort("error: '%s' conflicts with file '%s' in "
                                  "%d." % (path, component,
                                           self.p1().rev()))

        # Test that each new directory to be created to write this path from p2
        # is not a file in p1.
        components = path.split('/')
        for i in pycompat.xrange(len(components)):
            component = "/".join(components[0:i])
            if component in self:
                fail(path, component)

        # Test the other direction -- that this path from p2 isn't a directory
        # in p1 (test that p1 doesn't have any paths matching `path/*`).
        match = self.match([path], default=b'path')
        matches = self.p1().manifest().matches(match)
        mfiles = matches.keys()
        if len(mfiles) > 0:
            if len(mfiles) == 1 and mfiles[0] == path:
                return
            # omit the files which are deleted in current IMM wctx
            mfiles = [m for m in mfiles if m in self]
            if not mfiles:
                return
            raise error.Abort("error: file '%s' cannot be written because "
                              "'%s/' is a directory in %s (containing %d "
                              "entries: %s)"
                              % (path, path, self.p1(), len(mfiles),
                                 ', '.join(mfiles)))

    def write(self, path, data, flags='', **kwargs):
        if data is None:
            raise error.ProgrammingError("data must be non-None")
        self._auditconflicts(path)
        self._markdirty(path, exists=True, data=data, date=dateutil.makedate(),
                        flags=flags)

    def setflags(self, path, l, x):
        flag = ''
        if l:
            flag = 'l'
        elif x:
            flag = 'x'
        self._markdirty(path, exists=True, date=dateutil.makedate(),
                        flags=flag)

    def remove(self, path):
        self._markdirty(path, exists=False)

    def exists(self, path):
        """exists behaves like `lexists`, but needs to follow symlinks and
        return False if they are broken.
        """
        if self.isdirty(path):
            # If this path exists and is a symlink, "follow" it by calling
            # exists on the destination path.
            if (self._cache[path]['exists'] and
                'l' in self._cache[path]['flags']):
                return self.exists(self._cache[path]['data'].strip())
            else:
                return self._cache[path]['exists']

        return self._existsinparent(path)

    def lexists(self, path):
        """lexists returns True if the path exists"""
        if self.isdirty(path):
            return self._cache[path]['exists']

        return self._existsinparent(path)

    def size(self, path):
        if self.isdirty(path):
            if self._cache[path]['exists']:
                return len(self._cache[path]['data'])
            else:
                # ``path`` is the method argument; this context object has no
                # ``_path`` attribute of its own.
                raise error.ProgrammingError("No such file or directory: %s" %
                                             path)
        return self._wrappedctx[path].size()

    def tomemctx(self, text, branch=None, extra=None, date=None, parents=None,
                 user=None, editor=None):
        """Converts this ``overlayworkingctx`` into a ``memctx`` ready to be
        committed.

        ``text`` is the commit message.
        ``parents`` (optional) are rev numbers.
        """
        # Default parents to the wrapped context's if not passed.
        if parents is None:
            parents = self._wrappedctx.parents()
            if len(parents) == 1:
                parents = (parents[0], None)

        # ``parents`` is passed as rev numbers; convert to ``commitctxs``.
        if parents[1] is None:
            parents = (self._repo[parents[0]], None)
        else:
            parents = (self._repo[parents[0]], self._repo[parents[1]])

        files = self.files()
        def getfile(repo, memctx, path):
            if self._cache[path]['exists']:
                return memfilectx(repo, memctx, path,
                                  self._cache[path]['data'],
                                  'l' in self._cache[path]['flags'],
                                  'x' in self._cache[path]['flags'],
                                  self._cache[path]['copied'])
            else:
                # Returning None, but including the path in `files`, is
                # necessary for memctx to register a deletion.
                return None
        return memctx(self._repo, parents, text, files, getfile, date=date,
                      extra=extra, user=user, branch=branch, editor=editor)

    def isdirty(self, path):
        return path in self._cache

    def isempty(self):
        # We need to discard any keys that are actually clean before the empty
        # commit check.
        self._compact()
        return len(self._cache) == 0

    def clean(self):
        self._cache = {}

    def _compact(self):
        """Removes keys from the cache that are actually clean, by comparing
        them with the underlying context.

        This can occur during the merge process, e.g. by passing --tool :local
        to resolve a conflict.
        """
        keys = []
        # This won't be perfect, but can help performance significantly when
        # using things like remotefilelog.
        scmutil.prefetchfiles(
            self.repo(), [self.p1().rev()],
            scmutil.matchfiles(self.repo(), self._cache.keys()))

        for path in self._cache.keys():
            cache = self._cache[path]
            try:
                underlying = self._wrappedctx[path]
                if (underlying.data() == cache['data'] and
                    underlying.flags() == cache['flags']):
                    keys.append(path)
            except error.ManifestLookupError:
                # Path not in the underlying manifest (created).
                continue

        for path in keys:
            del self._cache[path]
        return keys

    def _markdirty(self, path, exists, data=None, date=None, flags='',
                   copied=None):
        # data not provided, let's see if we already have some; if not, let's
        # grab it from our underlying context, so that we always have data if
        # the file is marked as existing.
        if exists and data is None:
            oldentry = self._cache.get(path) or {}
            data = oldentry.get('data')
            if data is None:
                data = self._wrappedctx[path].data()

        self._cache[path] = {
            'exists': exists,
            'data': data,
            'date': date,
            'flags': flags,
            'copied': copied,
        }

    def filectx(self, path, filelog=None):
        return overlayworkingfilectx(self._repo, path, parent=self,
                                     filelog=filelog)

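# Hedged usage sketch (not part of the original module): building an
# in-memory commit on top of the working copy parent with
# ``overlayworkingctx``, the way in-memory merge/rebase code does.
# ``repo`` is assumed to be a ``localrepo``; the file name and commit
# text are made up for illustration, and nothing here runs at import
# time.
def _exampleinmemorycommit(repo):
    wctx = overlayworkingctx(repo)
    wctx.setbase(repo['.'])
    # Writes land in the write-back cache, not on disk.
    wctx.write('example.txt', 'example content\n', flags='')
    mctx = wctx.tomemctx('example: commit made entirely in memory')
    return mctx.commit()
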
class overlayworkingfilectx(committablefilectx):
    """Wraps a ``workingfilectx``, but intercepts all writes into an in-memory
    cache, which can be flushed through later by calling ``flush()``."""

    def __init__(self, repo, path, filelog=None, parent=None):
        super(overlayworkingfilectx, self).__init__(repo, path, filelog,
                                                    parent)
        self._repo = repo
        self._parent = parent
        self._path = path

    def cmp(self, fctx):
        return self.data() != fctx.data()

    def changectx(self):
        return self._parent

    def data(self):
        return self._parent.data(self._path)

    def date(self):
        return self._parent.filedate(self._path)

    def exists(self):
        return self.lexists()

    def lexists(self):
        return self._parent.exists(self._path)

    def copysource(self):
        return self._parent.copydata(self._path)

    def size(self):
        return self._parent.size(self._path)

    def markcopied(self, origin):
        self._parent.markcopied(self._path, origin)

    def audit(self):
        pass

    def flags(self):
        return self._parent.flags(self._path)

    def setflags(self, islink, isexec):
        return self._parent.setflags(self._path, islink, isexec)

    def write(self, data, flags, backgroundclose=False, **kwargs):
        return self._parent.write(self._path, data, flags, **kwargs)

    def remove(self, ignoremissing=False):
        return self._parent.remove(self._path)

    def clearunknown(self):
        pass

class workingcommitctx(workingctx):
    """A workingcommitctx object makes access to data related to
    the revision being committed convenient.

    This hides changes in the working directory, if they aren't
    committed in this context.
    """
    def __init__(self, repo, changes,
                 text="", user=None, date=None, extra=None):
        super(workingcommitctx, self).__init__(repo, text, user, date, extra,
                                               changes)

    def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
        """Return matched files only in ``self._status``

        Uncommitted files appear "clean" via this context, even if
        they aren't actually so in the working directory.
        """
        if clean:
            clean = [f for f in self._manifest if f not in self._changedset]
        else:
            clean = []
        return scmutil.status([f for f in self._status.modified if match(f)],
                              [f for f in self._status.added if match(f)],
                              [f for f in self._status.removed if match(f)],
                              [], [], [], clean)

    @propertycache
    def _changedset(self):
        """Return the set of files changed in this context
        """
        changed = set(self._status.modified)
        changed.update(self._status.added)
        changed.update(self._status.removed)
        return changed

def makecachingfilectxfn(func):
    """Create a filectxfn that caches based on the path.

    We can't use util.cachefunc because it uses all arguments as the cache
    key and this creates a cycle since the arguments include the repo and
    memctx.
    """
    cache = {}

    def getfilectx(repo, memctx, path):
        if path not in cache:
            cache[path] = func(repo, memctx, path)
        return cache[path]

    return getfilectx

def memfilefromctx(ctx):
    """Given a context, return a memfilectx for ctx[path]

    This is a convenience method for building a memctx based on another
    context.
    """
    def getfilectx(repo, memctx, path):
        fctx = ctx[path]
        copysource = fctx.copysource()
        return memfilectx(repo, memctx, path, fctx.data(),
                          islink=fctx.islink(), isexec=fctx.isexec(),
                          copysource=copysource)

    return getfilectx

def memfilefrompatch(patchstore):
    """Given a patch (e.g. patchstore object), return a memfilectx

    This is a convenience method for building a memctx based on a patchstore.
    """
    def getfilectx(repo, memctx, path):
        data, mode, copysource = patchstore.getfile(path)
        if data is None:
            return None
        islink, isexec = mode
        return memfilectx(repo, memctx, path, data, islink=islink,
                          isexec=isexec, copysource=copysource)

    return getfilectx

class memctx(committablectx):
    """Use memctx to perform in-memory commits via localrepo.commitctx().

    Revision information is supplied at initialization time, while the
    related file data is made available through a callback
    mechanism. 'repo' is the current localrepo, 'parents' is a
    sequence of two parent revisions identifiers (pass None for every
    missing parent), 'text' is the commit message and 'files' lists
    names of files touched by the revision (normalized and relative to
    repository root).

    filectxfn(repo, memctx, path) is a callable receiving the
    repository, the current memctx object and the normalized path of
    the requested file, relative to the repository root. It is fired by the
    commit function for every file in 'files', but the call order is
    undefined. If the file is available in the revision being
    committed (updated or added), filectxfn returns a memfilectx
    object. If the file was removed, filectxfn returns None for recent
    Mercurial. Moved files are represented by marking the source file
    removed and the new file added with copy information (see
    memfilectx).

    user receives the committer name and defaults to the current
    repository username, date is the commit date in any format
    supported by dateutil.parsedate() and defaults to the current date, extra
    is a dictionary of metadata or is left empty.
    """

    # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
    # Extensions that need to retain compatibility across Mercurial 3.1 can use
    # this field to determine what to do in filectxfn.
    _returnnoneformissingfiles = True

    def __init__(self, repo, parents, text, files, filectxfn, user=None,
                 date=None, extra=None, branch=None, editor=False):
        super(memctx, self).__init__(repo, text, user, date, extra,
                                     branch=branch)
        self._rev = None
        self._node = None
        parents = [(p or nullid) for p in parents]
        p1, p2 = parents
        self._parents = [self._repo[p] for p in (p1, p2)]
        files = sorted(set(files))
        self._files = files
        self.substate = {}

        if isinstance(filectxfn, patch.filestore):
            filectxfn = memfilefrompatch(filectxfn)
        elif not callable(filectxfn):
            # if store is not callable, wrap it in a function
            filectxfn = memfilefromctx(filectxfn)

        # memoizing increases performance for e.g. vcs convert scenarios.
        self._filectxfn = makecachingfilectxfn(filectxfn)

        if editor:
            self._text = editor(self._repo, self, [])
            self._repo.savecommitmessage(self._text)

    def filectx(self, path, filelog=None):
        """get a file context from the working directory

        Returns None if file doesn't exist and should be removed."""
        return self._filectxfn(self._repo, self, path)

    def commit(self):
        """commit context to the repo"""
        return self._repo.commitctx(self)

    @propertycache
    def _manifest(self):
        """generate a manifest based on the return values of filectxfn"""

        # keep this simple for now; just worry about p1
        pctx = self._parents[0]
        man = pctx.manifest().copy()

        for f in self._status.modified:
            man[f] = modifiednodeid

        for f in self._status.added:
            man[f] = addednodeid

        for f in self._status.removed:
            if f in man:
                del man[f]

        return man

    @propertycache
    def _status(self):
        """Calculate exact status from ``files`` specified at construction
        """
        man1 = self.p1().manifest()
        p2 = self._parents[1]
        # "1 < len(self._parents)" can't be used for checking
        # existence of the 2nd parent, because "memctx._parents" is
        # explicitly initialized as a list whose length is always 2.
        if p2.node() != nullid:
            man2 = p2.manifest()
            managing = lambda f: f in man1 or f in man2
        else:
            managing = lambda f: f in man1

        modified, added, removed = [], [], []
        for f in self._files:
            if not managing(f):
                added.append(f)
            elif self[f]:
                modified.append(f)
            else:
                removed.append(f)

        return scmutil.status(modified, added, removed, [], [], [], [])

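# Hedged usage sketch (not part of the original module): the minimal
# filectxfn protocol described in the ``memctx`` docstring above.
# ``repo`` is assumed to be a ``localrepo``; the file name and content
# are made up, and nothing here runs at import time.
def _examplememctxcommit(repo):
    def getfilectx(repo, memctx, path):
        # Return a memfilectx for files present in the revision; returning
        # None instead would record ``path`` as deleted.
        return memfilectx(repo, memctx, path, 'example content\n')
    parents = (repo['.'].node(), None)  # None stands in for a missing parent
    mctx = memctx(repo, parents, 'example: commit built via memctx',
                  ['example.txt'], getfilectx)
    return mctx.commit()
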
class memfilectx(committablefilectx):
    """memfilectx represents an in-memory file to commit.

    See memctx and committablefilectx for more details.
    """
    def __init__(self, repo, changectx, path, data, islink=False,
                 isexec=False, copysource=None):
        """
        path is the normalized file path relative to repository root.
        data is the file content as a string.
        islink is True if the file is a symbolic link.
        isexec is True if the file is executable.
        copysource is the source file path if the current file was copied in
        the revision being committed, or None."""
        super(memfilectx, self).__init__(repo, path, None, changectx)
        self._data = data
        if islink:
            self._flags = 'l'
        elif isexec:
            self._flags = 'x'
        else:
            self._flags = ''
        self._copysource = copysource

    def copysource(self):
        return self._copysource

    def cmp(self, fctx):
        return self.data() != fctx.data()

    def data(self):
        return self._data

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        # need to figure out what to do here
        del self._changectx[self._path]

    def write(self, data, flags, **kwargs):
        """wraps repo.wwrite"""
        self._data = data


class metadataonlyctx(committablectx):
    """Like memctx, but reuses the manifest of a different commit.
    Intended to be used by lightweight operations that are creating
    metadata-only changes.

    Revision information is supplied at initialization time. 'repo' is the
    current localrepo, 'ctx' is the original revision whose manifest we're
    reusing, 'parents' is a sequence of two parent revisions identifiers
    (pass None for every missing parent), 'text' is the commit message.

    user receives the committer name and defaults to the current repository
    username, date is the commit date in any format supported by
    dateutil.parsedate() and defaults to the current date, extra is a
    dictionary of metadata or is left empty.
    """
    def __init__(self, repo, originalctx, parents=None, text=None, user=None,
                 date=None, extra=None, editor=False):
        if text is None:
            text = originalctx.description()
        super(metadataonlyctx, self).__init__(repo, text, user, date, extra)
        self._rev = None
        self._node = None
        self._originalctx = originalctx
        self._manifestnode = originalctx.manifestnode()
        if parents is None:
            parents = originalctx.parents()
        else:
            parents = [repo[p] for p in parents if p is not None]
        parents = parents[:]
        while len(parents) < 2:
            parents.append(repo[nullid])
        p1, p2 = self._parents = parents

        # sanity check to ensure that the reused manifest parents are
        # manifests of our commit parents
        mp1, mp2 = self.manifestctx().parents
        if p1 != nullid and p1.manifestnode() != mp1:
            raise RuntimeError(r"can't reuse the manifest: its p1 "
                               r"doesn't match the new ctx p1")
        if p2 != nullid and p2.manifestnode() != mp2:
            raise RuntimeError(r"can't reuse the manifest: "
                               r"its p2 doesn't match the new ctx p2")

        self._files = originalctx.files()
        self.substate = {}

        if editor:
            self._text = editor(self._repo, self, [])
            self._repo.savecommitmessage(self._text)

    def manifestnode(self):
        return self._manifestnode

    @property
    def _manifestctx(self):
        return self._repo.manifestlog[self._manifestnode]

    def filectx(self, path, filelog=None):
        return self._originalctx.filectx(path, filelog=filelog)

    def commit(self):
        """commit context to the repo"""
        return self._repo.commitctx(self)

    @property
    def _manifest(self):
        return self._originalctx.manifest()

    @propertycache
    def _status(self):
        """Calculate exact status from ``files`` specified in the ``origctx``
        and parents manifests.
        """
        man1 = self.p1().manifest()
        p2 = self._parents[1]
        # "1 < len(self._parents)" can't be used for checking
        # existence of the 2nd parent, because "metadataonlyctx._parents" is
        # explicitly initialized as a list whose length is always 2.
        if p2.node() != nullid:
            man2 = p2.manifest()
            managing = lambda f: f in man1 or f in man2
        else:
            managing = lambda f: f in man1

        modified, added, removed = [], [], []
        for f in self._files:
            if not managing(f):
                added.append(f)
            elif f in self:
                modified.append(f)
            else:
                removed.append(f)

        return scmutil.status(modified, added, removed, [], [], [], [])

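# Hedged usage sketch (not part of the original module): rewriting only
# the description of an existing changeset while reusing its manifest --
# the kind of lightweight metadata-only change ``metadataonlyctx`` is
# meant for. Obsoleting or stripping the old changeset afterwards is
# deliberately left out of this sketch.
def _examplereword(repo, rev, newtext):
    origctx = repo[rev]
    mdctx = metadataonlyctx(repo, origctx, text=newtext)
    return mdctx.commit()
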
class arbitraryfilectx(object):
    """Allows you to use filectx-like functions on a file in an arbitrary
    location on disk, possibly not in the working directory.
    """
    def __init__(self, path, repo=None):
        # Repo is optional because contrib/simplemerge uses this class.
        self._repo = repo
        self._path = path

    def cmp(self, fctx):
        # filecmp follows symlinks whereas `cmp` should not, so skip the fast
        # path if either side is a symlink.
        symlinks = ('l' in self.flags() or 'l' in fctx.flags())
        if not symlinks and isinstance(fctx, workingfilectx) and self._repo:
            # Add a fast-path for merge if both sides are disk-backed.
            # Note that filecmp uses the opposite return values (True if same)
            # from our cmp functions (True if different).
            return not filecmp.cmp(self.path(), self._repo.wjoin(fctx.path()))
        return self.data() != fctx.data()

    def path(self):
        return self._path

    def flags(self):
        return ''

    def data(self):
        return util.readfile(self._path)

    def decodeddata(self):
        with open(self._path, "rb") as f:
            return f.read()

    def remove(self):
        util.unlink(self._path)

    def write(self, data, flags, **kwargs):
        assert not flags
        with open(self._path, "wb") as f:
            f.write(data)
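
# Hedged usage sketch (not part of the original module): comparing an
# arbitrary on-disk file against a file tracked in the working
# directory, which takes the filecmp fast path described in
# ``arbitraryfilectx.cmp()``. ``repo`` is assumed to be a ``localrepo``.
def _examplearbitrarycmp(repo, diskpath, trackedpath):
    afctx = arbitraryfilectx(diskpath, repo=repo)
    wfctx = repo[None][trackedpath]  # workingfilectx for the tracked file
    return afctx.cmp(wfctx)  # True if the contents differ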
@@ -1,811 +1,836 b''
1 # copies.py - copy detection for Mercurial
1 # copies.py - copy detection for Mercurial
2 #
2 #
3 # Copyright 2008 Matt Mackall <mpm@selenic.com>
3 # Copyright 2008 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import collections
10 import collections
11 import heapq
11 import heapq
12 import os
12 import os
13
13
14 from .i18n import _
14 from .i18n import _
15
15
16 from . import (
16 from . import (
17 match as matchmod,
17 match as matchmod,
18 node,
18 node,
19 pathutil,
19 pathutil,
20 util,
20 util,
21 )
21 )
22 from .utils import (
22 from .utils import (
23 stringutil,
23 stringutil,
24 )
24 )
25
25
def _findlimit(repo, ctxa, ctxb):
    """
    Find the last revision that needs to be checked to ensure that a full
    transitive closure for file copies can be properly calculated.
    Generally, this means finding the earliest revision number that's an
    ancestor of a or b but not both, except when a or b is a direct descendant
    of the other, in which case we can return the minimum revnum of a and b.
    """

    # basic idea:
    # - mark a and b with different sides
    # - if a parent's children are all on the same side, the parent is
    #   on that side, otherwise it is on no side
    # - walk the graph in topological order with the help of a heap;
    #   - add unseen parents to side map
    #   - clear side of any parent that has children on different sides
    #   - track number of interesting revs that might still be on a side
    #   - track the lowest interesting rev seen
    #   - quit when interesting revs is zero

    cl = repo.changelog
    wdirparents = None
    a = ctxa.rev()
    b = ctxb.rev()
    if a is None:
        wdirparents = (ctxa.p1(), ctxa.p2())
        a = node.wdirrev
    if b is None:
        assert not wdirparents
        wdirparents = (ctxb.p1(), ctxb.p2())
        b = node.wdirrev

    side = {a: -1, b: 1}
    visit = [-a, -b]
    heapq.heapify(visit)
    interesting = len(visit)
    limit = node.wdirrev

    while interesting:
        r = -heapq.heappop(visit)
        if r == node.wdirrev:
            parents = [pctx.rev() for pctx in wdirparents]
        else:
            parents = cl.parentrevs(r)
            if parents[1] == node.nullrev:
                parents = parents[:1]
        for p in parents:
            if p not in side:
                # first time we see p; add it to visit
                side[p] = side[r]
                if side[p]:
                    interesting += 1
                heapq.heappush(visit, -p)
            elif side[p] and side[p] != side[r]:
                # p was interesting but now we know better
                side[p] = 0
                interesting -= 1
        if side[r]:
            limit = r # lowest rev visited
            interesting -= 1

    # Consider the following flow (see test-commit-amend.t under issue4405):
    # 1/ File 'a0' committed
    # 2/ File renamed from 'a0' to 'a1' in a new commit (call it 'a1')
    # 3/ Move back to first commit
    # 4/ Create a new commit via revert to contents of 'a1' (call it 'a1-amend')
    # 5/ Rename file from 'a1' to 'a2' and commit --amend 'a1-msg'
    #
    # During the amend in step five, we will be in this state:
    #
    # @ 3 temporary amend commit for a1-amend
    # |
    # o 2 a1-amend
    # |
    # | o 1 a1
    # |/
    # o 0 a0
    #
    # When _findlimit is called, a and b are revs 3 and 0, so limit will be 2,
    # yet the filelog has the copy information in rev 1 and we will not look
    # back far enough unless we also look at the a and b as candidates.
    # This only occurs when a is a descendant of b or vice versa.
    return min(limit, a, b)

def _filter(src, dst, t):
    """filters out invalid copies after chaining"""

    # When _chain()'ing copies in 'a' (from 'src' via some other commit 'mid')
    # with copies in 'b' (from 'mid' to 'dst'), we can get the different cases
    # in the following table (not including trivial cases). For example, case 2
    # is where a file existed in 'src' and remained under that name in 'mid'
    # and then was renamed between 'mid' and 'dst'.
    #
    # case  src  mid  dst  result
    #   1    x    y    -     -
    #   2    x    y    y    x->y
    #   3    x    y    x     -
    #   4    x    y    z    x->z
    #   5    -    x    y     -
    #   6    x    x    y    x->y
    #
    # _chain() takes care of chaining the copies in 'a' and 'b', but it
    # cannot tell the difference between cases 1 and 2, between 3 and 4, or
    # between 5 and 6, so it includes all cases in its result.
    # Cases 1, 3, and 5 are then removed by _filter().

    for k, v in list(t.items()):
        # remove copies from files that didn't exist
        if v not in src:
            del t[k]
        # remove criss-crossed copies
        elif k in src and v in dst:
            del t[k]
        # remove copies to files that were then removed
        elif k not in dst:
            del t[k]

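# A minimal illustration of _filter() (hypothetical names; 'src' and 'dst'
# only need membership tests here, so plain sets can stand in for contexts):
#
#   t = {'c': 'a', 'b': 'a'}
#   _filter(src={'a'}, dst={'c'}, t=t)
#   # t is now {'c': 'a'}: the entry for 'b' is dropped because 'b' no
#   # longer exists in dst (a copy whose destination was later removed).
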
def _chain(a, b):
    """chain two sets of copies 'a' and 'b'"""
    t = a.copy()
    for k, v in b.iteritems():
        if v in t:
            t[k] = t[v]
        else:
            t[k] = v
    return t

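# A minimal illustration of _chain() with hypothetical file names:
#
#   _chain({'b': 'a'}, {'c': 'b'})  ->  {'b': 'a', 'c': 'a'}
#
# 'c' is traced through the intermediate name 'b' back to the original
# source 'a'. Invalid entries that chaining cannot recognize (cases 1, 3
# and 5 in the table above) are only pruned afterwards, by _filter().
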
def _tracefile(fctx, am, basemf, limit):
    """return the path of the ancestor of fctx that is present in ancestor
    manifest am, stopping after the first ancestor lower than limit"""

    for f in fctx.ancestors():
        path = f.path()
        if am.get(path, None) == f.filenode():
            return path
        if basemf and basemf.get(path, None) == f.filenode():
            return path
        if not f.isintroducedafter(limit):
            return None

def _dirstatecopies(repo, match=None):
    ds = repo.dirstate
    c = ds.copies().copy()
    for k in list(c):
        if ds[k] not in 'anm' or (match and not match(k)):
            del c[k]
    return c

def _computeforwardmissing(a, b, match=None):
    """Computes which files are in b but not a.
    This is its own function so extensions can easily wrap this call to see
    what files _forwardcopies is about to process.
    """
    ma = a.manifest()
    mb = b.manifest()
    return mb.filesnotin(ma, match=match)

def usechangesetcentricalgo(repo):
    """Checks if we should use changeset-centric copy algorithms"""
    return (repo.ui.config('experimental', 'copies.read-from') in
            ('changeset-only', 'compatibility'))

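# The configuration checked above would be enabled with something like the
# following hgrc snippet (sketch; both accepted values shown):
#
#   [experimental]
#   copies.read-from = compatibility
#   # or: copies.read-from = changeset-only
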
def _committedforwardcopies(a, b, base, match):
    """Like _forwardcopies(), but b.rev() cannot be None (working copy)"""
    # files might have to be traced back to the fctx parent of the last
    # one-side-only changeset, but not further back than that
    repo = a._repo

    if usechangesetcentricalgo(repo):
        return _changesetforwardcopies(a, b, match)

    debug = repo.ui.debugflag and repo.ui.configbool('devel', 'debug.copies')
    dbg = repo.ui.debug
    if debug:
        dbg('debug.copies: looking into rename from %s to %s\n'
            % (a, b))
    limit = _findlimit(repo, a, b)
    if debug:
        dbg('debug.copies: search limit: %d\n' % limit)
    am = a.manifest()
    basemf = None if base is None else base.manifest()

    # find where new files came from
    # we currently don't try to find where old files went, too expensive
    # this means we can miss a case like 'hg rm b; hg cp a b'
    cm = {}

    # Computing the forward missing is quite expensive on large manifests,
    # since it compares the entire manifests. We can optimize it in the common
    # use case of computing what copies are in a commit versus its parent
    # (like during a rebase or histedit). Note, we exclude merge commits from
    # this optimization, since the ctx.files() for a merge commit is not
    # correct for this comparison.
    forwardmissingmatch = match
    if b.p1() == a and b.p2().node() == node.nullid:
        filesmatcher = matchmod.exact(b.files())
        forwardmissingmatch = matchmod.intersectmatchers(match, filesmatcher)
    missing = _computeforwardmissing(a, b, match=forwardmissingmatch)

    ancestrycontext = a._repo.changelog.ancestors([b.rev()], inclusive=True)

    if debug:
        dbg('debug.copies: missing files to search: %d\n' % len(missing))

    for f in sorted(missing):
        if debug:
            dbg('debug.copies: tracing file: %s\n' % f)
        fctx = b[f]
        fctx._ancestrycontext = ancestrycontext

        if debug:
            start = util.timer()
        opath = _tracefile(fctx, am, basemf, limit)
        if opath:
            if debug:
                dbg('debug.copies: rename of: %s\n' % opath)
            cm[f] = opath
        if debug:
            dbg('debug.copies: time: %f seconds\n'
                % (util.timer() - start))
    return cm

def _changesetforwardcopies(a, b, match):
    if a.rev() in (node.nullrev, b.rev()):
        return {}

    repo = a.repo()
    children = {}
    cl = repo.changelog
    missingrevs = cl.findmissingrevs(common=[a.rev()], heads=[b.rev()])
    for r in missingrevs:
        for p in cl.parentrevs(r):
            if p == node.nullrev:
                continue
            if p not in children:
                children[p] = [r]
            else:
                children[p].append(r)

    roots = set(children) - set(missingrevs)
    # 'work' contains 3-tuples of a (revision number, parent number, copies).
    # The parent number is only used for knowing which parent the copies dict
    # came from.
    # NOTE: To reduce costly copying of the 'copies' dicts, we reuse the same
    # instance for *one* of the child nodes (the last one). Once an instance
    # has been put on the queue, it is thus no longer safe to modify it.
    # Conversely, it *is* safe to modify an instance popped off the queue.
    work = [(r, 1, {}) for r in roots]
    heapq.heapify(work)
    alwaysmatch = match.always()
    while work:
        r, i1, copies = heapq.heappop(work)
        if work and work[0][0] == r:
            # We are tracing copies from both parents
            r, i2, copies2 = heapq.heappop(work)
            for dst, src in copies2.items():
                # Unlike when copies are stored in the filelog, we consider
                # it a copy even if the destination already existed on the
                # other branch. It's simply too expensive to check if the
                # file existed in the manifest.
                if dst not in copies:
                    # If it was copied on the p1 side, leave it as copied from
                    # that side, even if it was also copied on the p2 side.
                    copies[dst] = copies2[dst]
        if r == b.rev():
            return copies
        for i, c in enumerate(children[r]):
            childctx = repo[c]
            if r == childctx.p1().rev():
                parent = 1
                childcopies = childctx.p1copies()
            else:
                assert r == childctx.p2().rev()
                parent = 2
                childcopies = childctx.p2copies()
            if not alwaysmatch:
                childcopies = {dst: src for dst, src in childcopies.items()
                               if match(dst)}
            # Copy the dict only if later iterations will also need it
            if i != len(children[r]) - 1:
                newcopies = copies.copy()
            else:
                newcopies = copies
            if childcopies:
                newcopies = _chain(newcopies, childcopies)
            for f in childctx.filesremoved():
                if f in newcopies:
                    del newcopies[f]
            heapq.heappush(work, (c, parent, newcopies))
    assert False

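# Sketch of the traversal above on a hypothetical linear graph a -> r -> b:
# 'work' starts as [(a, 1, {})]; popping a chains r's p1copies into the
# dict and pushes an entry for r, and likewise for b; popping r == b.rev()
# returns the accumulated mapping. When a rev is reachable through both of
# its parents, its two queue entries meet at the heappop and are merged,
# with the p1-side copies taking precedence.
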
def _forwardcopies(a, b, base=None, match=None):
    """find {dst@b: src@a} copy mapping where a is an ancestor of b"""

    if base is None:
        base = a
    match = a.repo().narrowmatch(match)
    # check for working copy
    if b.rev() is None:
        cm = _committedforwardcopies(a, b.p1(), base, match)
        # combine copies from dirstate if necessary
        copies = _chain(cm, _dirstatecopies(b._repo, match))
    else:
        copies = _committedforwardcopies(a, b, base, match)
    return copies

def _backwardrenames(a, b, match):
    if a._repo.ui.config('experimental', 'copytrace') == 'off':
        return {}

    # Even though we're not taking copies into account, 1:n rename situations
    # can still exist (e.g. hg cp a b; hg mv a c). In those cases we
    # arbitrarily pick one of the renames.
    # We don't want to pass in "match" here, since that would filter
    # the destination by it. Since we're reversing the copies, we want
    # to filter the source instead.
    f = _forwardcopies(b, a)
    r = {}
    for k, v in sorted(f.iteritems()):
        if match and not match(v):
            continue
        # remove copies
        if v in a:
            continue
        r[v] = k
    return r

def pathcopies(x, y, match=None):
    """find {dst@y: src@x} copy mapping for directed compare"""
    repo = x._repo
    debug = repo.ui.debugflag and repo.ui.configbool('devel', 'debug.copies')
    if debug:
        repo.ui.debug('debug.copies: searching copies from %s to %s\n'
                      % (x, y))
    if x == y or not x or not y:
        return {}
    a = y.ancestor(x)
    if a == x:
        if debug:
            repo.ui.debug('debug.copies: search mode: forward\n')
        if y.rev() is None and x == y.p1():
            # short-circuit to avoid issues with merge states
            return _dirstatecopies(repo, match)
        copies = _forwardcopies(x, y, match=match)
    elif a == y:
        if debug:
            repo.ui.debug('debug.copies: search mode: backward\n')
        copies = _backwardrenames(x, y, match=match)
    else:
        if debug:
            repo.ui.debug('debug.copies: search mode: combined\n')
        base = None
        if a.rev() != node.nullrev:
            base = x
        copies = _chain(_backwardrenames(x, a, match=match),
                        _forwardcopies(a, y, base, match=match))
    _filter(x, y, copies)
    return copies

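# Typical usage (sketch; the revision names are hypothetical):
#
#   copies.pathcopies(repo[rev1], repo[rev2])
#
# returns e.g. {'new.txt': 'old.txt'} if 'old.txt' was copied or renamed to
# 'new.txt' somewhere between the two revisions. The branches above pick
# forward tracing, backward tracing, or a combination chained through the
# common ancestor, depending on how x and y are related.
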
def mergecopies(repo, c1, c2, base):
    """
    Finds moves and copies between context c1 and c2 that are relevant for
    merging. 'base' will be used as the merge base.

    Copytracing is used in commands like rebase, merge, unshelve, etc to merge
    files that were moved/copied in one merge parent and modified in another.
    For example:

    o          ---> 4 another commit
    |
    |   o      ---> 3 commit that modifies a.txt
    |  /
    o /        ---> 2 commit that moves a.txt to b.txt
    |/
    o          ---> 1 merge base

    If we try to rebase revision 3 on revision 4, since there is no a.txt in
    revision 4, and if the user has copytrace disabled, we print the following
    message:

    ```other changed <file> which local deleted```

    Returns five dicts: "copy", "movewithdir", "diverge", "renamedelete" and
    "dirmove".

    "copy" is a mapping from destination name -> source name,
    where source is in c1 and destination is in c2 or vice-versa.

    "movewithdir" is a mapping from source name -> destination name,
    where the file at source, present in one context but not the other,
    needs to be moved to destination by the merge process, because the
    other context moved the directory it is in.

    "diverge" is a mapping of source name -> list of destination names
    for divergent renames.

    "renamedelete" is a mapping of source name -> list of destination
    names for files deleted in c1 that were renamed in c2 or vice-versa.

    "dirmove" is a mapping of detected source dir -> destination dir renames.
    This is needed for handling changes to new files previously grafted into
    renamed directories.

    This function calls different copytracing algorithms based on config.
    """
    # avoid silly behavior for update from empty dir
    if not c1 or not c2 or c1 == c2:
        return {}, {}, {}, {}, {}

    narrowmatch = c1.repo().narrowmatch()

    # avoid silly behavior for parent -> working dir
    if c2.node() is None and c1.node() == repo.dirstate.p1():
        return _dirstatecopies(repo, narrowmatch), {}, {}, {}, {}

    copytracing = repo.ui.config('experimental', 'copytrace')
    if stringutil.parsebool(copytracing) is False:
        # stringutil.parsebool() returns None when it is unable to parse the
        # value, so we should rely on making sure copytracing is on in such
        # cases
        return {}, {}, {}, {}, {}

    if usechangesetcentricalgo(repo):
        # The heuristics don't make sense when we need changeset-centric algos
        return _fullcopytracing(repo, c1, c2, base)

    # Copy trace disabling is explicitly below the node == p1 logic above
    # because the logic above is required for a simple copy to be kept across
    # a rebase.
    if copytracing == 'heuristics':
        # Do full copytracing if only non-public revisions are involved as
        # that will be fast enough and will also cover the copies which could
        # be missed by heuristics
        if _isfullcopytraceable(repo, c1, base):
            return _fullcopytracing(repo, c1, c2, base)
        return _heuristicscopytracing(repo, c1, c2, base)
    else:
        return _fullcopytracing(repo, c1, c2, base)

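# Example of the return shape (hypothetical merge): if c1 renamed 'a' to
# 'b' and c2 modified 'a', full copytracing would typically yield
#
#   copy = {'b': 'a'}, movewithdir = {}, diverge = {},
#   renamedelete = {}, dirmove = {}
#
# so the merge machinery knows to merge c2's changes to 'a' into 'b'.
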
def _isfullcopytraceable(repo, c1, base):
    """ Checks whether base, source and destination are all non-public
    branches; if so, the full copytrace algorithm can be used for increased
    capabilities since it will be fast enough.

    `experimental.copytrace.sourcecommitlimit` can be used to set a limit on
    the number of changesets from c1 to base; if the number of changesets is
    more than the limit, the full copytracing algorithm won't be used.
    """
    if c1.rev() is None:
        c1 = c1.p1()
    if c1.mutable() and base.mutable():
        sourcecommitlimit = repo.ui.configint('experimental',
                                              'copytrace.sourcecommitlimit')
        commits = len(repo.revs('%d::%d', base.rev(), c1.rev()))
        return commits < sourcecommitlimit
    return False

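# The limit checked above could be tuned with a configuration such as the
# following (the value shown is hypothetical):
#
#   [experimental]
#   copytrace = heuristics
#   copytrace.sourcecommitlimit = 50
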
def _checksinglesidecopies(src, dsts1, m1, m2, mb, c2, base,
                           copy, renamedelete):
    if src not in m2:
        # deleted on side 2
        if src not in m1:
            # renamed on side 1, deleted on side 2
            renamedelete[src] = dsts1
    elif m2[src] != mb[src]:
        if not _related(c2[src], base[src]):
            return
        # modified on side 2
        for dst in dsts1:
            if dst not in m2:
                # dst not added on side 2 (handle as regular
                # "both created" case in manifestmerge otherwise)
                copy[dst] = src

def _fullcopytracing(repo, c1, c2, base):
    """ The full copytracing algorithm which finds all the new files that were
    added from merge base up to the top commit and for each file it checks if
    this file was copied from another file.

    This is pretty slow when a lot of changesets are involved but will track
    all the copies.
    """
    m1 = c1.manifest()
    m2 = c2.manifest()
    mb = base.manifest()

    copies1 = pathcopies(base, c1)
    copies2 = pathcopies(base, c2)

    inversecopies1 = {}
    inversecopies2 = {}
    for dst, src in copies1.items():
        inversecopies1.setdefault(src, []).append(dst)
    for dst, src in copies2.items():
        inversecopies2.setdefault(src, []).append(dst)

    copy = {}
    diverge = {}
    renamedelete = {}
    allsources = set(inversecopies1) | set(inversecopies2)
    for src in allsources:
        dsts1 = inversecopies1.get(src)
        dsts2 = inversecopies2.get(src)
        if dsts1 and dsts2:
            # copied/renamed on both sides
            if src not in m1 and src not in m2:
                # renamed on both sides
                dsts1 = set(dsts1)
                dsts2 = set(dsts2)
                # If there's some overlap in the rename destinations, we
                # consider it not divergent. For example, if side 1 copies 'a'
                # to 'b' and 'c' and deletes 'a', and side 2 copies 'a' to 'c'
                # and 'd' and deletes 'a', then 'c' is the common destination
                # and only it is recorded as a copy of 'a'.
                if dsts1 & dsts2:
                    for dst in (dsts1 & dsts2):
                        copy[dst] = src
                else:
                    diverge[src] = sorted(dsts1 | dsts2)
            elif src in m1 and src in m2:
                # copied on both sides
                dsts1 = set(dsts1)
                dsts2 = set(dsts2)
                for dst in (dsts1 & dsts2):
                    copy[dst] = src
            # TODO: Handle cases where it was renamed on one side and copied
            # on the other side
        elif dsts1:
            # copied/renamed only on side 1
            _checksinglesidecopies(src, dsts1, m1, m2, mb, c2, base,
                                   copy, renamedelete)
        elif dsts2:
            # copied/renamed only on side 2
            _checksinglesidecopies(src, dsts2, m2, m1, mb, c1, base,
                                   copy, renamedelete)

    renamedeleteset = set()
    divergeset = set()
    for dsts in diverge.values():
        divergeset.update(dsts)
    for dsts in renamedelete.values():
        renamedeleteset.update(dsts)

    # find interesting file sets from manifests
    addedinm1 = m1.filesnotin(mb, repo.narrowmatch())
    addedinm2 = m2.filesnotin(mb, repo.narrowmatch())
    u1 = sorted(addedinm1 - addedinm2)
    u2 = sorted(addedinm2 - addedinm1)

    header = "  unmatched files in %s"
    if u1:
        repo.ui.debug("%s:\n   %s\n" % (header % 'local', "\n   ".join(u1)))
    if u2:
        repo.ui.debug("%s:\n   %s\n" % (header % 'other', "\n   ".join(u2)))

    fullcopy = copies1.copy()
    fullcopy.update(copies2)
    if not fullcopy:
        return copy, {}, diverge, renamedelete, {}

    if repo.ui.debugflag:
        repo.ui.debug("  all copies found (* = to merge, ! = divergent, "
                      "% = renamed and deleted):\n")
        for f in sorted(fullcopy):
            note = ""
            if f in copy:
                note += "*"
            if f in divergeset:
                note += "!"
            if f in renamedeleteset:
                note += "%"
            repo.ui.debug("   src: '%s' -> dst: '%s' %s\n" % (fullcopy[f], f,
                                                              note))
    del divergeset

    repo.ui.debug("  checking for directory renames\n")

    # generate a directory move map
    d1, d2 = c1.dirs(), c2.dirs()
    invalid = set()
    dirmove = {}

    # examine each file copy for a potential directory move, which is
    # when all the files in a directory are moved to a new directory
    for dst, src in fullcopy.iteritems():
        dsrc, ddst = pathutil.dirname(src), pathutil.dirname(dst)
        if dsrc in invalid:
            # already seen to be uninteresting
            continue
        elif dsrc in d1 and ddst in d1:
            # directory wasn't entirely moved locally
            invalid.add(dsrc)
        elif dsrc in d2 and ddst in d2:
            # directory wasn't entirely moved remotely
            invalid.add(dsrc)
        elif dsrc in dirmove and dirmove[dsrc] != ddst:
            # files from the same directory moved to two different places
            invalid.add(dsrc)
        else:
            # looks good so far
            dirmove[dsrc] = ddst

    for i in invalid:
        if i in dirmove:
            del dirmove[i]
    del d1, d2, invalid

    if not dirmove:
        return copy, {}, diverge, renamedelete, {}

    dirmove = {k + "/": v + "/" for k, v in dirmove.iteritems()}

    for d in dirmove:
        repo.ui.debug("   discovered dir src: '%s' -> dst: '%s'\n" %
                      (d, dirmove[d]))

    movewithdir = {}
    # check unaccounted nonoverlapping files against directory moves
    for f in u1 + u2:
        if f not in fullcopy:
            for d in dirmove:
                if f.startswith(d):
                    # new file added in a directory that was moved, move it
                    df = dirmove[d] + f[len(d):]
                    if df not in copy:
                        movewithdir[f] = df
                        repo.ui.debug(("   pending file src: '%s' -> "
                                       "dst: '%s'\n") % (f, df))
                    break

    return copy, movewithdir, diverge, renamedelete, dirmove

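# Directory-move illustration (hypothetical paths): if every file that was
# under 'src/' in the base has moved to 'dst/' on one side, dirmove maps
# 'src/' -> 'dst/', and a file 'src/new.c' added on the other side ends up
# in movewithdir as {'src/new.c': 'dst/new.c'}.
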
def _heuristicscopytracing(repo, c1, c2, base):
    """ Fast copytracing using filename heuristics

    Assumes that moves or renames are of the following two types:

    1) Inside a directory only (same directory name but different filenames)
    2) Move from one directory to another
       (same filenames but different directory names)

    Works only when there are no merge commits in the "source branch".
    Source branch is commits from base up to c2 not including base.

    If a merge is involved it falls back to _fullcopytracing().

    Can be used by setting the following config:

        [experimental]
        copytrace = heuristics

    In some cases the copy/move candidates found by heuristics can be very
    large in number and that will make the algorithm slow. The number of
    possible candidates to check can be limited by using the config
    `experimental.copytrace.movecandidateslimit` which defaults to 100.
    """

    if c1.rev() is None:
        c1 = c1.p1()
    if c2.rev() is None:
        c2 = c2.p1()

    copies = {}

    changedfiles = set()
    m1 = c1.manifest()
    if not repo.revs('%d::%d', base.rev(), c2.rev()):
        # If base is not in c2 branch, we switch to fullcopytracing
        repo.ui.debug("switching to full copytracing as base is not "
                      "an ancestor of c2\n")
        return _fullcopytracing(repo, c1, c2, base)

    ctx = c2
    while ctx != base:
        if len(ctx.parents()) == 2:
            # To keep things simple let's not handle merges
            repo.ui.debug("switching to full copytracing because of merges\n")
            return _fullcopytracing(repo, c1, c2, base)
        changedfiles.update(ctx.files())
        ctx = ctx.p1()

    cp = _forwardcopies(base, c2)
    for dst, src in cp.iteritems():
        if src in m1:
            copies[dst] = src

    # file is missing if it isn't present in the destination, but is present
    # in the base and present in the source.
    # Presence in the base is important to exclude added files, presence in
    # the source is important to exclude removed files.
    filt = lambda f: f not in m1 and f in base and f in c2
    missingfiles = [f for f in changedfiles if filt(f)]

    if missingfiles:
        basenametofilename = collections.defaultdict(list)
        dirnametofilename = collections.defaultdict(list)

        for f in m1.filesnotin(base.manifest()):
            basename = os.path.basename(f)
            dirname = os.path.dirname(f)
            basenametofilename[basename].append(f)
            dirnametofilename[dirname].append(f)

        for f in missingfiles:
            basename = os.path.basename(f)
            dirname = os.path.dirname(f)
            samebasename = basenametofilename[basename]
            samedirname = dirnametofilename[dirname]
            movecandidates = samebasename + samedirname
            # f is guaranteed to be present in c2, that's why
            # c2.filectx(f) won't fail
            f2 = c2.filectx(f)
            # we can have a lot of candidates which can slow down the
            # heuristics, so use a config value to limit the number of
            # candidate moves to check
            maxcandidates = repo.ui.configint('experimental',
                                              'copytrace.movecandidateslimit')

            if len(movecandidates) > maxcandidates:
                repo.ui.status(_("skipping copytracing for '%s', more "
                                 "candidates than the limit: %d\n")
                               % (f, len(movecandidates)))
                continue

            for candidate in movecandidates:
                f1 = c1.filectx(candidate)
                if _related(f1, f2):
                    # if there are a few related copies then we'll merge
                    # changes into all of them. This matches the behaviour
                    # of upstream copytracing
                    copies[candidate] = f

    return copies, {}, {}, {}, {}

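# Heuristic-match illustration (hypothetical paths): a file such as
# 'docs/readme.txt' that exists in base and c2 but is missing from c1 is
# compared against files newly added in c1 that share its basename
# (e.g. 'manual/readme.txt') or its directory (e.g. 'docs/intro.txt');
# every candidate whose filelog history is _related() to it is recorded as
# a copy destination.
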
def _related(f1, f2):
    """return True if f1 and f2 filectx have a common ancestor

    Walk back to common ancestor to see if the two files originate
    from the same file. Since workingfilectx's rev() is None it messes
    up the integer comparison logic, hence the pre-step check for
    None (f1 and f2 can only be workingfilectx's initially).
    """

    if f1 == f2:
        return True # a match

    g1, g2 = f1.ancestors(), f2.ancestors()
    try:
        f1r, f2r = f1.linkrev(), f2.linkrev()

        if f1r is None:
            f1 = next(g1)
        if f2r is None:
            f2 = next(g2)

        while True:
            f1r, f2r = f1.linkrev(), f2.linkrev()
            if f1r > f2r:
                f1 = next(g1)
            elif f2r > f1r:
                f2 = next(g2)
            else: # f1 and f2 point to files in the same linkrev
                return f1 == f2 # true if they point to the same file
    except StopIteration:
        return False

def duplicatecopies(repo, wctx, rev, fromrev, skiprev=None):
    """reproduce copies from fromrev to rev in the dirstate

    If skiprev is specified, it's a revision that should be used to
    filter copy records. Any copies that occur between fromrev and
    skiprev will not be duplicated, even if they appear in the set of
    copies between fromrev and rev.
    """
    exclude = {}
    ctraceconfig = repo.ui.config('experimental', 'copytrace')
    bctrace = stringutil.parsebool(ctraceconfig)
    if (skiprev is not None and
        (ctraceconfig == 'heuristics' or bctrace or bctrace is None)):
        # copytrace='off' skips this line, but not the entire function
        # because the line below is O(size of the repo) during a rebase,
        # while the rest of the function is much faster (and is required for
        # carrying copy metadata across the rebase anyway).
        exclude = pathcopies(repo[fromrev], repo[skiprev])
    for dst, src in pathcopies(repo[fromrev], repo[rev]).iteritems():
        if dst in exclude:
            continue
        if dst in wctx:
            wctx[dst].markcopied(src)

def computechangesetcopies(ctx):
    """return the copies data for a changeset

    The copies data are returned as a pair of dictionaries (p1copies,
    p2copies).

    Each dictionary is in the form: `{newname: oldname}`
    """
    p1copies = {}
    p2copies = {}
    p1 = ctx.p1()
    p2 = ctx.p2()
    narrowmatch = ctx._repo.narrowmatch()
    for dst in ctx.files():
        if not narrowmatch(dst) or dst not in ctx:
            continue
        copied = ctx[dst].renamed()
        if not copied:
            continue
        src, srcnode = copied
        if src in p1 and p1[src].filenode() == srcnode:
            p1copies[dst] = src
        elif src in p2 and p2[src].filenode() == srcnode:
            p2copies[dst] = src
    return p1copies, p2copies
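
# Usage sketch (hypothetical changeset): for a commit created after
# 'hg cp a b' on top of its first parent, computechangesetcopies(ctx)
# would return ({'b': 'a'}, {}).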