##// END OF EJS Templates
workingfilectx: add backgroundclose as a kwarg to write()...
Phil Cohen -
r33085:1e79c66d default
parent child Browse files
Show More
@@ -1,2306 +1,2307
1 # context.py - changeset and file context objects for mercurial
1 # context.py - changeset and file context objects for mercurial
2 #
2 #
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import errno
10 import errno
11 import os
11 import os
12 import re
12 import re
13 import stat
13 import stat
14
14
15 from .i18n import _
15 from .i18n import _
16 from .node import (
16 from .node import (
17 addednodeid,
17 addednodeid,
18 bin,
18 bin,
19 hex,
19 hex,
20 modifiednodeid,
20 modifiednodeid,
21 nullid,
21 nullid,
22 nullrev,
22 nullrev,
23 short,
23 short,
24 wdirid,
24 wdirid,
25 wdirnodes,
25 wdirnodes,
26 wdirrev,
26 wdirrev,
27 )
27 )
28 from . import (
28 from . import (
29 encoding,
29 encoding,
30 error,
30 error,
31 fileset,
31 fileset,
32 match as matchmod,
32 match as matchmod,
33 mdiff,
33 mdiff,
34 obsolete as obsmod,
34 obsolete as obsmod,
35 patch,
35 patch,
36 phases,
36 phases,
37 pycompat,
37 pycompat,
38 repoview,
38 repoview,
39 revlog,
39 revlog,
40 scmutil,
40 scmutil,
41 subrepo,
41 subrepo,
42 util,
42 util,
43 )
43 )
44
44
# Shorthand alias: cache-on-first-access property decorator from util.
propertycache = util.propertycache
46
46
# Predicate: returns a truthy match object when the string contains any
# character outside the printable ASCII range 0x21-0x7f (i.e. anything
# that is whitespace, a control character, or non-ASCII).
nonascii = re.compile(r'[^\x21-\x7f]').search
48
48
class basectx(object):
    """A basectx object represents the common logic for its children:
    changectx: read-only context that is already present in the repo,
    workingctx: a context that represents the working directory and can
                be committed,
    memctx: a context that represents changes in-memory and can also
            be committed."""

    def __new__(cls, repo, changeid='', *args, **kwargs):
        # Passing an existing context through returns it unchanged; this
        # makes basectx(repo, ctx) an identity operation for subclasses too.
        if isinstance(changeid, basectx):
            return changeid

        o = super(basectx, cls).__new__(cls)

        o._repo = repo
        o._rev = nullrev
        o._node = nullid

        return o

    def __bytes__(self):
        return short(self.node())

    __str__ = encoding.strmethod(__bytes__)

    def __int__(self):
        return self.rev()

    def __repr__(self):
        return r"<%s %s>" % (type(self).__name__, str(self))

    def __eq__(self, other):
        # Equal only when the concrete type and revision both match; any
        # object without a _rev attribute is simply unequal.
        try:
            return type(self) == type(other) and self._rev == other._rev
        except AttributeError:
            return False

    def __ne__(self, other):
        return not (self == other)

    def __contains__(self, key):
        return key in self._manifest

    def __getitem__(self, key):
        return self.filectx(key)

    def __iter__(self):
        return iter(self._manifest)

    def _buildstatusmanifest(self, status):
        """Builds a manifest that includes the given status results, if this is
        a working copy context. For non-working copy contexts, it just returns
        the normal manifest."""
        return self.manifest()

    def _matchstatus(self, other, match):
        """return match.always if match is none

        This internal method provides a way for child objects to override the
        match operator.
        """
        return match or matchmod.always(self._repo.root, self._repo.getcwd())

    def _buildstatus(self, other, s, match, listignored, listclean,
                     listunknown):
        """build a status with respect to another context"""
        # Load earliest manifest first for caching reasons. More specifically,
        # if you have revisions 1000 and 1001, 1001 is probably stored as a
        # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
        # 1000 and cache it so that when you read 1001, we just need to apply a
        # delta to what's in the cache. So that's one full reconstruction + one
        # delta application.
        mf2 = None
        if self.rev() is not None and self.rev() < other.rev():
            mf2 = self._buildstatusmanifest(s)
        mf1 = other._buildstatusmanifest(s)
        if mf2 is None:
            mf2 = self._buildstatusmanifest(s)

        modified, added = [], []
        removed = []
        clean = []
        deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
        deletedset = set(deleted)
        d = mf1.diff(mf2, match=match, clean=listclean)
        for fn, value in d.iteritems():
            if fn in deletedset:
                continue
            if value is None:
                clean.append(fn)
                continue
            (node1, flag1), (node2, flag2) = value
            if node1 is None:
                added.append(fn)
            elif node2 is None:
                removed.append(fn)
            elif flag1 != flag2:
                modified.append(fn)
            elif node2 not in wdirnodes:
                # When comparing files between two commits, we save time by
                # not comparing the file contents when the nodeids differ.
                # Note that this means we incorrectly report a reverted change
                # to a file as a modification.
                modified.append(fn)
            elif self[fn].cmp(other[fn]):
                modified.append(fn)
            else:
                clean.append(fn)

        if removed:
            # need to filter files if they are already reported as removed
            unknown = [fn for fn in unknown if fn not in mf1 and
                       (not match or match(fn))]
            ignored = [fn for fn in ignored if fn not in mf1 and
                       (not match or match(fn))]
            # if they're deleted, don't report them as removed
            removed = [fn for fn in removed if fn not in deletedset]

        return scmutil.status(modified, added, removed, deleted, unknown,
                              ignored, clean)

    @propertycache
    def substate(self):
        return subrepo.state(self, self._repo.ui)

    def subrev(self, subpath):
        return self.substate[subpath][1]

    def rev(self):
        return self._rev
    def node(self):
        return self._node
    def hex(self):
        return hex(self.node())
    def manifest(self):
        return self._manifest
    def manifestctx(self):
        return self._manifestctx
    def repo(self):
        return self._repo
    def phasestr(self):
        return phases.phasenames[self.phase()]
    def mutable(self):
        return self.phase() > phases.public

    def getfileset(self, expr):
        return fileset.getfileset(self, expr)

    def obsolete(self):
        """True if the changeset is obsolete"""
        return self.rev() in obsmod.getrevs(self._repo, 'obsolete')

    def extinct(self):
        """True if the changeset is extinct"""
        return self.rev() in obsmod.getrevs(self._repo, 'extinct')

    def unstable(self):
        """True if the changeset is not obsolete but it's ancestor are"""
        return self.rev() in obsmod.getrevs(self._repo, 'unstable')

    def bumped(self):
        """True if the changeset try to be a successor of a public changeset

        Only non-public and non-obsolete changesets may be bumped.
        """
        return self.rev() in obsmod.getrevs(self._repo, 'bumped')

    def divergent(self):
        """Is a successors of a changeset with multiple possible successors set

        Only non-public and non-obsolete changesets may be divergent.
        """
        return self.rev() in obsmod.getrevs(self._repo, 'divergent')

    def troubled(self):
        """True if the changeset is either unstable, bumped or divergent"""
        return self.unstable() or self.bumped() or self.divergent()

    def troubles(self):
        """return the list of troubles affecting this changesets.

        Troubles are returned as strings. possible values are:
        - unstable,
        - bumped,
        - divergent.
        """
        troubles = []
        if self.unstable():
            troubles.append('unstable')
        if self.bumped():
            troubles.append('bumped')
        if self.divergent():
            troubles.append('divergent')
        return troubles

    def parents(self):
        """return contexts for each parent changeset"""
        return self._parents

    def p1(self):
        return self._parents[0]

    def p2(self):
        parents = self._parents
        if len(parents) == 2:
            return parents[1]
        return changectx(self._repo, nullrev)

    def _fileinfo(self, path):
        # Fast path: an already-materialized manifest answers directly.
        if r'_manifest' in self.__dict__:
            try:
                return self._manifest[path], self._manifest.flags(path)
            except KeyError:
                raise error.ManifestLookupError(self._node, path,
                                                _('not found in manifest'))
        # Cheaper path: consult the manifest delta when the file is known to
        # have changed in this revision.
        if r'_manifestdelta' in self.__dict__ or path in self.files():
            if path in self._manifestdelta:
                return (self._manifestdelta[path],
                        self._manifestdelta.flags(path))
        mfl = self._repo.manifestlog
        try:
            node, flag = mfl[self._changeset.manifest].find(path)
        except KeyError:
            raise error.ManifestLookupError(self._node, path,
                                            _('not found in manifest'))

        return node, flag

    def filenode(self, path):
        return self._fileinfo(path)[0]

    def flags(self, path):
        try:
            return self._fileinfo(path)[1]
        except error.LookupError:
            return ''

    def sub(self, path, allowcreate=True):
        '''return a subrepo for the stored revision of path, never wdir()'''
        return subrepo.subrepo(self, path, allowcreate=allowcreate)

    def nullsub(self, path, pctx):
        return subrepo.nullsubrepo(self, path, pctx)

    def workingsub(self, path):
        '''return a subrepo for the stored revision, or wdir if this is a wdir
        context.
        '''
        return subrepo.subrepo(self, path, allowwdir=True)

    def match(self, pats=None, include=None, exclude=None, default='glob',
              listsubrepos=False, badfn=None):
        r = self._repo
        return matchmod.match(r.root, r.getcwd(), pats,
                              include, exclude, default,
                              auditor=r.nofsauditor, ctx=self,
                              listsubrepos=listsubrepos, badfn=badfn)

    def diff(self, ctx2=None, match=None, **opts):
        """Returns a diff generator for the given contexts and matcher"""
        if ctx2 is None:
            ctx2 = self.p1()
        if ctx2 is not None:
            ctx2 = self._repo[ctx2]
        diffopts = patch.diffopts(self._repo.ui, opts)
        return patch.diff(self._repo, ctx2, self, match=match, opts=diffopts)

    def dirs(self):
        return self._manifest.dirs()

    def hasdir(self, dir):
        return self._manifest.hasdir(dir)

    def status(self, other=None, match=None, listignored=False,
               listclean=False, listunknown=False, listsubrepos=False):
        """return status of files between two nodes or node and working
        directory.

        If other is None, compare this node with working directory.

        returns (modified, added, removed, deleted, unknown, ignored, clean)
        """

        ctx1 = self
        ctx2 = self._repo[other]

        # This next code block is, admittedly, fragile logic that tests for
        # reversing the contexts and wouldn't need to exist if it weren't for
        # the fast (and common) code path of comparing the working directory
        # with its first parent.
        #
        # What we're aiming for here is the ability to call:
        #
        # workingctx.status(parentctx)
        #
        # If we always built the manifest for each context and compared those,
        # then we'd be done. But the special case of the above call means we
        # just copy the manifest of the parent.
        reversed = False
        if (not isinstance(ctx1, changectx)
            and isinstance(ctx2, changectx)):
            reversed = True
            ctx1, ctx2 = ctx2, ctx1

        match = ctx2._matchstatus(ctx1, match)
        r = scmutil.status([], [], [], [], [], [], [])
        r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
                              listunknown)

        if reversed:
            # Reverse added and removed. Clear deleted, unknown and ignored as
            # these make no sense to reverse.
            r = scmutil.status(r.modified, r.removed, r.added, [], [], [],
                               r.clean)

        if listsubrepos:
            for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
                try:
                    rev2 = ctx2.subrev(subpath)
                except KeyError:
                    # A subrepo that existed in node1 was deleted between
                    # node1 and node2 (inclusive). Thus, ctx2's substate
                    # won't contain that subpath. The best we can do ignore it.
                    rev2 = None
                submatch = matchmod.subdirmatcher(subpath, match)
                s = sub.status(rev2, match=submatch, ignored=listignored,
                               clean=listclean, unknown=listunknown,
                               listsubrepos=True)
                for rfiles, sfiles in zip(r, s):
                    rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)

        for l in r:
            l.sort()

        return r
383
383
def _filterederror(repo, changeid):
    """build an exception to be raised about a filtered changeid

    This is extracted in a function to help extensions (eg: evolve) to
    experiment with various message variants."""
    # The "visible" filters hide obsolete changesets; give those a more
    # actionable message than the generic filtered-revision one.
    if repo.filtername.startswith('visible'):
        msg = _("hidden revision '%s'") % changeid
        hint = _('use --hidden to access hidden revisions')
        return error.FilteredRepoLookupError(msg, hint=hint)
    msg = _("filtered revision '%s' (not in '%s' subset)")
    msg %= (changeid, repo.filtername)
    return error.FilteredRepoLookupError(msg)
396
396
397 class changectx(basectx):
397 class changectx(basectx):
398 """A changecontext object makes access to data related to a particular
398 """A changecontext object makes access to data related to a particular
399 changeset convenient. It represents a read-only context already present in
399 changeset convenient. It represents a read-only context already present in
400 the repo."""
400 the repo."""
401 def __init__(self, repo, changeid=''):
401 def __init__(self, repo, changeid=''):
402 """changeid is a revision number, node, or tag"""
402 """changeid is a revision number, node, or tag"""
403
403
404 # since basectx.__new__ already took care of copying the object, we
404 # since basectx.__new__ already took care of copying the object, we
405 # don't need to do anything in __init__, so we just exit here
405 # don't need to do anything in __init__, so we just exit here
406 if isinstance(changeid, basectx):
406 if isinstance(changeid, basectx):
407 return
407 return
408
408
409 if changeid == '':
409 if changeid == '':
410 changeid = '.'
410 changeid = '.'
411 self._repo = repo
411 self._repo = repo
412
412
413 try:
413 try:
414 if isinstance(changeid, int):
414 if isinstance(changeid, int):
415 self._node = repo.changelog.node(changeid)
415 self._node = repo.changelog.node(changeid)
416 self._rev = changeid
416 self._rev = changeid
417 return
417 return
418 if not pycompat.ispy3 and isinstance(changeid, long):
418 if not pycompat.ispy3 and isinstance(changeid, long):
419 changeid = str(changeid)
419 changeid = str(changeid)
420 if changeid == 'null':
420 if changeid == 'null':
421 self._node = nullid
421 self._node = nullid
422 self._rev = nullrev
422 self._rev = nullrev
423 return
423 return
424 if changeid == 'tip':
424 if changeid == 'tip':
425 self._node = repo.changelog.tip()
425 self._node = repo.changelog.tip()
426 self._rev = repo.changelog.rev(self._node)
426 self._rev = repo.changelog.rev(self._node)
427 return
427 return
428 if changeid == '.' or changeid == repo.dirstate.p1():
428 if changeid == '.' or changeid == repo.dirstate.p1():
429 # this is a hack to delay/avoid loading obsmarkers
429 # this is a hack to delay/avoid loading obsmarkers
430 # when we know that '.' won't be hidden
430 # when we know that '.' won't be hidden
431 self._node = repo.dirstate.p1()
431 self._node = repo.dirstate.p1()
432 self._rev = repo.unfiltered().changelog.rev(self._node)
432 self._rev = repo.unfiltered().changelog.rev(self._node)
433 return
433 return
434 if len(changeid) == 20:
434 if len(changeid) == 20:
435 try:
435 try:
436 self._node = changeid
436 self._node = changeid
437 self._rev = repo.changelog.rev(changeid)
437 self._rev = repo.changelog.rev(changeid)
438 return
438 return
439 except error.FilteredRepoLookupError:
439 except error.FilteredRepoLookupError:
440 raise
440 raise
441 except LookupError:
441 except LookupError:
442 pass
442 pass
443
443
444 try:
444 try:
445 r = int(changeid)
445 r = int(changeid)
446 if '%d' % r != changeid:
446 if '%d' % r != changeid:
447 raise ValueError
447 raise ValueError
448 l = len(repo.changelog)
448 l = len(repo.changelog)
449 if r < 0:
449 if r < 0:
450 r += l
450 r += l
451 if r < 0 or r >= l and r != wdirrev:
451 if r < 0 or r >= l and r != wdirrev:
452 raise ValueError
452 raise ValueError
453 self._rev = r
453 self._rev = r
454 self._node = repo.changelog.node(r)
454 self._node = repo.changelog.node(r)
455 return
455 return
456 except error.FilteredIndexError:
456 except error.FilteredIndexError:
457 raise
457 raise
458 except (ValueError, OverflowError, IndexError):
458 except (ValueError, OverflowError, IndexError):
459 pass
459 pass
460
460
461 if len(changeid) == 40:
461 if len(changeid) == 40:
462 try:
462 try:
463 self._node = bin(changeid)
463 self._node = bin(changeid)
464 self._rev = repo.changelog.rev(self._node)
464 self._rev = repo.changelog.rev(self._node)
465 return
465 return
466 except error.FilteredLookupError:
466 except error.FilteredLookupError:
467 raise
467 raise
468 except (TypeError, LookupError):
468 except (TypeError, LookupError):
469 pass
469 pass
470
470
471 # lookup bookmarks through the name interface
471 # lookup bookmarks through the name interface
472 try:
472 try:
473 self._node = repo.names.singlenode(repo, changeid)
473 self._node = repo.names.singlenode(repo, changeid)
474 self._rev = repo.changelog.rev(self._node)
474 self._rev = repo.changelog.rev(self._node)
475 return
475 return
476 except KeyError:
476 except KeyError:
477 pass
477 pass
478 except error.FilteredRepoLookupError:
478 except error.FilteredRepoLookupError:
479 raise
479 raise
480 except error.RepoLookupError:
480 except error.RepoLookupError:
481 pass
481 pass
482
482
483 self._node = repo.unfiltered().changelog._partialmatch(changeid)
483 self._node = repo.unfiltered().changelog._partialmatch(changeid)
484 if self._node is not None:
484 if self._node is not None:
485 self._rev = repo.changelog.rev(self._node)
485 self._rev = repo.changelog.rev(self._node)
486 return
486 return
487
487
488 # lookup failed
488 # lookup failed
489 # check if it might have come from damaged dirstate
489 # check if it might have come from damaged dirstate
490 #
490 #
491 # XXX we could avoid the unfiltered if we had a recognizable
491 # XXX we could avoid the unfiltered if we had a recognizable
492 # exception for filtered changeset access
492 # exception for filtered changeset access
493 if changeid in repo.unfiltered().dirstate.parents():
493 if changeid in repo.unfiltered().dirstate.parents():
494 msg = _("working directory has unknown parent '%s'!")
494 msg = _("working directory has unknown parent '%s'!")
495 raise error.Abort(msg % short(changeid))
495 raise error.Abort(msg % short(changeid))
496 try:
496 try:
497 if len(changeid) == 20 and nonascii(changeid):
497 if len(changeid) == 20 and nonascii(changeid):
498 changeid = hex(changeid)
498 changeid = hex(changeid)
499 except TypeError:
499 except TypeError:
500 pass
500 pass
501 except (error.FilteredIndexError, error.FilteredLookupError,
501 except (error.FilteredIndexError, error.FilteredLookupError,
502 error.FilteredRepoLookupError):
502 error.FilteredRepoLookupError):
503 raise _filterederror(repo, changeid)
503 raise _filterederror(repo, changeid)
504 except IndexError:
504 except IndexError:
505 pass
505 pass
506 raise error.RepoLookupError(
506 raise error.RepoLookupError(
507 _("unknown revision '%s'") % changeid)
507 _("unknown revision '%s'") % changeid)
508
508
509 def __hash__(self):
509 def __hash__(self):
510 try:
510 try:
511 return hash(self._rev)
511 return hash(self._rev)
512 except AttributeError:
512 except AttributeError:
513 return id(self)
513 return id(self)
514
514
515 def __nonzero__(self):
515 def __nonzero__(self):
516 return self._rev != nullrev
516 return self._rev != nullrev
517
517
518 __bool__ = __nonzero__
518 __bool__ = __nonzero__
519
519
520 @propertycache
520 @propertycache
521 def _changeset(self):
521 def _changeset(self):
522 return self._repo.changelog.changelogrevision(self.rev())
522 return self._repo.changelog.changelogrevision(self.rev())
523
523
524 @propertycache
524 @propertycache
525 def _manifest(self):
525 def _manifest(self):
526 return self._manifestctx.read()
526 return self._manifestctx.read()
527
527
528 @property
528 @property
529 def _manifestctx(self):
529 def _manifestctx(self):
530 return self._repo.manifestlog[self._changeset.manifest]
530 return self._repo.manifestlog[self._changeset.manifest]
531
531
532 @propertycache
532 @propertycache
533 def _manifestdelta(self):
533 def _manifestdelta(self):
534 return self._manifestctx.readdelta()
534 return self._manifestctx.readdelta()
535
535
536 @propertycache
536 @propertycache
537 def _parents(self):
537 def _parents(self):
538 repo = self._repo
538 repo = self._repo
539 p1, p2 = repo.changelog.parentrevs(self._rev)
539 p1, p2 = repo.changelog.parentrevs(self._rev)
540 if p2 == nullrev:
540 if p2 == nullrev:
541 return [changectx(repo, p1)]
541 return [changectx(repo, p1)]
542 return [changectx(repo, p1), changectx(repo, p2)]
542 return [changectx(repo, p1), changectx(repo, p2)]
543
543
544 def changeset(self):
544 def changeset(self):
545 c = self._changeset
545 c = self._changeset
546 return (
546 return (
547 c.manifest,
547 c.manifest,
548 c.user,
548 c.user,
549 c.date,
549 c.date,
550 c.files,
550 c.files,
551 c.description,
551 c.description,
552 c.extra,
552 c.extra,
553 )
553 )
554 def manifestnode(self):
554 def manifestnode(self):
555 return self._changeset.manifest
555 return self._changeset.manifest
556
556
557 def user(self):
557 def user(self):
558 return self._changeset.user
558 return self._changeset.user
559 def date(self):
559 def date(self):
560 return self._changeset.date
560 return self._changeset.date
561 def files(self):
561 def files(self):
562 return self._changeset.files
562 return self._changeset.files
563 def description(self):
563 def description(self):
564 return self._changeset.description
564 return self._changeset.description
565 def branch(self):
565 def branch(self):
566 return encoding.tolocal(self._changeset.extra.get("branch"))
566 return encoding.tolocal(self._changeset.extra.get("branch"))
567 def closesbranch(self):
567 def closesbranch(self):
568 return 'close' in self._changeset.extra
568 return 'close' in self._changeset.extra
569 def extra(self):
569 def extra(self):
570 return self._changeset.extra
570 return self._changeset.extra
571 def tags(self):
571 def tags(self):
572 return self._repo.nodetags(self._node)
572 return self._repo.nodetags(self._node)
573 def bookmarks(self):
573 def bookmarks(self):
574 return self._repo.nodebookmarks(self._node)
574 return self._repo.nodebookmarks(self._node)
575 def phase(self):
575 def phase(self):
576 return self._repo._phasecache.phase(self._repo, self._rev)
576 return self._repo._phasecache.phase(self._repo, self._rev)
577 def hidden(self):
577 def hidden(self):
578 return self._rev in repoview.filterrevs(self._repo, 'visible')
578 return self._rev in repoview.filterrevs(self._repo, 'visible')
579
579
580 def children(self):
580 def children(self):
581 """return contexts for each child changeset"""
581 """return contexts for each child changeset"""
582 c = self._repo.changelog.children(self._node)
582 c = self._repo.changelog.children(self._node)
583 return [changectx(self._repo, x) for x in c]
583 return [changectx(self._repo, x) for x in c]
584
584
585 def ancestors(self):
585 def ancestors(self):
586 for a in self._repo.changelog.ancestors([self._rev]):
586 for a in self._repo.changelog.ancestors([self._rev]):
587 yield changectx(self._repo, a)
587 yield changectx(self._repo, a)
588
588
589 def descendants(self):
589 def descendants(self):
590 for d in self._repo.changelog.descendants([self._rev]):
590 for d in self._repo.changelog.descendants([self._rev]):
591 yield changectx(self._repo, d)
591 yield changectx(self._repo, d)
592
592
593 def filectx(self, path, fileid=None, filelog=None):
593 def filectx(self, path, fileid=None, filelog=None):
594 """get a file context from this changeset"""
594 """get a file context from this changeset"""
595 if fileid is None:
595 if fileid is None:
596 fileid = self.filenode(path)
596 fileid = self.filenode(path)
597 return filectx(self._repo, path, fileid=fileid,
597 return filectx(self._repo, path, fileid=fileid,
598 changectx=self, filelog=filelog)
598 changectx=self, filelog=filelog)
599
599
600 def ancestor(self, c2, warn=False):
600 def ancestor(self, c2, warn=False):
601 """return the "best" ancestor context of self and c2
601 """return the "best" ancestor context of self and c2
602
602
603 If there are multiple candidates, it will show a message and check
603 If there are multiple candidates, it will show a message and check
604 merge.preferancestor configuration before falling back to the
604 merge.preferancestor configuration before falling back to the
605 revlog ancestor."""
605 revlog ancestor."""
606 # deal with workingctxs
606 # deal with workingctxs
607 n2 = c2._node
607 n2 = c2._node
608 if n2 is None:
608 if n2 is None:
609 n2 = c2._parents[0]._node
609 n2 = c2._parents[0]._node
610 cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
610 cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
611 if not cahs:
611 if not cahs:
612 anc = nullid
612 anc = nullid
613 elif len(cahs) == 1:
613 elif len(cahs) == 1:
614 anc = cahs[0]
614 anc = cahs[0]
615 else:
615 else:
616 # experimental config: merge.preferancestor
616 # experimental config: merge.preferancestor
617 for r in self._repo.ui.configlist('merge', 'preferancestor', ['*']):
617 for r in self._repo.ui.configlist('merge', 'preferancestor', ['*']):
618 try:
618 try:
619 ctx = changectx(self._repo, r)
619 ctx = changectx(self._repo, r)
620 except error.RepoLookupError:
620 except error.RepoLookupError:
621 continue
621 continue
622 anc = ctx.node()
622 anc = ctx.node()
623 if anc in cahs:
623 if anc in cahs:
624 break
624 break
625 else:
625 else:
626 anc = self._repo.changelog.ancestor(self._node, n2)
626 anc = self._repo.changelog.ancestor(self._node, n2)
627 if warn:
627 if warn:
628 self._repo.ui.status(
628 self._repo.ui.status(
629 (_("note: using %s as ancestor of %s and %s\n") %
629 (_("note: using %s as ancestor of %s and %s\n") %
630 (short(anc), short(self._node), short(n2))) +
630 (short(anc), short(self._node), short(n2))) +
631 ''.join(_(" alternatively, use --config "
631 ''.join(_(" alternatively, use --config "
632 "merge.preferancestor=%s\n") %
632 "merge.preferancestor=%s\n") %
633 short(n) for n in sorted(cahs) if n != anc))
633 short(n) for n in sorted(cahs) if n != anc))
634 return changectx(self._repo, anc)
634 return changectx(self._repo, anc)
635
635
636 def descendant(self, other):
636 def descendant(self, other):
637 """True if other is descendant of this changeset"""
637 """True if other is descendant of this changeset"""
638 return self._repo.changelog.descendant(self._rev, other._rev)
638 return self._repo.changelog.descendant(self._rev, other._rev)
639
639
640 def walk(self, match):
640 def walk(self, match):
641 '''Generates matching file names.'''
641 '''Generates matching file names.'''
642
642
643 # Wrap match.bad method to have message with nodeid
643 # Wrap match.bad method to have message with nodeid
644 def bad(fn, msg):
644 def bad(fn, msg):
645 # The manifest doesn't know about subrepos, so don't complain about
645 # The manifest doesn't know about subrepos, so don't complain about
646 # paths into valid subrepos.
646 # paths into valid subrepos.
647 if any(fn == s or fn.startswith(s + '/')
647 if any(fn == s or fn.startswith(s + '/')
648 for s in self.substate):
648 for s in self.substate):
649 return
649 return
650 match.bad(fn, _('no such file in rev %s') % self)
650 match.bad(fn, _('no such file in rev %s') % self)
651
651
652 m = matchmod.badmatch(match, bad)
652 m = matchmod.badmatch(match, bad)
653 return self._manifest.walk(m)
653 return self._manifest.walk(m)
654
654
655 def matches(self, match):
655 def matches(self, match):
656 return self.walk(match)
656 return self.walk(match)
657
657
658 class basefilectx(object):
658 class basefilectx(object):
659 """A filecontext object represents the common logic for its children:
659 """A filecontext object represents the common logic for its children:
660 filectx: read-only access to a filerevision that is already present
660 filectx: read-only access to a filerevision that is already present
661 in the repo,
661 in the repo,
662 workingfilectx: a filecontext that represents files from the working
662 workingfilectx: a filecontext that represents files from the working
663 directory,
663 directory,
664 memfilectx: a filecontext that represents files in-memory,
664 memfilectx: a filecontext that represents files in-memory,
665 overlayfilectx: duplicate another filecontext with some fields overridden.
665 overlayfilectx: duplicate another filecontext with some fields overridden.
666 """
666 """
667 @propertycache
667 @propertycache
668 def _filelog(self):
668 def _filelog(self):
669 return self._repo.file(self._path)
669 return self._repo.file(self._path)
670
670
671 @propertycache
671 @propertycache
672 def _changeid(self):
672 def _changeid(self):
673 if r'_changeid' in self.__dict__:
673 if r'_changeid' in self.__dict__:
674 return self._changeid
674 return self._changeid
675 elif r'_changectx' in self.__dict__:
675 elif r'_changectx' in self.__dict__:
676 return self._changectx.rev()
676 return self._changectx.rev()
677 elif r'_descendantrev' in self.__dict__:
677 elif r'_descendantrev' in self.__dict__:
678 # this file context was created from a revision with a known
678 # this file context was created from a revision with a known
679 # descendant, we can (lazily) correct for linkrev aliases
679 # descendant, we can (lazily) correct for linkrev aliases
680 return self._adjustlinkrev(self._descendantrev)
680 return self._adjustlinkrev(self._descendantrev)
681 else:
681 else:
682 return self._filelog.linkrev(self._filerev)
682 return self._filelog.linkrev(self._filerev)
683
683
684 @propertycache
684 @propertycache
685 def _filenode(self):
685 def _filenode(self):
686 if r'_fileid' in self.__dict__:
686 if r'_fileid' in self.__dict__:
687 return self._filelog.lookup(self._fileid)
687 return self._filelog.lookup(self._fileid)
688 else:
688 else:
689 return self._changectx.filenode(self._path)
689 return self._changectx.filenode(self._path)
690
690
691 @propertycache
691 @propertycache
692 def _filerev(self):
692 def _filerev(self):
693 return self._filelog.rev(self._filenode)
693 return self._filelog.rev(self._filenode)
694
694
695 @propertycache
695 @propertycache
696 def _repopath(self):
696 def _repopath(self):
697 return self._path
697 return self._path
698
698
699 def __nonzero__(self):
699 def __nonzero__(self):
700 try:
700 try:
701 self._filenode
701 self._filenode
702 return True
702 return True
703 except error.LookupError:
703 except error.LookupError:
704 # file is missing
704 # file is missing
705 return False
705 return False
706
706
707 __bool__ = __nonzero__
707 __bool__ = __nonzero__
708
708
709 def __bytes__(self):
709 def __bytes__(self):
710 try:
710 try:
711 return "%s@%s" % (self.path(), self._changectx)
711 return "%s@%s" % (self.path(), self._changectx)
712 except error.LookupError:
712 except error.LookupError:
713 return "%s@???" % self.path()
713 return "%s@???" % self.path()
714
714
715 __str__ = encoding.strmethod(__bytes__)
715 __str__ = encoding.strmethod(__bytes__)
716
716
717 def __repr__(self):
717 def __repr__(self):
718 return "<%s %s>" % (type(self).__name__, str(self))
718 return "<%s %s>" % (type(self).__name__, str(self))
719
719
720 def __hash__(self):
720 def __hash__(self):
721 try:
721 try:
722 return hash((self._path, self._filenode))
722 return hash((self._path, self._filenode))
723 except AttributeError:
723 except AttributeError:
724 return id(self)
724 return id(self)
725
725
726 def __eq__(self, other):
726 def __eq__(self, other):
727 try:
727 try:
728 return (type(self) == type(other) and self._path == other._path
728 return (type(self) == type(other) and self._path == other._path
729 and self._filenode == other._filenode)
729 and self._filenode == other._filenode)
730 except AttributeError:
730 except AttributeError:
731 return False
731 return False
732
732
733 def __ne__(self, other):
733 def __ne__(self, other):
734 return not (self == other)
734 return not (self == other)
735
735
736 def filerev(self):
736 def filerev(self):
737 return self._filerev
737 return self._filerev
738 def filenode(self):
738 def filenode(self):
739 return self._filenode
739 return self._filenode
740 @propertycache
740 @propertycache
741 def _flags(self):
741 def _flags(self):
742 return self._changectx.flags(self._path)
742 return self._changectx.flags(self._path)
743 def flags(self):
743 def flags(self):
744 return self._flags
744 return self._flags
745 def filelog(self):
745 def filelog(self):
746 return self._filelog
746 return self._filelog
747 def rev(self):
747 def rev(self):
748 return self._changeid
748 return self._changeid
749 def linkrev(self):
749 def linkrev(self):
750 return self._filelog.linkrev(self._filerev)
750 return self._filelog.linkrev(self._filerev)
751 def node(self):
751 def node(self):
752 return self._changectx.node()
752 return self._changectx.node()
753 def hex(self):
753 def hex(self):
754 return self._changectx.hex()
754 return self._changectx.hex()
755 def user(self):
755 def user(self):
756 return self._changectx.user()
756 return self._changectx.user()
757 def date(self):
757 def date(self):
758 return self._changectx.date()
758 return self._changectx.date()
759 def files(self):
759 def files(self):
760 return self._changectx.files()
760 return self._changectx.files()
761 def description(self):
761 def description(self):
762 return self._changectx.description()
762 return self._changectx.description()
763 def branch(self):
763 def branch(self):
764 return self._changectx.branch()
764 return self._changectx.branch()
765 def extra(self):
765 def extra(self):
766 return self._changectx.extra()
766 return self._changectx.extra()
767 def phase(self):
767 def phase(self):
768 return self._changectx.phase()
768 return self._changectx.phase()
769 def phasestr(self):
769 def phasestr(self):
770 return self._changectx.phasestr()
770 return self._changectx.phasestr()
771 def manifest(self):
771 def manifest(self):
772 return self._changectx.manifest()
772 return self._changectx.manifest()
773 def changectx(self):
773 def changectx(self):
774 return self._changectx
774 return self._changectx
775 def renamed(self):
775 def renamed(self):
776 return self._copied
776 return self._copied
777 def repo(self):
777 def repo(self):
778 return self._repo
778 return self._repo
779 def size(self):
779 def size(self):
780 return len(self.data())
780 return len(self.data())
781
781
782 def path(self):
782 def path(self):
783 return self._path
783 return self._path
784
784
785 def isbinary(self):
785 def isbinary(self):
786 try:
786 try:
787 return util.binary(self.data())
787 return util.binary(self.data())
788 except IOError:
788 except IOError:
789 return False
789 return False
790 def isexec(self):
790 def isexec(self):
791 return 'x' in self.flags()
791 return 'x' in self.flags()
792 def islink(self):
792 def islink(self):
793 return 'l' in self.flags()
793 return 'l' in self.flags()
794
794
795 def isabsent(self):
795 def isabsent(self):
796 """whether this filectx represents a file not in self._changectx
796 """whether this filectx represents a file not in self._changectx
797
797
798 This is mainly for merge code to detect change/delete conflicts. This is
798 This is mainly for merge code to detect change/delete conflicts. This is
799 expected to be True for all subclasses of basectx."""
799 expected to be True for all subclasses of basectx."""
800 return False
800 return False
801
801
802 _customcmp = False
802 _customcmp = False
803 def cmp(self, fctx):
803 def cmp(self, fctx):
804 """compare with other file context
804 """compare with other file context
805
805
806 returns True if different than fctx.
806 returns True if different than fctx.
807 """
807 """
808 if fctx._customcmp:
808 if fctx._customcmp:
809 return fctx.cmp(self)
809 return fctx.cmp(self)
810
810
811 if (fctx._filenode is None
811 if (fctx._filenode is None
812 and (self._repo._encodefilterpats
812 and (self._repo._encodefilterpats
813 # if file data starts with '\1\n', empty metadata block is
813 # if file data starts with '\1\n', empty metadata block is
814 # prepended, which adds 4 bytes to filelog.size().
814 # prepended, which adds 4 bytes to filelog.size().
815 or self.size() - 4 == fctx.size())
815 or self.size() - 4 == fctx.size())
816 or self.size() == fctx.size()):
816 or self.size() == fctx.size()):
817 return self._filelog.cmp(self._filenode, fctx.data())
817 return self._filelog.cmp(self._filenode, fctx.data())
818
818
819 return True
819 return True
820
820
821 def _adjustlinkrev(self, srcrev, inclusive=False):
821 def _adjustlinkrev(self, srcrev, inclusive=False):
822 """return the first ancestor of <srcrev> introducing <fnode>
822 """return the first ancestor of <srcrev> introducing <fnode>
823
823
824 If the linkrev of the file revision does not point to an ancestor of
824 If the linkrev of the file revision does not point to an ancestor of
825 srcrev, we'll walk down the ancestors until we find one introducing
825 srcrev, we'll walk down the ancestors until we find one introducing
826 this file revision.
826 this file revision.
827
827
828 :srcrev: the changeset revision we search ancestors from
828 :srcrev: the changeset revision we search ancestors from
829 :inclusive: if true, the src revision will also be checked
829 :inclusive: if true, the src revision will also be checked
830 """
830 """
831 repo = self._repo
831 repo = self._repo
832 cl = repo.unfiltered().changelog
832 cl = repo.unfiltered().changelog
833 mfl = repo.manifestlog
833 mfl = repo.manifestlog
834 # fetch the linkrev
834 # fetch the linkrev
835 lkr = self.linkrev()
835 lkr = self.linkrev()
836 # hack to reuse ancestor computation when searching for renames
836 # hack to reuse ancestor computation when searching for renames
837 memberanc = getattr(self, '_ancestrycontext', None)
837 memberanc = getattr(self, '_ancestrycontext', None)
838 iteranc = None
838 iteranc = None
839 if srcrev is None:
839 if srcrev is None:
840 # wctx case, used by workingfilectx during mergecopy
840 # wctx case, used by workingfilectx during mergecopy
841 revs = [p.rev() for p in self._repo[None].parents()]
841 revs = [p.rev() for p in self._repo[None].parents()]
842 inclusive = True # we skipped the real (revless) source
842 inclusive = True # we skipped the real (revless) source
843 else:
843 else:
844 revs = [srcrev]
844 revs = [srcrev]
845 if memberanc is None:
845 if memberanc is None:
846 memberanc = iteranc = cl.ancestors(revs, lkr,
846 memberanc = iteranc = cl.ancestors(revs, lkr,
847 inclusive=inclusive)
847 inclusive=inclusive)
848 # check if this linkrev is an ancestor of srcrev
848 # check if this linkrev is an ancestor of srcrev
849 if lkr not in memberanc:
849 if lkr not in memberanc:
850 if iteranc is None:
850 if iteranc is None:
851 iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
851 iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
852 fnode = self._filenode
852 fnode = self._filenode
853 path = self._path
853 path = self._path
854 for a in iteranc:
854 for a in iteranc:
855 ac = cl.read(a) # get changeset data (we avoid object creation)
855 ac = cl.read(a) # get changeset data (we avoid object creation)
856 if path in ac[3]: # checking the 'files' field.
856 if path in ac[3]: # checking the 'files' field.
857 # The file has been touched, check if the content is
857 # The file has been touched, check if the content is
858 # similar to the one we search for.
858 # similar to the one we search for.
859 if fnode == mfl[ac[0]].readfast().get(path):
859 if fnode == mfl[ac[0]].readfast().get(path):
860 return a
860 return a
861 # In theory, we should never get out of that loop without a result.
861 # In theory, we should never get out of that loop without a result.
862 # But if manifest uses a buggy file revision (not children of the
862 # But if manifest uses a buggy file revision (not children of the
863 # one it replaces) we could. Such a buggy situation will likely
863 # one it replaces) we could. Such a buggy situation will likely
864 # result is crash somewhere else at to some point.
864 # result is crash somewhere else at to some point.
865 return lkr
865 return lkr
866
866
867 def introrev(self):
867 def introrev(self):
868 """return the rev of the changeset which introduced this file revision
868 """return the rev of the changeset which introduced this file revision
869
869
870 This method is different from linkrev because it take into account the
870 This method is different from linkrev because it take into account the
871 changeset the filectx was created from. It ensures the returned
871 changeset the filectx was created from. It ensures the returned
872 revision is one of its ancestors. This prevents bugs from
872 revision is one of its ancestors. This prevents bugs from
873 'linkrev-shadowing' when a file revision is used by multiple
873 'linkrev-shadowing' when a file revision is used by multiple
874 changesets.
874 changesets.
875 """
875 """
876 lkr = self.linkrev()
876 lkr = self.linkrev()
877 attrs = vars(self)
877 attrs = vars(self)
878 noctx = not ('_changeid' in attrs or '_changectx' in attrs)
878 noctx = not ('_changeid' in attrs or '_changectx' in attrs)
879 if noctx or self.rev() == lkr:
879 if noctx or self.rev() == lkr:
880 return self.linkrev()
880 return self.linkrev()
881 return self._adjustlinkrev(self.rev(), inclusive=True)
881 return self._adjustlinkrev(self.rev(), inclusive=True)
882
882
883 def _parentfilectx(self, path, fileid, filelog):
883 def _parentfilectx(self, path, fileid, filelog):
884 """create parent filectx keeping ancestry info for _adjustlinkrev()"""
884 """create parent filectx keeping ancestry info for _adjustlinkrev()"""
885 fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
885 fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
886 if '_changeid' in vars(self) or '_changectx' in vars(self):
886 if '_changeid' in vars(self) or '_changectx' in vars(self):
887 # If self is associated with a changeset (probably explicitly
887 # If self is associated with a changeset (probably explicitly
888 # fed), ensure the created filectx is associated with a
888 # fed), ensure the created filectx is associated with a
889 # changeset that is an ancestor of self.changectx.
889 # changeset that is an ancestor of self.changectx.
890 # This lets us later use _adjustlinkrev to get a correct link.
890 # This lets us later use _adjustlinkrev to get a correct link.
891 fctx._descendantrev = self.rev()
891 fctx._descendantrev = self.rev()
892 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
892 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
893 elif '_descendantrev' in vars(self):
893 elif '_descendantrev' in vars(self):
894 # Otherwise propagate _descendantrev if we have one associated.
894 # Otherwise propagate _descendantrev if we have one associated.
895 fctx._descendantrev = self._descendantrev
895 fctx._descendantrev = self._descendantrev
896 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
896 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
897 return fctx
897 return fctx
898
898
899 def parents(self):
899 def parents(self):
900 _path = self._path
900 _path = self._path
901 fl = self._filelog
901 fl = self._filelog
902 parents = self._filelog.parents(self._filenode)
902 parents = self._filelog.parents(self._filenode)
903 pl = [(_path, node, fl) for node in parents if node != nullid]
903 pl = [(_path, node, fl) for node in parents if node != nullid]
904
904
905 r = fl.renamed(self._filenode)
905 r = fl.renamed(self._filenode)
906 if r:
906 if r:
907 # - In the simple rename case, both parent are nullid, pl is empty.
907 # - In the simple rename case, both parent are nullid, pl is empty.
908 # - In case of merge, only one of the parent is null id and should
908 # - In case of merge, only one of the parent is null id and should
909 # be replaced with the rename information. This parent is -always-
909 # be replaced with the rename information. This parent is -always-
910 # the first one.
910 # the first one.
911 #
911 #
912 # As null id have always been filtered out in the previous list
912 # As null id have always been filtered out in the previous list
913 # comprehension, inserting to 0 will always result in "replacing
913 # comprehension, inserting to 0 will always result in "replacing
914 # first nullid parent with rename information.
914 # first nullid parent with rename information.
915 pl.insert(0, (r[0], r[1], self._repo.file(r[0])))
915 pl.insert(0, (r[0], r[1], self._repo.file(r[0])))
916
916
917 return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]
917 return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]
918
918
919 def p1(self):
919 def p1(self):
920 return self.parents()[0]
920 return self.parents()[0]
921
921
922 def p2(self):
922 def p2(self):
923 p = self.parents()
923 p = self.parents()
924 if len(p) == 2:
924 if len(p) == 2:
925 return p[1]
925 return p[1]
926 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
926 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
927
927
928 def annotate(self, follow=False, linenumber=False, skiprevs=None,
928 def annotate(self, follow=False, linenumber=False, skiprevs=None,
929 diffopts=None):
929 diffopts=None):
930 '''returns a list of tuples of ((ctx, number), line) for each line
930 '''returns a list of tuples of ((ctx, number), line) for each line
931 in the file, where ctx is the filectx of the node where
931 in the file, where ctx is the filectx of the node where
932 that line was last changed; if linenumber parameter is true, number is
932 that line was last changed; if linenumber parameter is true, number is
933 the line number at the first appearance in the managed file, otherwise,
933 the line number at the first appearance in the managed file, otherwise,
934 number has a fixed value of False.
934 number has a fixed value of False.
935 '''
935 '''
936
936
937 def lines(text):
937 def lines(text):
938 if text.endswith("\n"):
938 if text.endswith("\n"):
939 return text.count("\n")
939 return text.count("\n")
940 return text.count("\n") + int(bool(text))
940 return text.count("\n") + int(bool(text))
941
941
942 if linenumber:
942 if linenumber:
943 def decorate(text, rev):
943 def decorate(text, rev):
944 return ([(rev, i) for i in xrange(1, lines(text) + 1)], text)
944 return ([(rev, i) for i in xrange(1, lines(text) + 1)], text)
945 else:
945 else:
946 def decorate(text, rev):
946 def decorate(text, rev):
947 return ([(rev, False)] * lines(text), text)
947 return ([(rev, False)] * lines(text), text)
948
948
949 getlog = util.lrucachefunc(lambda x: self._repo.file(x))
949 getlog = util.lrucachefunc(lambda x: self._repo.file(x))
950
950
951 def parents(f):
951 def parents(f):
952 # Cut _descendantrev here to mitigate the penalty of lazy linkrev
952 # Cut _descendantrev here to mitigate the penalty of lazy linkrev
953 # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
953 # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
954 # from the topmost introrev (= srcrev) down to p.linkrev() if it
954 # from the topmost introrev (= srcrev) down to p.linkrev() if it
955 # isn't an ancestor of the srcrev.
955 # isn't an ancestor of the srcrev.
956 f._changeid
956 f._changeid
957 pl = f.parents()
957 pl = f.parents()
958
958
959 # Don't return renamed parents if we aren't following.
959 # Don't return renamed parents if we aren't following.
960 if not follow:
960 if not follow:
961 pl = [p for p in pl if p.path() == f.path()]
961 pl = [p for p in pl if p.path() == f.path()]
962
962
963 # renamed filectx won't have a filelog yet, so set it
963 # renamed filectx won't have a filelog yet, so set it
964 # from the cache to save time
964 # from the cache to save time
965 for p in pl:
965 for p in pl:
966 if not '_filelog' in p.__dict__:
966 if not '_filelog' in p.__dict__:
967 p._filelog = getlog(p.path())
967 p._filelog = getlog(p.path())
968
968
969 return pl
969 return pl
970
970
971 # use linkrev to find the first changeset where self appeared
971 # use linkrev to find the first changeset where self appeared
972 base = self
972 base = self
973 introrev = self.introrev()
973 introrev = self.introrev()
974 if self.rev() != introrev:
974 if self.rev() != introrev:
975 base = self.filectx(self.filenode(), changeid=introrev)
975 base = self.filectx(self.filenode(), changeid=introrev)
976 if getattr(base, '_ancestrycontext', None) is None:
976 if getattr(base, '_ancestrycontext', None) is None:
977 cl = self._repo.changelog
977 cl = self._repo.changelog
978 if introrev is None:
978 if introrev is None:
979 # wctx is not inclusive, but works because _ancestrycontext
979 # wctx is not inclusive, but works because _ancestrycontext
980 # is used to test filelog revisions
980 # is used to test filelog revisions
981 ac = cl.ancestors([p.rev() for p in base.parents()],
981 ac = cl.ancestors([p.rev() for p in base.parents()],
982 inclusive=True)
982 inclusive=True)
983 else:
983 else:
984 ac = cl.ancestors([introrev], inclusive=True)
984 ac = cl.ancestors([introrev], inclusive=True)
985 base._ancestrycontext = ac
985 base._ancestrycontext = ac
986
986
987 # This algorithm would prefer to be recursive, but Python is a
987 # This algorithm would prefer to be recursive, but Python is a
988 # bit recursion-hostile. Instead we do an iterative
988 # bit recursion-hostile. Instead we do an iterative
989 # depth-first search.
989 # depth-first search.
990
990
991 # 1st DFS pre-calculates pcache and needed
991 # 1st DFS pre-calculates pcache and needed
992 visit = [base]
992 visit = [base]
993 pcache = {}
993 pcache = {}
994 needed = {base: 1}
994 needed = {base: 1}
995 while visit:
995 while visit:
996 f = visit.pop()
996 f = visit.pop()
997 if f in pcache:
997 if f in pcache:
998 continue
998 continue
999 pl = parents(f)
999 pl = parents(f)
1000 pcache[f] = pl
1000 pcache[f] = pl
1001 for p in pl:
1001 for p in pl:
1002 needed[p] = needed.get(p, 0) + 1
1002 needed[p] = needed.get(p, 0) + 1
1003 if p not in pcache:
1003 if p not in pcache:
1004 visit.append(p)
1004 visit.append(p)
1005
1005
1006 # 2nd DFS does the actual annotate
1006 # 2nd DFS does the actual annotate
1007 visit[:] = [base]
1007 visit[:] = [base]
1008 hist = {}
1008 hist = {}
1009 while visit:
1009 while visit:
1010 f = visit[-1]
1010 f = visit[-1]
1011 if f in hist:
1011 if f in hist:
1012 visit.pop()
1012 visit.pop()
1013 continue
1013 continue
1014
1014
1015 ready = True
1015 ready = True
1016 pl = pcache[f]
1016 pl = pcache[f]
1017 for p in pl:
1017 for p in pl:
1018 if p not in hist:
1018 if p not in hist:
1019 ready = False
1019 ready = False
1020 visit.append(p)
1020 visit.append(p)
1021 if ready:
1021 if ready:
1022 visit.pop()
1022 visit.pop()
1023 curr = decorate(f.data(), f)
1023 curr = decorate(f.data(), f)
1024 skipchild = False
1024 skipchild = False
1025 if skiprevs is not None:
1025 if skiprevs is not None:
1026 skipchild = f._changeid in skiprevs
1026 skipchild = f._changeid in skiprevs
1027 curr = _annotatepair([hist[p] for p in pl], f, curr, skipchild,
1027 curr = _annotatepair([hist[p] for p in pl], f, curr, skipchild,
1028 diffopts)
1028 diffopts)
1029 for p in pl:
1029 for p in pl:
1030 if needed[p] == 1:
1030 if needed[p] == 1:
1031 del hist[p]
1031 del hist[p]
1032 del needed[p]
1032 del needed[p]
1033 else:
1033 else:
1034 needed[p] -= 1
1034 needed[p] -= 1
1035
1035
1036 hist[f] = curr
1036 hist[f] = curr
1037 del pcache[f]
1037 del pcache[f]
1038
1038
1039 return zip(hist[base][0], hist[base][1].splitlines(True))
1039 return zip(hist[base][0], hist[base][1].splitlines(True))
1040
1040
1041 def ancestors(self, followfirst=False):
1041 def ancestors(self, followfirst=False):
1042 visit = {}
1042 visit = {}
1043 c = self
1043 c = self
1044 if followfirst:
1044 if followfirst:
1045 cut = 1
1045 cut = 1
1046 else:
1046 else:
1047 cut = None
1047 cut = None
1048
1048
1049 while True:
1049 while True:
1050 for parent in c.parents()[:cut]:
1050 for parent in c.parents()[:cut]:
1051 visit[(parent.linkrev(), parent.filenode())] = parent
1051 visit[(parent.linkrev(), parent.filenode())] = parent
1052 if not visit:
1052 if not visit:
1053 break
1053 break
1054 c = visit.pop(max(visit))
1054 c = visit.pop(max(visit))
1055 yield c
1055 yield c
1056
1056
def _annotatepair(parents, childfctx, child, skipchild, diffopts):
    r'''
    Given parent and child fctxes and annotate data for parents, for all lines
    in either parent that match the child, annotate the child with the parent's
    data.

    Additionally, if `skipchild` is True, replace all other lines with parent
    annotate data as well such that child is never blamed for any lines.

    >>> oldfctx = 'old'
    >>> p1fctx, p2fctx, childfctx = 'p1', 'p2', 'c'
    >>> olddata = 'a\nb\n'
    >>> p1data = 'a\nb\nc\n'
    >>> p2data = 'a\nc\nd\n'
    >>> childdata = 'a\nb2\nc\nc2\nd\n'
    >>> diffopts = mdiff.diffopts()

    >>> def decorate(text, rev):
    ...     return ([(rev, i) for i in xrange(1, text.count('\n') + 1)], text)

    Basic usage:

    >>> oldann = decorate(olddata, oldfctx)
    >>> p1ann = decorate(p1data, p1fctx)
    >>> p1ann = _annotatepair([oldann], p1fctx, p1ann, False, diffopts)
    >>> p1ann[0]
    [('old', 1), ('old', 2), ('p1', 3)]
    >>> p2ann = decorate(p2data, p2fctx)
    >>> p2ann = _annotatepair([oldann], p2fctx, p2ann, False, diffopts)
    >>> p2ann[0]
    [('old', 1), ('p2', 2), ('p2', 3)]

    Test with multiple parents (note the difference caused by ordering):

    >>> childann = decorate(childdata, childfctx)
    >>> childann = _annotatepair([p1ann, p2ann], childfctx, childann, False,
    ...                          diffopts)
    >>> childann[0]
    [('old', 1), ('c', 2), ('p2', 2), ('c', 4), ('p2', 3)]

    >>> childann = decorate(childdata, childfctx)
    >>> childann = _annotatepair([p2ann, p1ann], childfctx, childann, False,
    ...                          diffopts)
    >>> childann[0]
    [('old', 1), ('c', 2), ('p1', 3), ('c', 4), ('p2', 3)]

    Test with skipchild (note the difference caused by ordering):

    >>> childann = decorate(childdata, childfctx)
    >>> childann = _annotatepair([p1ann, p2ann], childfctx, childann, True,
    ...                          diffopts)
    >>> childann[0]
    [('old', 1), ('old', 2), ('p2', 2), ('p2', 2), ('p2', 3)]

    >>> childann = decorate(childdata, childfctx)
    >>> childann = _annotatepair([p2ann, p1ann], childfctx, childann, True,
    ...                          diffopts)
    >>> childann[0]
    [('old', 1), ('old', 2), ('p1', 3), ('p1', 3), ('p2', 3)]
    '''
    # Each annotation is a pair (lines, text) where `lines` is a list of
    # (fctx, linenumber) entries parallel to the lines of `text` (see the
    # doctest's `decorate` helper above).
    pblocks = [(parent, mdiff.allblocks(parent[1], child[1], opts=diffopts))
               for parent in parents]

    if skipchild:
        # Need to iterate over the blocks twice -- make it a list
        pblocks = [(p, list(blocks)) for (p, blocks) in pblocks]
    # Mercurial currently prefers p2 over p1 for annotate.
    # TODO: change this?
    for parent, blocks in pblocks:
        for (a1, a2, b1, b2), t in blocks:
            # Changed blocks ('!') or blocks made only of blank lines ('~')
            # belong to the child.
            if t == '=':
                child[0][b1:b2] = parent[0][a1:a2]

    if skipchild:
        # Now try and match up anything that couldn't be matched,
        # Reversing pblocks maintains bias towards p2, matching above
        # behavior.
        pblocks.reverse()

        # The heuristics are:
        # * Work on blocks of changed lines (effectively diff hunks with -U0).
        #   This could potentially be smarter but works well enough.
        # * For a non-matching section, do a best-effort fit. Match lines in
        #   diff hunks 1:1, dropping lines as necessary.
        # * Repeat the last line as a last resort.

        # First, replace as much as possible without repeating the last line.
        remaining = [(parent, []) for parent, _blocks in pblocks]
        for idx, (parent, blocks) in enumerate(pblocks):
            for (a1, a2, b1, b2), _t in blocks:
                if a2 - a1 >= b2 - b1:
                    # Parent block is at least as large: every child line
                    # still attributed to the child gets a parent line.
                    for bk in xrange(b1, b2):
                        if child[0][bk][0] == childfctx:
                            ak = min(a1 + (bk - b1), a2 - 1)
                            child[0][bk] = parent[0][ak]
                else:
                    remaining[idx][1].append((a1, a2, b1, b2))

        # Then, look at anything left, which might involve repeating the last
        # line.
        for parent, blocks in remaining:
            for a1, a2, b1, b2 in blocks:
                for bk in xrange(b1, b2):
                    if child[0][bk][0] == childfctx:
                        ak = min(a1 + (bk - b1), a2 - 1)
                        child[0][bk] = parent[0][ak]
    return child
1166
1166
class filectx(basefilectx):
    """A filecontext object makes access to data related to a particular
    filerevision convenient."""
    def __init__(self, repo, path, changeid=None, fileid=None,
                 filelog=None, changectx=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        self._repo = repo
        self._path = path

        # At least one way of locating the revision must be supplied.
        assert (changeid is not None
                or fileid is not None
                or changectx is not None), \
                ("bad args: changeid=%r, fileid=%r, changectx=%r"
                 % (changeid, fileid, changectx))

        # Seed the lazily-computed attributes with whatever the caller
        # already knows; the rest is derived on demand by @propertycache
        # attributes defined here and on basefilectx.
        if filelog is not None:
            self._filelog = filelog

        if changeid is not None:
            self._changeid = changeid
        if changectx is not None:
            self._changectx = changectx
        if fileid is not None:
            self._fileid = fileid

    @propertycache
    def _changectx(self):
        try:
            return changectx(self._repo, self._changeid)
        except error.FilteredRepoLookupError:
            # Linkrev may point to any revision in the repository. When the
            # repository is filtered this may lead to `filectx` trying to build
            # `changectx` for filtered revision. In such case we fallback to
            # creating `changectx` on the unfiltered version of the reposition.
            # This fallback should not be an issue because `changectx` from
            # `filectx` are not used in complex operations that care about
            # filtering.
            #
            # This fallback is a cheap and dirty fix that prevent several
            # crashes. It does not ensure the behavior is correct. However the
            # behavior was not correct before filtering either and "incorrect
            # behavior" is seen as better as "crash"
            #
            # Linkrevs have several serious troubles with filtering that are
            # complicated to solve. Proper handling of the issue here should be
            # considered when solving linkrev issue are on the table.
            return changectx(self._repo.unfiltered(), self._changeid)

    def filectx(self, fileid, changeid=None):
        '''opens an arbitrary revision of the file without
        opening a new filelog'''
        return filectx(self._repo, self._path, fileid=fileid,
                       filelog=self._filelog, changeid=changeid)

    def rawdata(self):
        # Revision data exactly as stored in the filelog (raw=True skips
        # flag processors).
        return self._filelog.revision(self._filenode, raw=True)

    def rawflags(self):
        """low-level revlog flags"""
        return self._filelog.flags(self._filerev)

    def data(self):
        try:
            return self._filelog.read(self._filenode)
        except error.CensoredNodeError:
            # Honor the configured censor policy: either pretend the data
            # is empty, or abort with a hint about how to ignore it.
            if self._repo.ui.config("censor", "policy", "abort") == "ignore":
                return ""
            raise error.Abort(_("censored node: %s") % short(self._filenode),
                              hint=_("set censor.policy to ignore errors"))

    def size(self):
        return self._filelog.size(self._filerev)

    @propertycache
    def _copied(self):
        """check if file was actually renamed in this changeset revision

        If rename logged in file revision, we report copy for changeset only
        if file revisions linkrev points back to the changeset in question
        or both changeset parents contain different file revisions.
        """

        renamed = self._filelog.renamed(self._filenode)
        if not renamed:
            return renamed

        if self.rev() == self.linkrev():
            return renamed

        name = self.path()
        fnode = self._filenode
        for p in self._changectx.parents():
            try:
                if fnode == p.filenode(name):
                    # A parent already holds this exact file revision, so
                    # this changeset did not introduce the rename.
                    return None
            except error.LookupError:
                pass
        return renamed

    def children(self):
        # hard for renames
        c = self._filelog.children(self._filenode)
        return [filectx(self._repo, self._path, fileid=x,
                        filelog=self._filelog) for x in c]
1272
1272
class committablectx(basectx):
    """A committablectx object provides common functionality for a context that
    wants the ability to commit, e.g. workingctx or memctx."""
    def __init__(self, repo, text="", user=None, date=None, extra=None,
                 changes=None):
        self._repo = repo
        # A committable context has no revision/node yet.
        self._rev = None
        self._node = None
        self._text = text
        # Only set the lazily-computed attributes when the caller supplied
        # values; otherwise the @propertycache definitions below kick in.
        if date:
            self._date = util.parsedate(date)
        if user:
            self._user = user
        if changes:
            self._status = changes

        self._extra = {}
        if extra:
            self._extra = extra.copy()
        if 'branch' not in self._extra:
            try:
                branch = encoding.fromlocal(self._repo.dirstate.branch())
            except UnicodeDecodeError:
                raise error.Abort(_('branch name not in UTF-8!'))
            self._extra['branch'] = branch
            if self._extra['branch'] == '':
                self._extra['branch'] = 'default'

    def __bytes__(self):
        # Render as "<first parent>+" to mark uncommitted state.
        return bytes(self._parents[0]) + "+"

    __str__ = encoding.strmethod(__bytes__)

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def _buildflagfunc(self):
        # Create a fallback function for getting file flags when the
        # filesystem doesn't support them

        copiesget = self._repo.dirstate.copies().get
        parents = self.parents()
        if len(parents) < 2:
            # when we have one parent, it's easy: copy from parent
            man = parents[0].manifest()
            def func(f):
                f = copiesget(f, f)
                return man.flags(f)
        else:
            # merges are tricky: we try to reconstruct the unstored
            # result from the merge (issue1802)
            p1, p2 = parents
            pa = p1.ancestor(p2)
            m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()

            def func(f):
                f = copiesget(f, f) # may be wrong for merges with copies
                fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
                if fl1 == fl2:
                    return fl1
                if fl1 == fla:
                    return fl2
                if fl2 == fla:
                    return fl1
                return '' # punt for conflicts

        return func

    @propertycache
    def _flagfunc(self):
        return self._repo.dirstate.flagfunc(self._buildflagfunc)

    @propertycache
    def _status(self):
        # Default status: compare the working directory against its parent.
        return self._repo.status()

    @propertycache
    def _user(self):
        return self._repo.ui.username()

    @propertycache
    def _date(self):
        ui = self._repo.ui
        # devel.default-date lets tests pin a deterministic date.
        date = ui.configdate('devel', 'default-date')
        if date is None:
            date = util.makedate()
        return date

    def subrev(self, subpath):
        return None

    def manifestnode(self):
        return None
    def user(self):
        return self._user or self._repo.ui.username()
    def date(self):
        return self._date
    def description(self):
        return self._text
    def files(self):
        return sorted(self._status.modified + self._status.added +
                      self._status.removed)

    def modified(self):
        return self._status.modified
    def added(self):
        return self._status.added
    def removed(self):
        return self._status.removed
    def deleted(self):
        return self._status.deleted
    def branch(self):
        return encoding.tolocal(self._extra['branch'])
    def closesbranch(self):
        return 'close' in self._extra
    def extra(self):
        return self._extra

    def tags(self):
        return []

    def bookmarks(self):
        # An uncommitted context inherits the bookmarks of its parents.
        b = []
        for p in self.parents():
            b.extend(p.bookmarks())
        return b

    def phase(self):
        phase = phases.draft # default phase to draft
        for p in self.parents():
            phase = max(phase, p.phase())
        return phase

    def hidden(self):
        return False

    def children(self):
        return []

    def flags(self, path):
        if r'_manifest' in self.__dict__:
            # A manifest has already been materialized; use it directly.
            try:
                return self._manifest.flags(path)
            except KeyError:
                return ''

        try:
            return self._flagfunc(path)
        except OSError:
            return ''

    def ancestor(self, c2):
        """return the "best" ancestor context of self and c2"""
        return self._parents[0].ancestor(c2) # punt on two parents for now

    def walk(self, match):
        '''Generates matching file names.'''
        return sorted(self._repo.dirstate.walk(match, sorted(self.substate),
                                               True, False))

    def matches(self, match):
        return sorted(self._repo.dirstate.matches(match))

    def ancestors(self):
        # Parents first, then all changelog ancestors of the parents.
        for p in self._parents:
            yield p
        for a in self._repo.changelog.ancestors(
            [p.rev() for p in self._parents]):
            yield changectx(self._repo, a)

    def markcommitted(self, node):
        """Perform post-commit cleanup necessary after committing this ctx

        Specifically, this updates backing stores this working context
        wraps to reflect the fact that the changes reflected by this
        workingctx have been committed. For example, it marks
        modified and added files as normal in the dirstate.

        """

        with self._repo.dirstate.parentchange():
            for f in self.modified() + self.added():
                self._repo.dirstate.normal(f)
            for f in self.removed():
                self._repo.dirstate.drop(f)
            self._repo.dirstate.setparents(node)

        # write changes out explicitly, because nesting wlock at
        # runtime may prevent 'wlock.release()' in 'repo.commit()'
        # from immediately doing so for subsequent changing files
        self._repo.dirstate.write(self._repo.currenttransaction())

    def dirty(self, missing=False, merge=True, branch=True):
        return False
1469
1469
1470 class workingctx(committablectx):
1470 class workingctx(committablectx):
1471 """A workingctx object makes access to data related to
1471 """A workingctx object makes access to data related to
1472 the current working directory convenient.
1472 the current working directory convenient.
1473 date - any valid date string or (unixtime, offset), or None.
1473 date - any valid date string or (unixtime, offset), or None.
1474 user - username string, or None.
1474 user - username string, or None.
1475 extra - a dictionary of extra values, or None.
1475 extra - a dictionary of extra values, or None.
1476 changes - a list of file lists as returned by localrepo.status()
1476 changes - a list of file lists as returned by localrepo.status()
1477 or None to use the repository status.
1477 or None to use the repository status.
1478 """
1478 """
    def __init__(self, repo, text="", user=None, date=None, extra=None,
                 changes=None):
        # workingctx adds no constructor state of its own; all setup is
        # delegated to committablectx.
        super(workingctx, self).__init__(repo, text, user, date, extra, changes)
1482
1482
1483 def __iter__(self):
1483 def __iter__(self):
1484 d = self._repo.dirstate
1484 d = self._repo.dirstate
1485 for f in d:
1485 for f in d:
1486 if d[f] != 'r':
1486 if d[f] != 'r':
1487 yield f
1487 yield f
1488
1488
1489 def __contains__(self, key):
1489 def __contains__(self, key):
1490 return self._repo.dirstate[key] not in "?r"
1490 return self._repo.dirstate[key] not in "?r"
1491
1491
    def hex(self):
        # The working directory has no real node; report the fixed
        # `wdirid` placeholder hash instead.
        return hex(wdirid)
1494
1494
1495 @propertycache
1495 @propertycache
1496 def _parents(self):
1496 def _parents(self):
1497 p = self._repo.dirstate.parents()
1497 p = self._repo.dirstate.parents()
1498 if p[1] == nullid:
1498 if p[1] == nullid:
1499 p = p[:-1]
1499 p = p[:-1]
1500 return [changectx(self._repo, x) for x in p]
1500 return [changectx(self._repo, x) for x in p]
1501
1501
    def filectx(self, path, filelog=None):
        """get a file context from the working directory"""
        # Bind the workingfilectx back to this workingctx so its data and
        # flags come from the working copy rather than a stored revision.
        return workingfilectx(self._repo, path, workingctx=self,
                              filelog=filelog)
1506
1506
1507 def dirty(self, missing=False, merge=True, branch=True):
1507 def dirty(self, missing=False, merge=True, branch=True):
1508 "check whether a working directory is modified"
1508 "check whether a working directory is modified"
1509 # check subrepos first
1509 # check subrepos first
1510 for s in sorted(self.substate):
1510 for s in sorted(self.substate):
1511 if self.sub(s).dirty():
1511 if self.sub(s).dirty():
1512 return True
1512 return True
1513 # check current working dir
1513 # check current working dir
1514 return ((merge and self.p2()) or
1514 return ((merge and self.p2()) or
1515 (branch and self.branch() != self.p1().branch()) or
1515 (branch and self.branch() != self.p1().branch()) or
1516 self.modified() or self.added() or self.removed() or
1516 self.modified() or self.added() or self.removed() or
1517 (missing and self.deleted()))
1517 (missing and self.deleted()))
1518
1518
    def add(self, list, prefix=""):
        """Schedule the files in *list* for addition to the dirstate.

        Returns the subset of files that could not be added.  *prefix* is
        only used to build the paths shown in warning messages.
        """
        join = lambda f: os.path.join(prefix, f)
        with self._repo.wlock():
            ui, ds = self._repo.ui, self._repo.dirstate
            rejected = []
            lstat = self._repo.wvfs.lstat
            for f in list:
                # Warn about names that are not portable across platforms.
                scmutil.checkportable(ui, join(f))
                try:
                    st = lstat(f)
                except OSError:
                    ui.warn(_("%s does not exist!\n") % join(f))
                    rejected.append(f)
                    continue
                if st.st_size > 10000000:
                    # Large files are allowed but warned about, since they
                    # can be expensive to manage in memory.
                    ui.warn(_("%s: up to %d MB of RAM may be required "
                              "to manage this file\n"
                              "(use 'hg revert %s' to cancel the "
                              "pending addition)\n")
                            % (f, 3 * st.st_size // 1000000, join(f)))
                if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
                    ui.warn(_("%s not added: only files and symlinks "
                              "supported currently\n") % join(f))
                    rejected.append(f)
                elif ds[f] in 'amn':
                    # Already added, merged or normal: nothing to do.
                    ui.warn(_("%s already tracked!\n") % join(f))
                elif ds[f] == 'r':
                    # Previously removed: resurrect rather than re-add.
                    ds.normallookup(f)
                else:
                    ds.add(f)
            return rejected
1550
1550
1551 def forget(self, files, prefix=""):
1551 def forget(self, files, prefix=""):
1552 join = lambda f: os.path.join(prefix, f)
1552 join = lambda f: os.path.join(prefix, f)
1553 with self._repo.wlock():
1553 with self._repo.wlock():
1554 rejected = []
1554 rejected = []
1555 for f in files:
1555 for f in files:
1556 if f not in self._repo.dirstate:
1556 if f not in self._repo.dirstate:
1557 self._repo.ui.warn(_("%s not tracked!\n") % join(f))
1557 self._repo.ui.warn(_("%s not tracked!\n") % join(f))
1558 rejected.append(f)
1558 rejected.append(f)
1559 elif self._repo.dirstate[f] != 'a':
1559 elif self._repo.dirstate[f] != 'a':
1560 self._repo.dirstate.remove(f)
1560 self._repo.dirstate.remove(f)
1561 else:
1561 else:
1562 self._repo.dirstate.drop(f)
1562 self._repo.dirstate.drop(f)
1563 return rejected
1563 return rejected
1564
1564
1565 def undelete(self, list):
1565 def undelete(self, list):
1566 pctxs = self.parents()
1566 pctxs = self.parents()
1567 with self._repo.wlock():
1567 with self._repo.wlock():
1568 for f in list:
1568 for f in list:
1569 if self._repo.dirstate[f] != 'r':
1569 if self._repo.dirstate[f] != 'r':
1570 self._repo.ui.warn(_("%s not removed!\n") % f)
1570 self._repo.ui.warn(_("%s not removed!\n") % f)
1571 else:
1571 else:
1572 fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f]
1572 fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f]
1573 t = fctx.data()
1573 t = fctx.data()
1574 self._repo.wwrite(f, t, fctx.flags())
1574 self._repo.wwrite(f, t, fctx.flags())
1575 self._repo.dirstate.normal(f)
1575 self._repo.dirstate.normal(f)
1576
1576
1577 def copy(self, source, dest):
1577 def copy(self, source, dest):
1578 try:
1578 try:
1579 st = self._repo.wvfs.lstat(dest)
1579 st = self._repo.wvfs.lstat(dest)
1580 except OSError as err:
1580 except OSError as err:
1581 if err.errno != errno.ENOENT:
1581 if err.errno != errno.ENOENT:
1582 raise
1582 raise
1583 self._repo.ui.warn(_("%s does not exist!\n") % dest)
1583 self._repo.ui.warn(_("%s does not exist!\n") % dest)
1584 return
1584 return
1585 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1585 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1586 self._repo.ui.warn(_("copy failed: %s is not a file or a "
1586 self._repo.ui.warn(_("copy failed: %s is not a file or a "
1587 "symbolic link\n") % dest)
1587 "symbolic link\n") % dest)
1588 else:
1588 else:
1589 with self._repo.wlock():
1589 with self._repo.wlock():
1590 if self._repo.dirstate[dest] in '?':
1590 if self._repo.dirstate[dest] in '?':
1591 self._repo.dirstate.add(dest)
1591 self._repo.dirstate.add(dest)
1592 elif self._repo.dirstate[dest] in 'r':
1592 elif self._repo.dirstate[dest] in 'r':
1593 self._repo.dirstate.normallookup(dest)
1593 self._repo.dirstate.normallookup(dest)
1594 self._repo.dirstate.copy(source, dest)
1594 self._repo.dirstate.copy(source, dest)
1595
1595
1596 def match(self, pats=None, include=None, exclude=None, default='glob',
1596 def match(self, pats=None, include=None, exclude=None, default='glob',
1597 listsubrepos=False, badfn=None):
1597 listsubrepos=False, badfn=None):
1598 r = self._repo
1598 r = self._repo
1599
1599
1600 # Only a case insensitive filesystem needs magic to translate user input
1600 # Only a case insensitive filesystem needs magic to translate user input
1601 # to actual case in the filesystem.
1601 # to actual case in the filesystem.
1602 icasefs = not util.fscasesensitive(r.root)
1602 icasefs = not util.fscasesensitive(r.root)
1603 return matchmod.match(r.root, r.getcwd(), pats, include, exclude,
1603 return matchmod.match(r.root, r.getcwd(), pats, include, exclude,
1604 default, auditor=r.auditor, ctx=self,
1604 default, auditor=r.auditor, ctx=self,
1605 listsubrepos=listsubrepos, badfn=badfn,
1605 listsubrepos=listsubrepos, badfn=badfn,
1606 icasefs=icasefs)
1606 icasefs=icasefs)
1607
1607
1608 def _filtersuspectsymlink(self, files):
1608 def _filtersuspectsymlink(self, files):
1609 if not files or self._repo.dirstate._checklink:
1609 if not files or self._repo.dirstate._checklink:
1610 return files
1610 return files
1611
1611
1612 # Symlink placeholders may get non-symlink-like contents
1612 # Symlink placeholders may get non-symlink-like contents
1613 # via user error or dereferencing by NFS or Samba servers,
1613 # via user error or dereferencing by NFS or Samba servers,
1614 # so we filter out any placeholders that don't look like a
1614 # so we filter out any placeholders that don't look like a
1615 # symlink
1615 # symlink
1616 sane = []
1616 sane = []
1617 for f in files:
1617 for f in files:
1618 if self.flags(f) == 'l':
1618 if self.flags(f) == 'l':
1619 d = self[f].data()
1619 d = self[f].data()
1620 if d == '' or len(d) >= 1024 or '\n' in d or util.binary(d):
1620 if d == '' or len(d) >= 1024 or '\n' in d or util.binary(d):
1621 self._repo.ui.debug('ignoring suspect symlink placeholder'
1621 self._repo.ui.debug('ignoring suspect symlink placeholder'
1622 ' "%s"\n' % f)
1622 ' "%s"\n' % f)
1623 continue
1623 continue
1624 sane.append(f)
1624 sane.append(f)
1625 return sane
1625 return sane
1626
1626
1627 def _checklookup(self, files):
1627 def _checklookup(self, files):
1628 # check for any possibly clean files
1628 # check for any possibly clean files
1629 if not files:
1629 if not files:
1630 return [], [], []
1630 return [], [], []
1631
1631
1632 modified = []
1632 modified = []
1633 deleted = []
1633 deleted = []
1634 fixup = []
1634 fixup = []
1635 pctx = self._parents[0]
1635 pctx = self._parents[0]
1636 # do a full compare of any files that might have changed
1636 # do a full compare of any files that might have changed
1637 for f in sorted(files):
1637 for f in sorted(files):
1638 try:
1638 try:
1639 # This will return True for a file that got replaced by a
1639 # This will return True for a file that got replaced by a
1640 # directory in the interim, but fixing that is pretty hard.
1640 # directory in the interim, but fixing that is pretty hard.
1641 if (f not in pctx or self.flags(f) != pctx.flags(f)
1641 if (f not in pctx or self.flags(f) != pctx.flags(f)
1642 or pctx[f].cmp(self[f])):
1642 or pctx[f].cmp(self[f])):
1643 modified.append(f)
1643 modified.append(f)
1644 else:
1644 else:
1645 fixup.append(f)
1645 fixup.append(f)
1646 except (IOError, OSError):
1646 except (IOError, OSError):
1647 # A file become inaccessible in between? Mark it as deleted,
1647 # A file become inaccessible in between? Mark it as deleted,
1648 # matching dirstate behavior (issue5584).
1648 # matching dirstate behavior (issue5584).
1649 # The dirstate has more complex behavior around whether a
1649 # The dirstate has more complex behavior around whether a
1650 # missing file matches a directory, etc, but we don't need to
1650 # missing file matches a directory, etc, but we don't need to
1651 # bother with that: if f has made it to this point, we're sure
1651 # bother with that: if f has made it to this point, we're sure
1652 # it's in the dirstate.
1652 # it's in the dirstate.
1653 deleted.append(f)
1653 deleted.append(f)
1654
1654
1655 return modified, deleted, fixup
1655 return modified, deleted, fixup
1656
1656
1657 def _poststatusfixup(self, status, fixup):
1657 def _poststatusfixup(self, status, fixup):
1658 """update dirstate for files that are actually clean"""
1658 """update dirstate for files that are actually clean"""
1659 poststatus = self._repo.postdsstatus()
1659 poststatus = self._repo.postdsstatus()
1660 if fixup or poststatus:
1660 if fixup or poststatus:
1661 try:
1661 try:
1662 oldid = self._repo.dirstate.identity()
1662 oldid = self._repo.dirstate.identity()
1663
1663
1664 # updating the dirstate is optional
1664 # updating the dirstate is optional
1665 # so we don't wait on the lock
1665 # so we don't wait on the lock
1666 # wlock can invalidate the dirstate, so cache normal _after_
1666 # wlock can invalidate the dirstate, so cache normal _after_
1667 # taking the lock
1667 # taking the lock
1668 with self._repo.wlock(False):
1668 with self._repo.wlock(False):
1669 if self._repo.dirstate.identity() == oldid:
1669 if self._repo.dirstate.identity() == oldid:
1670 if fixup:
1670 if fixup:
1671 normal = self._repo.dirstate.normal
1671 normal = self._repo.dirstate.normal
1672 for f in fixup:
1672 for f in fixup:
1673 normal(f)
1673 normal(f)
1674 # write changes out explicitly, because nesting
1674 # write changes out explicitly, because nesting
1675 # wlock at runtime may prevent 'wlock.release()'
1675 # wlock at runtime may prevent 'wlock.release()'
1676 # after this block from doing so for subsequent
1676 # after this block from doing so for subsequent
1677 # changing files
1677 # changing files
1678 tr = self._repo.currenttransaction()
1678 tr = self._repo.currenttransaction()
1679 self._repo.dirstate.write(tr)
1679 self._repo.dirstate.write(tr)
1680
1680
1681 if poststatus:
1681 if poststatus:
1682 for ps in poststatus:
1682 for ps in poststatus:
1683 ps(self, status)
1683 ps(self, status)
1684 else:
1684 else:
1685 # in this case, writing changes out breaks
1685 # in this case, writing changes out breaks
1686 # consistency, because .hg/dirstate was
1686 # consistency, because .hg/dirstate was
1687 # already changed simultaneously after last
1687 # already changed simultaneously after last
1688 # caching (see also issue5584 for detail)
1688 # caching (see also issue5584 for detail)
1689 self._repo.ui.debug('skip updating dirstate: '
1689 self._repo.ui.debug('skip updating dirstate: '
1690 'identity mismatch\n')
1690 'identity mismatch\n')
1691 except error.LockError:
1691 except error.LockError:
1692 pass
1692 pass
1693 finally:
1693 finally:
1694 # Even if the wlock couldn't be grabbed, clear out the list.
1694 # Even if the wlock couldn't be grabbed, clear out the list.
1695 self._repo.clearpostdsstatus()
1695 self._repo.clearpostdsstatus()
1696
1696
1697 def _dirstatestatus(self, match=None, ignored=False, clean=False,
1697 def _dirstatestatus(self, match=None, ignored=False, clean=False,
1698 unknown=False):
1698 unknown=False):
1699 '''Gets the status from the dirstate -- internal use only.'''
1699 '''Gets the status from the dirstate -- internal use only.'''
1700 listignored, listclean, listunknown = ignored, clean, unknown
1700 listignored, listclean, listunknown = ignored, clean, unknown
1701 match = match or matchmod.always(self._repo.root, self._repo.getcwd())
1701 match = match or matchmod.always(self._repo.root, self._repo.getcwd())
1702 subrepos = []
1702 subrepos = []
1703 if '.hgsub' in self:
1703 if '.hgsub' in self:
1704 subrepos = sorted(self.substate)
1704 subrepos = sorted(self.substate)
1705 cmp, s = self._repo.dirstate.status(match, subrepos, listignored,
1705 cmp, s = self._repo.dirstate.status(match, subrepos, listignored,
1706 listclean, listunknown)
1706 listclean, listunknown)
1707
1707
1708 # check for any possibly clean files
1708 # check for any possibly clean files
1709 fixup = []
1709 fixup = []
1710 if cmp:
1710 if cmp:
1711 modified2, deleted2, fixup = self._checklookup(cmp)
1711 modified2, deleted2, fixup = self._checklookup(cmp)
1712 s.modified.extend(modified2)
1712 s.modified.extend(modified2)
1713 s.deleted.extend(deleted2)
1713 s.deleted.extend(deleted2)
1714
1714
1715 if fixup and listclean:
1715 if fixup and listclean:
1716 s.clean.extend(fixup)
1716 s.clean.extend(fixup)
1717
1717
1718 self._poststatusfixup(s, fixup)
1718 self._poststatusfixup(s, fixup)
1719
1719
1720 if match.always():
1720 if match.always():
1721 # cache for performance
1721 # cache for performance
1722 if s.unknown or s.ignored or s.clean:
1722 if s.unknown or s.ignored or s.clean:
1723 # "_status" is cached with list*=False in the normal route
1723 # "_status" is cached with list*=False in the normal route
1724 self._status = scmutil.status(s.modified, s.added, s.removed,
1724 self._status = scmutil.status(s.modified, s.added, s.removed,
1725 s.deleted, [], [], [])
1725 s.deleted, [], [], [])
1726 else:
1726 else:
1727 self._status = s
1727 self._status = s
1728
1728
1729 return s
1729 return s
1730
1730
1731 @propertycache
1731 @propertycache
1732 def _manifest(self):
1732 def _manifest(self):
1733 """generate a manifest corresponding to the values in self._status
1733 """generate a manifest corresponding to the values in self._status
1734
1734
1735 This reuse the file nodeid from parent, but we use special node
1735 This reuse the file nodeid from parent, but we use special node
1736 identifiers for added and modified files. This is used by manifests
1736 identifiers for added and modified files. This is used by manifests
1737 merge to see that files are different and by update logic to avoid
1737 merge to see that files are different and by update logic to avoid
1738 deleting newly added files.
1738 deleting newly added files.
1739 """
1739 """
1740 return self._buildstatusmanifest(self._status)
1740 return self._buildstatusmanifest(self._status)
1741
1741
1742 def _buildstatusmanifest(self, status):
1742 def _buildstatusmanifest(self, status):
1743 """Builds a manifest that includes the given status results."""
1743 """Builds a manifest that includes the given status results."""
1744 parents = self.parents()
1744 parents = self.parents()
1745
1745
1746 man = parents[0].manifest().copy()
1746 man = parents[0].manifest().copy()
1747
1747
1748 ff = self._flagfunc
1748 ff = self._flagfunc
1749 for i, l in ((addednodeid, status.added),
1749 for i, l in ((addednodeid, status.added),
1750 (modifiednodeid, status.modified)):
1750 (modifiednodeid, status.modified)):
1751 for f in l:
1751 for f in l:
1752 man[f] = i
1752 man[f] = i
1753 try:
1753 try:
1754 man.setflag(f, ff(f))
1754 man.setflag(f, ff(f))
1755 except OSError:
1755 except OSError:
1756 pass
1756 pass
1757
1757
1758 for f in status.deleted + status.removed:
1758 for f in status.deleted + status.removed:
1759 if f in man:
1759 if f in man:
1760 del man[f]
1760 del man[f]
1761
1761
1762 return man
1762 return man
1763
1763
1764 def _buildstatus(self, other, s, match, listignored, listclean,
1764 def _buildstatus(self, other, s, match, listignored, listclean,
1765 listunknown):
1765 listunknown):
1766 """build a status with respect to another context
1766 """build a status with respect to another context
1767
1767
1768 This includes logic for maintaining the fast path of status when
1768 This includes logic for maintaining the fast path of status when
1769 comparing the working directory against its parent, which is to skip
1769 comparing the working directory against its parent, which is to skip
1770 building a new manifest if self (working directory) is not comparing
1770 building a new manifest if self (working directory) is not comparing
1771 against its parent (repo['.']).
1771 against its parent (repo['.']).
1772 """
1772 """
1773 s = self._dirstatestatus(match, listignored, listclean, listunknown)
1773 s = self._dirstatestatus(match, listignored, listclean, listunknown)
1774 # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
1774 # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
1775 # might have accidentally ended up with the entire contents of the file
1775 # might have accidentally ended up with the entire contents of the file
1776 # they are supposed to be linking to.
1776 # they are supposed to be linking to.
1777 s.modified[:] = self._filtersuspectsymlink(s.modified)
1777 s.modified[:] = self._filtersuspectsymlink(s.modified)
1778 if other != self._repo['.']:
1778 if other != self._repo['.']:
1779 s = super(workingctx, self)._buildstatus(other, s, match,
1779 s = super(workingctx, self)._buildstatus(other, s, match,
1780 listignored, listclean,
1780 listignored, listclean,
1781 listunknown)
1781 listunknown)
1782 return s
1782 return s
1783
1783
1784 def _matchstatus(self, other, match):
1784 def _matchstatus(self, other, match):
1785 """override the match method with a filter for directory patterns
1785 """override the match method with a filter for directory patterns
1786
1786
1787 We use inheritance to customize the match.bad method only in cases of
1787 We use inheritance to customize the match.bad method only in cases of
1788 workingctx since it belongs only to the working directory when
1788 workingctx since it belongs only to the working directory when
1789 comparing against the parent changeset.
1789 comparing against the parent changeset.
1790
1790
1791 If we aren't comparing against the working directory's parent, then we
1791 If we aren't comparing against the working directory's parent, then we
1792 just use the default match object sent to us.
1792 just use the default match object sent to us.
1793 """
1793 """
1794 superself = super(workingctx, self)
1794 superself = super(workingctx, self)
1795 match = superself._matchstatus(other, match)
1795 match = superself._matchstatus(other, match)
1796 if other != self._repo['.']:
1796 if other != self._repo['.']:
1797 def bad(f, msg):
1797 def bad(f, msg):
1798 # 'f' may be a directory pattern from 'match.files()',
1798 # 'f' may be a directory pattern from 'match.files()',
1799 # so 'f not in ctx1' is not enough
1799 # so 'f not in ctx1' is not enough
1800 if f not in other and not other.hasdir(f):
1800 if f not in other and not other.hasdir(f):
1801 self._repo.ui.warn('%s: %s\n' %
1801 self._repo.ui.warn('%s: %s\n' %
1802 (self._repo.dirstate.pathto(f), msg))
1802 (self._repo.dirstate.pathto(f), msg))
1803 match.bad = bad
1803 match.bad = bad
1804 return match
1804 return match
1805
1805
class committablefilectx(basefilectx):
    """A committablefilectx provides common functionality for a file context
    that wants the ability to commit, e.g. workingfilectx or memfilectx."""

    def __init__(self, repo, path, filelog=None, ctx=None):
        self._repo = repo
        self._path = path
        self._changeid = None
        self._filerev = self._filenode = None
        # filelog and changectx are optional and lazily derivable; only
        # record them when the caller supplies them
        if filelog is not None:
            self._filelog = filelog
        if ctx:
            self._changectx = ctx

    def __nonzero__(self):
        # a committable file context always represents something
        return True

    __bool__ = __nonzero__

    def linkrev(self):
        # linked to self._changectx no matter if file is modified or not
        return self.rev()

    def parents(self):
        '''return parent filectxs, following copies if necessary'''
        def nodefor(ctx, path):
            return ctx._manifest.get(path, nullid)

        path = self._path
        fl = self._filelog
        pcl = self._changectx._parents
        renamed = self.renamed()

        if renamed:
            # a rename already carries (source path, source node)
            entries = [renamed + (None,)]
        else:
            entries = [(path, nodefor(pcl[0], path), fl)]
        entries.extend((path, nodefor(pc, path), fl) for pc in pcl[1:])

        return [self._parentfilectx(p, fileid=n, filelog=l)
                for p, n, l in entries if n != nullid]

    def children(self):
        # uncommitted contexts cannot have children
        return []
1852
1852
class workingfilectx(committablefilectx):
    """A workingfilectx object makes access to data related to a particular
    file in the working directory convenient."""

    def __init__(self, repo, path, filelog=None, workingctx=None):
        super(workingfilectx, self).__init__(repo, path, filelog, workingctx)

    @propertycache
    def _changectx(self):
        return workingctx(self._repo)

    def data(self):
        """Read the file's current contents from the working directory."""
        return self._repo.wread(self._path)

    def renamed(self):
        """Return (source path, source filenode) if copied, else None."""
        rp = self._repo.dirstate.copied(self._path)
        if not rp:
            return None
        return rp, self._changectx._parents[0]._manifest.get(rp, nullid)

    def size(self):
        """Size of the file as it exists on disk."""
        return self._repo.wvfs.lstat(self._path).st_size

    def date(self):
        t, tz = self._changectx.date()
        try:
            # prefer the on-disk mtime, keeping the changectx timezone
            return (self._repo.wvfs.lstat(self._path).st_mtime, tz)
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
            # file is gone: fall back to the changectx date
            return (t, tz)

    def cmp(self, fctx):
        """compare with other file context

        returns True if different than fctx.
        """
        # fctx should be a filectx (not a workingfilectx)
        # invert comparison to reuse the same code path
        return fctx.cmp(self)

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        self._repo.wvfs.unlinkpath(self._path, ignoremissing=ignoremissing)

    def write(self, data, flags, backgroundclose=False):
        """wraps repo.wwrite"""
        self._repo.wwrite(self._path, data, flags,
                          backgroundclose=backgroundclose)

    def setflags(self, l, x):
        """wraps vfs.setflags (l = symlink flag, x = exec flag)"""
        self._repo.wvfs.setflags(self._path, l, x)
1901
1902
class workingcommitctx(workingctx):
    """A workingcommitctx object makes access to data related to
    the revision being committed convenient.

    This hides changes in the working directory, if they aren't
    committed in this context.
    """
    def __init__(self, repo, changes,
                 text="", user=None, date=None, extra=None):
        super(workingctx, self).__init__(repo, text, user, date, extra,
                                         changes)

    def _dirstatestatus(self, match=None, ignored=False, clean=False,
                        unknown=False):
        """Return matched files only in ``self._status``

        Uncommitted files appear "clean" via this context, even if
        they aren't actually so in the working directory.
        """
        match = match or matchmod.always(self._repo.root, self._repo.getcwd())
        if clean:
            # everything not touched by this commit counts as clean
            cleanfiles = [f for f in self._manifest
                          if f not in self._changedset]
        else:
            cleanfiles = []
        return scmutil.status([f for f in self._status.modified if match(f)],
                              [f for f in self._status.added if match(f)],
                              [f for f in self._status.removed if match(f)],
                              [], [], [], cleanfiles)

    @propertycache
    def _changedset(self):
        """Return the set of files changed in this context"""
        status = self._status
        return (set(status.modified) | set(status.added)
                | set(status.removed))
1939
1940
def makecachingfilectxfn(func):
    """Create a filectxfn that caches based on the path.

    We can't use util.cachefunc because it uses all arguments as the cache
    key and this creates a cycle since the arguments include the repo and
    memctx.
    """
    cache = {}

    def getfilectx(repo, memctx, path):
        try:
            return cache[path]
        except KeyError:
            cache[path] = func(repo, memctx, path)
            return cache[path]

    return getfilectx
1955
1956
def memfilefromctx(ctx):
    """Given a context return a memfilectx for ctx[path]

    This is a convenience method for building a memctx based on another
    context.
    """
    def getfilectx(repo, memctx, path):
        fctx = ctx[path]
        # this is weird but apparently we only keep track of one parent
        # (why not only store that instead of a tuple?)
        renamed = fctx.renamed()
        copied = renamed[0] if renamed else renamed
        return memfilectx(repo, path, fctx.data(),
                          islink=fctx.islink(), isexec=fctx.isexec(),
                          copied=copied, memctx=memctx)

    return getfilectx
1974
1975
def memfilefrompatch(patchstore):
    """Given a patch (e.g. patchstore object) return a memfilectx

    This is a convenience method for building a memctx based on a patchstore.
    """
    def getfilectx(repo, memctx, path):
        data, mode, copied = patchstore.getfile(path)
        if data is None:
            # the patch removed this file
            return None
        islink, isexec = mode
        return memfilectx(repo, path, data,
                          islink=islink, isexec=isexec,
                          copied=copied, memctx=memctx)

    return getfilectx
1990
1991
1991 class memctx(committablectx):
1992 class memctx(committablectx):
1992 """Use memctx to perform in-memory commits via localrepo.commitctx().
1993 """Use memctx to perform in-memory commits via localrepo.commitctx().
1993
1994
1994 Revision information is supplied at initialization time while
1995 Revision information is supplied at initialization time while
1995 related files data and is made available through a callback
1996 related files data and is made available through a callback
1996 mechanism. 'repo' is the current localrepo, 'parents' is a
1997 mechanism. 'repo' is the current localrepo, 'parents' is a
1997 sequence of two parent revisions identifiers (pass None for every
1998 sequence of two parent revisions identifiers (pass None for every
1998 missing parent), 'text' is the commit message and 'files' lists
1999 missing parent), 'text' is the commit message and 'files' lists
1999 names of files touched by the revision (normalized and relative to
2000 names of files touched by the revision (normalized and relative to
2000 repository root).
2001 repository root).
2001
2002
2002 filectxfn(repo, memctx, path) is a callable receiving the
2003 filectxfn(repo, memctx, path) is a callable receiving the
2003 repository, the current memctx object and the normalized path of
2004 repository, the current memctx object and the normalized path of
2004 requested file, relative to repository root. It is fired by the
2005 requested file, relative to repository root. It is fired by the
2005 commit function for every file in 'files', but calls order is
2006 commit function for every file in 'files', but calls order is
2006 undefined. If the file is available in the revision being
2007 undefined. If the file is available in the revision being
2007 committed (updated or added), filectxfn returns a memfilectx
2008 committed (updated or added), filectxfn returns a memfilectx
2008 object. If the file was removed, filectxfn return None for recent
2009 object. If the file was removed, filectxfn return None for recent
2009 Mercurial. Moved files are represented by marking the source file
2010 Mercurial. Moved files are represented by marking the source file
2010 removed and the new file added with copy information (see
2011 removed and the new file added with copy information (see
2011 memfilectx).
2012 memfilectx).
2012
2013
2013 user receives the committer name and defaults to current
2014 user receives the committer name and defaults to current
2014 repository username, date is the commit date in any format
2015 repository username, date is the commit date in any format
2015 supported by util.parsedate() and defaults to current date, extra
2016 supported by util.parsedate() and defaults to current date, extra
2016 is a dictionary of metadata or is left empty.
2017 is a dictionary of metadata or is left empty.
2017 """
2018 """
2018
2019
2019 # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
2020 # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
2020 # Extensions that need to retain compatibility across Mercurial 3.1 can use
2021 # Extensions that need to retain compatibility across Mercurial 3.1 can use
2021 # this field to determine what to do in filectxfn.
2022 # this field to determine what to do in filectxfn.
2022 _returnnoneformissingfiles = True
2023 _returnnoneformissingfiles = True
2023
2024
2024 def __init__(self, repo, parents, text, files, filectxfn, user=None,
2025 def __init__(self, repo, parents, text, files, filectxfn, user=None,
2025 date=None, extra=None, branch=None, editor=False):
2026 date=None, extra=None, branch=None, editor=False):
2026 super(memctx, self).__init__(repo, text, user, date, extra)
2027 super(memctx, self).__init__(repo, text, user, date, extra)
2027 self._rev = None
2028 self._rev = None
2028 self._node = None
2029 self._node = None
2029 parents = [(p or nullid) for p in parents]
2030 parents = [(p or nullid) for p in parents]
2030 p1, p2 = parents
2031 p1, p2 = parents
2031 self._parents = [changectx(self._repo, p) for p in (p1, p2)]
2032 self._parents = [changectx(self._repo, p) for p in (p1, p2)]
2032 files = sorted(set(files))
2033 files = sorted(set(files))
2033 self._files = files
2034 self._files = files
2034 if branch is not None:
2035 if branch is not None:
2035 self._extra['branch'] = encoding.fromlocal(branch)
2036 self._extra['branch'] = encoding.fromlocal(branch)
2036 self.substate = {}
2037 self.substate = {}
2037
2038
2038 if isinstance(filectxfn, patch.filestore):
2039 if isinstance(filectxfn, patch.filestore):
2039 filectxfn = memfilefrompatch(filectxfn)
2040 filectxfn = memfilefrompatch(filectxfn)
2040 elif not callable(filectxfn):
2041 elif not callable(filectxfn):
2041 # if store is not callable, wrap it in a function
2042 # if store is not callable, wrap it in a function
2042 filectxfn = memfilefromctx(filectxfn)
2043 filectxfn = memfilefromctx(filectxfn)
2043
2044
2044 # memoizing increases performance for e.g. vcs convert scenarios.
2045 # memoizing increases performance for e.g. vcs convert scenarios.
2045 self._filectxfn = makecachingfilectxfn(filectxfn)
2046 self._filectxfn = makecachingfilectxfn(filectxfn)
2046
2047
2047 if editor:
2048 if editor:
2048 self._text = editor(self._repo, self, [])
2049 self._text = editor(self._repo, self, [])
2049 self._repo.savecommitmessage(self._text)
2050 self._repo.savecommitmessage(self._text)
2050
2051
2051 def filectx(self, path, filelog=None):
2052 def filectx(self, path, filelog=None):
2052 """get a file context from the working directory
2053 """get a file context from the working directory
2053
2054
2054 Returns None if file doesn't exist and should be removed."""
2055 Returns None if file doesn't exist and should be removed."""
2055 return self._filectxfn(self._repo, self, path)
2056 return self._filectxfn(self._repo, self, path)
2056
2057
2057 def commit(self):
2058 def commit(self):
2058 """commit context to the repo"""
2059 """commit context to the repo"""
2059 return self._repo.commitctx(self)
2060 return self._repo.commitctx(self)
2060
2061
2061 @propertycache
2062 @propertycache
2062 def _manifest(self):
2063 def _manifest(self):
2063 """generate a manifest based on the return values of filectxfn"""
2064 """generate a manifest based on the return values of filectxfn"""
2064
2065
2065 # keep this simple for now; just worry about p1
2066 # keep this simple for now; just worry about p1
2066 pctx = self._parents[0]
2067 pctx = self._parents[0]
2067 man = pctx.manifest().copy()
2068 man = pctx.manifest().copy()
2068
2069
2069 for f in self._status.modified:
2070 for f in self._status.modified:
2070 p1node = nullid
2071 p1node = nullid
2071 p2node = nullid
2072 p2node = nullid
2072 p = pctx[f].parents() # if file isn't in pctx, check p2?
2073 p = pctx[f].parents() # if file isn't in pctx, check p2?
2073 if len(p) > 0:
2074 if len(p) > 0:
2074 p1node = p[0].filenode()
2075 p1node = p[0].filenode()
2075 if len(p) > 1:
2076 if len(p) > 1:
2076 p2node = p[1].filenode()
2077 p2node = p[1].filenode()
2077 man[f] = revlog.hash(self[f].data(), p1node, p2node)
2078 man[f] = revlog.hash(self[f].data(), p1node, p2node)
2078
2079
2079 for f in self._status.added:
2080 for f in self._status.added:
2080 man[f] = revlog.hash(self[f].data(), nullid, nullid)
2081 man[f] = revlog.hash(self[f].data(), nullid, nullid)
2081
2082
2082 for f in self._status.removed:
2083 for f in self._status.removed:
2083 if f in man:
2084 if f in man:
2084 del man[f]
2085 del man[f]
2085
2086
2086 return man
2087 return man
2087
2088
2088 @propertycache
2089 @propertycache
2089 def _status(self):
2090 def _status(self):
2090 """Calculate exact status from ``files`` specified at construction
2091 """Calculate exact status from ``files`` specified at construction
2091 """
2092 """
2092 man1 = self.p1().manifest()
2093 man1 = self.p1().manifest()
2093 p2 = self._parents[1]
2094 p2 = self._parents[1]
2094 # "1 < len(self._parents)" can't be used for checking
2095 # "1 < len(self._parents)" can't be used for checking
2095 # existence of the 2nd parent, because "memctx._parents" is
2096 # existence of the 2nd parent, because "memctx._parents" is
2096 # explicitly initialized by the list, of which length is 2.
2097 # explicitly initialized by the list, of which length is 2.
2097 if p2.node() != nullid:
2098 if p2.node() != nullid:
2098 man2 = p2.manifest()
2099 man2 = p2.manifest()
2099 managing = lambda f: f in man1 or f in man2
2100 managing = lambda f: f in man1 or f in man2
2100 else:
2101 else:
2101 managing = lambda f: f in man1
2102 managing = lambda f: f in man1
2102
2103
2103 modified, added, removed = [], [], []
2104 modified, added, removed = [], [], []
2104 for f in self._files:
2105 for f in self._files:
2105 if not managing(f):
2106 if not managing(f):
2106 added.append(f)
2107 added.append(f)
2107 elif self[f]:
2108 elif self[f]:
2108 modified.append(f)
2109 modified.append(f)
2109 else:
2110 else:
2110 removed.append(f)
2111 removed.append(f)
2111
2112
2112 return scmutil.status(modified, added, removed, [], [], [], [])
2113 return scmutil.status(modified, added, removed, [], [], [], [])
2113
2114
class memfilectx(committablefilectx):
    """memfilectx represents an in-memory file to commit.

    See memctx and committablefilectx for more details.
    """
    def __init__(self, repo, path, data, islink=False,
                 isexec=False, copied=None, memctx=None):
        """
        path is the normalized file path relative to repository root.
        data is the file content as a string.
        islink is True if the file is a symbolic link.
        isexec is True if the file is executable.
        copied is the source file path if current file was copied in the
        revision being committed, or None."""
        super(memfilectx, self).__init__(repo, path, None, memctx)
        self._data = data
        # flags string: 'l' for symlink, 'x' for executable, '' otherwise
        self._flags = (islink and 'l' or '') + (isexec and 'x' or '')
        self._copied = None
        if copied:
            self._copied = (copied, nullid)

    def data(self):
        return self._data

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        # need to figure out what to do here
        del self._changectx[self._path]

    def write(self, data, flags):
        """wraps repo.wwrite"""
        # in-memory files only record the new data; flags are ignored here
        self._data = data
2146
2147
class overlayfilectx(committablefilectx):
    """Like memfilectx but take an original filectx and optional parameters to
    override parts of it. This is useful when fctx.data() is expensive (i.e.
    flag processor is expensive) and raw data, flags, and filenode could be
    reused (ex. rebase or mode-only amend a REVIDX_EXTSTORED file).
    """

    def __init__(self, originalfctx, datafunc=None, path=None, flags=None,
                 copied=None, ctx=None):
        """originalfctx: filecontext to duplicate

        datafunc: None or a function to override data (file content). It is a
        function to be lazy. path, flags, copied, ctx: None or overridden value

        copied could be (path, rev), or False. copied could also be just path,
        and will be converted to (path, nullid). This simplifies some callers.
        """

        if path is None:
            path = originalfctx.path()
        if ctx is None:
            ctx = originalfctx.changectx()
            ctxmatch = lambda: True
        else:
            ctxmatch = lambda: ctx == originalfctx.changectx()

        repo = originalfctx.repo()
        flog = originalfctx.filelog()
        super(overlayfilectx, self).__init__(repo, path, flog, ctx)

        if copied is None:
            copied = originalfctx.renamed()
            copiedmatch = lambda: True
        else:
            if copied and not isinstance(copied, tuple):
                # repo._filecommit will recalculate copyrev so nullid is okay
                copied = (copied, nullid)
            copiedmatch = lambda: copied == originalfctx.renamed()

        # When data, copied (could affect data), ctx (could affect filelog
        # parents) are not overridden, rawdata, rawflags, and filenode may be
        # reused (repo._filecommit should double check filelog parents).
        #
        # path, flags are not hashed in filelog (but in manifestlog) so they do
        # not affect reusable here.
        #
        # If ctx or copied is overridden to a same value with originalfctx,
        # still consider it's reusable. originalfctx.renamed() may be a bit
        # expensive so it's not called unless necessary. Assuming datafunc is
        # always expensive, do not call it for this "reusable" test.
        reusable = datafunc is None and ctxmatch() and copiedmatch()

        if datafunc is None:
            datafunc = originalfctx.data
        if flags is None:
            flags = originalfctx.flags()

        self._datafunc = datafunc
        self._flags = flags
        self._copied = copied

        if reusable:
            # copy extra fields from originalfctx
            attrs = ['rawdata', 'rawflags', '_filenode', '_filerev']
            for attr in attrs:
                if util.safehasattr(originalfctx, attr):
                    setattr(self, attr, getattr(originalfctx, attr))

    def data(self):
        # lazy: only invoke the (possibly expensive) data function on demand
        return self._datafunc()
2217
2218
class metadataonlyctx(committablectx):
    """Like memctx but it's reusing the manifest of different commit.
    Intended to be used by lightweight operations that are creating
    metadata-only changes.

    Revision information is supplied at initialization time. 'repo' is the
    current localrepo, 'ctx' is original revision which manifest we're reusing
    'parents' is a sequence of two parent revisions identifiers (pass None for
    every missing parent), 'text' is the commit.

    user receives the committer name and defaults to current repository
    username, date is the commit date in any format supported by
    util.parsedate() and defaults to current date, extra is a dictionary of
    metadata or is left empty.
    """
    def __new__(cls, repo, originalctx, *args, **kwargs):
        # committablectx.__new__ only wants (cls, repo); swallow the rest
        return super(metadataonlyctx, cls).__new__(cls, repo)

    def __init__(self, repo, originalctx, parents, text, user=None, date=None,
                 extra=None, editor=False):
        super(metadataonlyctx, self).__init__(repo, text, user, date, extra)
        self._rev = None
        self._node = None
        self._originalctx = originalctx
        self._manifestnode = originalctx.manifestnode()
        parents = [(p or nullid) for p in parents]
        p1, p2 = self._parents = [changectx(self._repo, p) for p in parents]

        # sanity check to ensure that the reused manifest parents are
        # manifests of our commit parents
        mp1, mp2 = self.manifestctx().parents
        # NOTE(review): p1/p2 are changectx objects while nullid is a node;
        # the "!= nullid" guards look like they compare ctx to node — confirm
        # against changectx.__eq__ semantics before relying on them.
        if p1 != nullid and p1.manifestnode() != mp1:
            raise RuntimeError('can\'t reuse the manifest: '
                               'its p1 doesn\'t match the new ctx p1')
        if p2 != nullid and p2.manifestnode() != mp2:
            raise RuntimeError('can\'t reuse the manifest: '
                               'its p2 doesn\'t match the new ctx p2')

        self._files = originalctx.files()
        self.substate = {}

        if editor:
            self._text = editor(self._repo, self, [])
            self._repo.savecommitmessage(self._text)

    def manifestnode(self):
        return self._manifestnode

    @property
    def _manifestctx(self):
        return self._repo.manifestlog[self._manifestnode]

    def filectx(self, path, filelog=None):
        # delegate file lookups to the revision whose manifest we reuse
        return self._originalctx.filectx(path, filelog=filelog)

    def commit(self):
        """commit context to the repo"""
        return self._repo.commitctx(self)

    @property
    def _manifest(self):
        return self._originalctx.manifest()

    @propertycache
    def _status(self):
        """Calculate exact status from ``files`` specified in the ``origctx``
        and parents manifests.
        """
        man1 = self.p1().manifest()
        p2 = self._parents[1]
        # "1 < len(self._parents)" can't be used for checking
        # existence of the 2nd parent, because "metadataonlyctx._parents" is
        # explicitly initialized by the list, of which length is 2.
        if p2.node() != nullid:
            man2 = p2.manifest()
            managing = lambda f: f in man1 or f in man2
        else:
            managing = lambda f: f in man1

        modified, added, removed = [], [], []
        for f in self._files:
            if not managing(f):
                added.append(f)
            elif self[f]:
                modified.append(f)
            else:
                removed.append(f)

        return scmutil.status(modified, added, removed, [], [], [], [])
@@ -1,1752 +1,1751
1 # merge.py - directory-level update/merge handling for Mercurial
1 # merge.py - directory-level update/merge handling for Mercurial
2 #
2 #
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import errno
10 import errno
11 import hashlib
11 import hashlib
12 import os
12 import os
13 import shutil
13 import shutil
14 import struct
14 import struct
15
15
16 from .i18n import _
16 from .i18n import _
17 from .node import (
17 from .node import (
18 addednodeid,
18 addednodeid,
19 bin,
19 bin,
20 hex,
20 hex,
21 modifiednodeid,
21 modifiednodeid,
22 nullhex,
22 nullhex,
23 nullid,
23 nullid,
24 nullrev,
24 nullrev,
25 )
25 )
26 from . import (
26 from . import (
27 copies,
27 copies,
28 error,
28 error,
29 filemerge,
29 filemerge,
30 match as matchmod,
30 match as matchmod,
31 obsolete,
31 obsolete,
32 pycompat,
32 pycompat,
33 scmutil,
33 scmutil,
34 subrepo,
34 subrepo,
35 util,
35 util,
36 worker,
36 worker,
37 )
37 )
38
38
39 _pack = struct.pack
39 _pack = struct.pack
40 _unpack = struct.unpack
40 _unpack = struct.unpack
41
41
42 def _droponode(data):
42 def _droponode(data):
43 # used for compatibility for v1
43 # used for compatibility for v1
44 bits = data.split('\0')
44 bits = data.split('\0')
45 bits = bits[:-2] + bits[-1:]
45 bits = bits[:-2] + bits[-1:]
46 return '\0'.join(bits)
46 return '\0'.join(bits)
47
47
48 class mergestate(object):
48 class mergestate(object):
49 '''track 3-way merge state of individual files
49 '''track 3-way merge state of individual files
50
50
51 The merge state is stored on disk when needed. Two files are used: one with
51 The merge state is stored on disk when needed. Two files are used: one with
52 an old format (version 1), and one with a new format (version 2). Version 2
52 an old format (version 1), and one with a new format (version 2). Version 2
53 stores a superset of the data in version 1, including new kinds of records
53 stores a superset of the data in version 1, including new kinds of records
54 in the future. For more about the new format, see the documentation for
54 in the future. For more about the new format, see the documentation for
55 `_readrecordsv2`.
55 `_readrecordsv2`.
56
56
57 Each record can contain arbitrary content, and has an associated type. This
57 Each record can contain arbitrary content, and has an associated type. This
58 `type` should be a letter. If `type` is uppercase, the record is mandatory:
58 `type` should be a letter. If `type` is uppercase, the record is mandatory:
59 versions of Mercurial that don't support it should abort. If `type` is
59 versions of Mercurial that don't support it should abort. If `type` is
60 lowercase, the record can be safely ignored.
60 lowercase, the record can be safely ignored.
61
61
62 Currently known records:
62 Currently known records:
63
63
64 L: the node of the "local" part of the merge (hexified version)
64 L: the node of the "local" part of the merge (hexified version)
65 O: the node of the "other" part of the merge (hexified version)
65 O: the node of the "other" part of the merge (hexified version)
66 F: a file to be merged entry
66 F: a file to be merged entry
67 C: a change/delete or delete/change conflict
67 C: a change/delete or delete/change conflict
68 D: a file that the external merge driver will merge internally
68 D: a file that the external merge driver will merge internally
69 (experimental)
69 (experimental)
70 m: the external merge driver defined for this merge plus its run state
70 m: the external merge driver defined for this merge plus its run state
71 (experimental)
71 (experimental)
72 f: a (filename, dictionary) tuple of optional values for a given file
72 f: a (filename, dictionary) tuple of optional values for a given file
73 X: unsupported mandatory record type (used in tests)
73 X: unsupported mandatory record type (used in tests)
74 x: unsupported advisory record type (used in tests)
74 x: unsupported advisory record type (used in tests)
75 l: the labels for the parts of the merge.
75 l: the labels for the parts of the merge.
76
76
77 Merge driver run states (experimental):
77 Merge driver run states (experimental):
78 u: driver-resolved files unmarked -- needs to be run next time we're about
78 u: driver-resolved files unmarked -- needs to be run next time we're about
79 to resolve or commit
79 to resolve or commit
80 m: driver-resolved files marked -- only needs to be run before commit
80 m: driver-resolved files marked -- only needs to be run before commit
81 s: success/skipped -- does not need to be run any more
81 s: success/skipped -- does not need to be run any more
82
82
83 '''
83 '''
84 statepathv1 = 'merge/state'
84 statepathv1 = 'merge/state'
85 statepathv2 = 'merge/state2'
85 statepathv2 = 'merge/state2'
86
86
87 @staticmethod
87 @staticmethod
88 def clean(repo, node=None, other=None, labels=None):
88 def clean(repo, node=None, other=None, labels=None):
89 """Initialize a brand new merge state, removing any existing state on
89 """Initialize a brand new merge state, removing any existing state on
90 disk."""
90 disk."""
91 ms = mergestate(repo)
91 ms = mergestate(repo)
92 ms.reset(node, other, labels)
92 ms.reset(node, other, labels)
93 return ms
93 return ms
94
94
95 @staticmethod
95 @staticmethod
96 def read(repo):
96 def read(repo):
97 """Initialize the merge state, reading it from disk."""
97 """Initialize the merge state, reading it from disk."""
98 ms = mergestate(repo)
98 ms = mergestate(repo)
99 ms._read()
99 ms._read()
100 return ms
100 return ms
101
101
102 def __init__(self, repo):
102 def __init__(self, repo):
103 """Initialize the merge state.
103 """Initialize the merge state.
104
104
105 Do not use this directly! Instead call read() or clean()."""
105 Do not use this directly! Instead call read() or clean()."""
106 self._repo = repo
106 self._repo = repo
107 self._dirty = False
107 self._dirty = False
108 self._labels = None
108 self._labels = None
109
109
110 def reset(self, node=None, other=None, labels=None):
110 def reset(self, node=None, other=None, labels=None):
111 self._state = {}
111 self._state = {}
112 self._stateextras = {}
112 self._stateextras = {}
113 self._local = None
113 self._local = None
114 self._other = None
114 self._other = None
115 self._labels = labels
115 self._labels = labels
116 for var in ('localctx', 'otherctx'):
116 for var in ('localctx', 'otherctx'):
117 if var in vars(self):
117 if var in vars(self):
118 delattr(self, var)
118 delattr(self, var)
119 if node:
119 if node:
120 self._local = node
120 self._local = node
121 self._other = other
121 self._other = other
122 self._readmergedriver = None
122 self._readmergedriver = None
123 if self.mergedriver:
123 if self.mergedriver:
124 self._mdstate = 's'
124 self._mdstate = 's'
125 else:
125 else:
126 self._mdstate = 'u'
126 self._mdstate = 'u'
127 shutil.rmtree(self._repo.vfs.join('merge'), True)
127 shutil.rmtree(self._repo.vfs.join('merge'), True)
128 self._results = {}
128 self._results = {}
129 self._dirty = False
129 self._dirty = False
130
130
131 def _read(self):
131 def _read(self):
132 """Analyse each record content to restore a serialized state from disk
132 """Analyse each record content to restore a serialized state from disk
133
133
134 This function process "record" entry produced by the de-serialization
134 This function process "record" entry produced by the de-serialization
135 of on disk file.
135 of on disk file.
136 """
136 """
137 self._state = {}
137 self._state = {}
138 self._stateextras = {}
138 self._stateextras = {}
139 self._local = None
139 self._local = None
140 self._other = None
140 self._other = None
141 for var in ('localctx', 'otherctx'):
141 for var in ('localctx', 'otherctx'):
142 if var in vars(self):
142 if var in vars(self):
143 delattr(self, var)
143 delattr(self, var)
144 self._readmergedriver = None
144 self._readmergedriver = None
145 self._mdstate = 's'
145 self._mdstate = 's'
146 unsupported = set()
146 unsupported = set()
147 records = self._readrecords()
147 records = self._readrecords()
148 for rtype, record in records:
148 for rtype, record in records:
149 if rtype == 'L':
149 if rtype == 'L':
150 self._local = bin(record)
150 self._local = bin(record)
151 elif rtype == 'O':
151 elif rtype == 'O':
152 self._other = bin(record)
152 self._other = bin(record)
153 elif rtype == 'm':
153 elif rtype == 'm':
154 bits = record.split('\0', 1)
154 bits = record.split('\0', 1)
155 mdstate = bits[1]
155 mdstate = bits[1]
156 if len(mdstate) != 1 or mdstate not in 'ums':
156 if len(mdstate) != 1 or mdstate not in 'ums':
157 # the merge driver should be idempotent, so just rerun it
157 # the merge driver should be idempotent, so just rerun it
158 mdstate = 'u'
158 mdstate = 'u'
159
159
160 self._readmergedriver = bits[0]
160 self._readmergedriver = bits[0]
161 self._mdstate = mdstate
161 self._mdstate = mdstate
162 elif rtype in 'FDC':
162 elif rtype in 'FDC':
163 bits = record.split('\0')
163 bits = record.split('\0')
164 self._state[bits[0]] = bits[1:]
164 self._state[bits[0]] = bits[1:]
165 elif rtype == 'f':
165 elif rtype == 'f':
166 filename, rawextras = record.split('\0', 1)
166 filename, rawextras = record.split('\0', 1)
167 extraparts = rawextras.split('\0')
167 extraparts = rawextras.split('\0')
168 extras = {}
168 extras = {}
169 i = 0
169 i = 0
170 while i < len(extraparts):
170 while i < len(extraparts):
171 extras[extraparts[i]] = extraparts[i + 1]
171 extras[extraparts[i]] = extraparts[i + 1]
172 i += 2
172 i += 2
173
173
174 self._stateextras[filename] = extras
174 self._stateextras[filename] = extras
175 elif rtype == 'l':
175 elif rtype == 'l':
176 labels = record.split('\0', 2)
176 labels = record.split('\0', 2)
177 self._labels = [l for l in labels if len(l) > 0]
177 self._labels = [l for l in labels if len(l) > 0]
178 elif not rtype.islower():
178 elif not rtype.islower():
179 unsupported.add(rtype)
179 unsupported.add(rtype)
180 self._results = {}
180 self._results = {}
181 self._dirty = False
181 self._dirty = False
182
182
183 if unsupported:
183 if unsupported:
184 raise error.UnsupportedMergeRecords(unsupported)
184 raise error.UnsupportedMergeRecords(unsupported)
185
185
def _readrecords(self):
    """Read the merge state from disk.

    Both the v1 and the v2 state files are read; when every v1 record is
    consistent with v2 we trust the richer v2 data.  Otherwise an older
    Mercurial must have rewritten only the v1 file, so v1 wins and the
    pieces it never stored (the "other" changeset id and the other-file
    node fields) are filled in with placeholders.

    Returns a list of records [(TYPE, data), ...].
    """
    v1records = self._readrecordsv1()
    v2records = self._readrecordsv2()
    if self._v1v2match(v1records, v2records):
        return v2records
    # v1 file is newer than v2 file, use it; we can only infer the
    # "other" changeset of the merge from the working copy parents
    mctx = self._repo[None].parents()[-1]
    v1records.append(('O', mctx.hex()))
    # insert placeholder "other" file node fields: nothing reads them
    # yet, and resolving them via mctx could fail if mctx were wrong
    for idx, rec in enumerate(v1records):
        if rec[0] == 'F':
            fields = rec[1].split('\0')
            fields.insert(-2, '')
            v1records[idx] = (rec[0], '\0'.join(fields))
    return v1records
219
219
def _v1v2match(self, v1records, v2records):
    """Tell whether the v1 records are consistent with the v2 records."""
    # project the v2 records down to the v1 level of detail
    oldv2 = set()
    for rec in v2records:
        if rec[0] == 'L':
            oldv2.add(rec)
        elif rec[0] == 'F':
            # v1 never stored the other-file node; drop it here too
            oldv2.add(('F', _droponode(rec[1])))
    return all(rec in oldv2 for rec in v1records)
233
233
def _readrecordsv1(self):
    """Read the version 1 on-disk merge state.

    Returns a list of records [(TYPE, data), ...].

    Note: "F" records read from this file are one field short — the
    "other file node" entry is absent from the v1 format.
    """
    records = []
    try:
        f = self._repo.vfs(self.statepathv1)
        for i, line in enumerate(f):
            # first line is the local node, every other line a file entry
            records.append(('L' if i == 0 else 'F', line[:-1]))
        f.close()
    except IOError as err:
        # a missing state file simply means "no merge in progress"
        if err.errno != errno.ENOENT:
            raise
    return records
255
255
def _readrecordsv2(self):
    """Read the version 2 on-disk merge state.

    The file is a sequence of records, each encoded as:

        [type][length][content]

    where `type` is a single character, `length` a big-endian 4-byte
    integer, and `content` is `length` arbitrary bytes.

    The 't' record type is an escape hatch: Mercurial versions before
    3.7 aborted on unknown mandatory records even when clearing the
    merge state, so newer mandatory records are written as advisory
    't' records whose payload starts with the real record type.

    Returns a list of records [(TYPE, data), ...].
    """
    records = []
    try:
        f = self._repo.vfs(self.statepathv2)
        data = f.read()
        f.close()
    except IOError as err:
        # a missing state file simply means "no merge in progress"
        if err.errno != errno.ENOENT:
            raise
        return records
    pos, size = 0, len(data)
    while pos < size:
        rtype = data[pos]
        pos += 1
        length = _unpack('>I', data[pos:(pos + 4)])[0]
        pos += 4
        payload = data[pos:(pos + length)]
        pos += length
        if rtype == 't':
            # unwrap the escaped record type (see docstring)
            rtype, payload = payload[0], payload[1:]
        records.append((rtype, payload))
    return records
295
295
@util.propertycache
def mergedriver(self):
    """The currently configured merge driver, validated against the one
    recorded when the merge started.

    This guards against the following scenario:
      - A configures a malicious merge driver in their hgrc, then
        pauses the merge
      - A edits their hgrc to remove references to the merge driver
      - A gives a copy of their entire repo, including .hg, to B
      - B inspects .hgrc and finds it to be clean
      - B then continues the merge and the malicious merge driver
        gets invoked
    """
    configured = self._repo.ui.config('experimental', 'mergedriver')
    recorded = self._readmergedriver
    if recorded is not None and recorded != configured:
        raise error.ConfigError(
            _("merge driver changed since merge started"),
            hint=_("revert merge driver change or abort merge"))
    return configured
314
314
@util.propertycache
def localctx(self):
    """The changectx of the local side of the merge; requires _local."""
    if self._local is None:
        raise error.ProgrammingError(
            "localctx accessed but self._local isn't set")
    return self._repo[self._local]
321
321
@util.propertycache
def otherctx(self):
    """The changectx of the other side of the merge; requires _other."""
    if self._other is None:
        raise error.ProgrammingError(
            "otherctx accessed but self._other isn't set")
    return self._repo[self._other]
328
328
def active(self):
    """Whether mergestate is active.

    Returns True if there appears to be mergestate — a rough proxy for
    "is a merge in progress."  In-memory state is consulted first so
    the state files are only stat'ed when the cheap checks say no.
    """
    if self._local:
        return True
    if self._state:
        return True
    vfs = self._repo.vfs
    return vfs.exists(self.statepathv1) or vfs.exists(self.statepathv2)
340
340
def commit(self):
    """Flush the current merge state to disk, but only when dirty."""
    if not self._dirty:
        return
    self._writerecords(self._makerecords())
    self._dirty = False
347
347
def _makerecords(self):
    """Serialize the in-memory merge state into a list of records."""
    records = [('L', hex(self._local)), ('O', hex(self._other))]
    if self.mergedriver:
        records.append(('m', '\0'.join([
            self.mergedriver, self._mdstate])))
    for path, entry in self._state.iteritems():
        if entry[0] == 'd':
            rtype = 'D'
        elif entry[1] == nullhex or entry[6] == nullhex:
            # entry[1] == local ('cd'), entry[6] == other ('dc');
            # change/delete conflicts are not understood by older
            # Mercurials, so they get a distinct mandatory record type
            rtype = 'C'
        else:
            rtype = 'F'
        records.append((rtype, '\0'.join([path] + entry)))
    for filename, extras in sorted(self._stateextras.iteritems()):
        rawextras = '\0'.join('%s\0%s' % (k, v)
                              for k, v in extras.iteritems())
        records.append(('f', '%s\0%s' % (filename, rawextras)))
    if self._labels is not None:
        records.append(('l', '\0'.join(self._labels)))
    return records
372
372
def _writerecords(self, records):
    """Persist the given records in both the v1 and the v2 formats."""
    self._writerecordsv1(records)
    self._writerecordsv2(records)
377
377
def _writerecordsv1(self, records):
    """Write the records out in the version 1 format.

    v1 only carries the local node plus file entries without the
    other-file node; every other record type is silently dropped.
    """
    f = self._repo.vfs(self.statepathv1, 'w')
    irecords = iter(records)
    first = next(irecords)
    assert first[0] == 'L'
    f.write(hex(self._local) + '\n')
    for rtype, data in irecords:
        if rtype == 'F':
            f.write('%s\n' % _droponode(data))
    f.close()
389
389
def _writerecordsv2(self, records):
    """Write the records out in the version 2 format.

    Record types outside the whitelist are wrapped in advisory 't'
    records; see the docstring of _readrecordsv2 for the rationale.
    """
    # these are the records that all version 2 clients can read
    whitelist = 'LOF'
    f = self._repo.vfs(self.statepathv2, 'w')
    for key, data in records:
        assert len(key) == 1
        if key not in whitelist:
            data = '%s%s' % (key, data)
            key = 't'
        f.write(_pack('>sI%is' % len(data), key, len(data), data))
    f.close()
404
404
def add(self, fcl, fco, fca, fd):
    """Record a new (potentially?) conflicting file in the merge state.

    fcl: file context for local
    fco: file context for remote
    fca: file context for ancestor
    fd:  file path of the resulting merge

    note: also stashes the local version under the `.hg/merge`
    directory so it can be restored on re-resolve.
    """
    if fcl.isabsent():
        localkey = nullhex
    else:
        localkey = hashlib.sha1(fcl.path()).hexdigest()
    self._repo.vfs.write('merge/' + localkey, fcl.data())
    self._state[fd] = ['u', localkey, fcl.path(),
                       fca.path(), hex(fca.filenode()),
                       fco.path(), hex(fco.filenode()),
                       fcl.flags()]
    self._stateextras[fd] = {'ancestorlinknode': hex(fca.node())}
    self._dirty = True
425
425
def __contains__(self, dfile):
    """True when `dfile` has an entry in the merge state."""
    return dfile in self._state
428
428
def __getitem__(self, dfile):
    """Return the resolution state character of `dfile` (e.g. 'u', 'r')."""
    entry = self._state[dfile]
    return entry[0]
431
431
def __iter__(self):
    """Iterate over the tracked paths in sorted order."""
    return iter(sorted(self._state))
434
434
def files(self):
    """Return the paths tracked by the merge state."""
    return self._state.keys()
437
437
def mark(self, dfile, state):
    """Set the resolution state of `dfile` and flag the state as dirty."""
    self._state[dfile][0] = state
    self._dirty = True
441
441
def mdstate(self):
    """Return the merge driver state character."""
    return self._mdstate
444
444
def unresolved(self):
    """Yield the paths of unresolved files."""
    for path, entry in self._state.items():
        if entry[0] == 'u':
            yield path
451
451
def driverresolved(self):
    """Yield the paths of driver-resolved files."""
    for path, entry in self._state.items():
        if entry[0] == 'd':
            yield path
458
458
def extras(self, filename):
    """Return (creating it if needed) the extras dict for `filename`."""
    return self._stateextras.setdefault(filename, {})
461
461
def _resolve(self, preresolve, dfile, wctx):
    """Run (or re-run) the merge machinery for the path `dfile`.

    When `preresolve` is true, the stashed local version is restored to
    the working copy and only the premerge step runs; otherwise the
    full file merge runs.  On completion, the appropriate dirstate
    action for the file is queued in self._results.

    Returns a (complete, exit code) pair.
    """
    if self[dfile] in 'rd':
        # already resolved (or driver-resolved): nothing to do
        return True, 0
    stateentry = self._state[dfile]
    state, localkey, lfile, afile, anode, ofile, onode, flags = stateentry
    octx = self._repo[self._other]
    extras = self.extras(dfile)
    anccommitnode = extras.get('ancestorlinknode')
    if anccommitnode:
        actx = self._repo[anccommitnode]
    else:
        actx = None
    fcd = self._filectxorabsent(localkey, wctx, dfile)
    fco = self._filectxorabsent(onode, octx, ofile)
    # TODO: move this to filectxorabsent
    fca = self._repo.filectx(afile, fileid=anode, changeid=actx)
    # "premerge" the executable ('x') flag
    flo = fco.flags()
    fla = fca.flags()
    if 'x' in flags + flo + fla and 'l' not in flags + flo + fla:
        if fca.node() == nullid and flags != flo:
            # no common ancestor to arbitrate the flag merge
            if preresolve:
                self._repo.ui.warn(
                    _('warning: cannot merge flags for %s '
                      'without common ancestor - keeping local flags\n')
                    % afile)
        elif flags == fla:
            # local did not change the flag, so take the other side's
            flags = flo
    if preresolve:
        # restore the stashed local version to the working copy
        if localkey != nullhex:
            f = self._repo.vfs('merge/' + localkey)
            wctx[dfile].write(f.read(), flags)
            f.close()
        else:
            wctx[dfile].remove(ignoremissing=True)
        complete, r, deleted = filemerge.premerge(self._repo, self._local,
                                                  lfile, fcd, fco, fca,
                                                  labels=self._labels)
    else:
        complete, r, deleted = filemerge.filemerge(self._repo, self._local,
                                                   lfile, fcd, fco, fca,
                                                   labels=self._labels)
    if r is None:
        # no real conflict
        del self._state[dfile]
        self._stateextras.pop(dfile, None)
        self._dirty = True
    elif not r:
        self.mark(dfile, 'r')

    if complete:
        action = None
        if deleted:
            if fcd.isabsent():
                # dc: local picked. Need to drop if present, which may
                # happen on re-resolves.
                action = 'f'
            else:
                # cd: remote picked (or otherwise deleted)
                action = 'r'
        else:
            if fcd.isabsent():  # dc: remote picked
                action = 'g'
            elif fco.isabsent():  # cd: local picked
                if dfile in self.localctx:
                    action = 'am'
                else:
                    action = 'a'
            # else: regular merges (no action necessary)
        self._results[dfile] = r, action

    return complete, r
536
536
def _filectxorabsent(self, hexnode, ctx, f):
    """Return ctx[f], or an absent filectx when hexnode is nullhex."""
    if hexnode == nullhex:
        return filemerge.absentfilectx(ctx, f)
    return ctx[f]
542
542
def preresolve(self, dfile, wctx):
    """Run the premerge step for `dfile`.

    Returns whether the merge is complete, and the exit code."""
    return self._resolve(True, dfile, wctx)
548
548
def resolve(self, dfile, wctx):
    """Run the merge step (premerge assumed done) for `dfile`.

    Returns the exit code of the merge."""
    return self._resolve(False, dfile, wctx)[1]
554
554
def counts(self):
    """Return (updated, merged, removed) file counts for this session."""
    updated = merged = removed = 0
    for ret, action in self._results.itervalues():
        if ret is None:
            # premerge handled the file entirely
            updated += 1
        elif ret == 0:
            if action == 'r':
                removed += 1
            else:
                merged += 1
    return updated, merged, removed
568
568
def unresolvedcount(self):
    """Return the number of still-unresolved files (persistent)."""
    return sum(1 for entry in self._state.itervalues()
               if entry[0] == 'u')
573
573
def actions(self):
    """Return dirstate actions queued by resolves, keyed by action type."""
    queued = {'r': [], 'f': [], 'a': [], 'am': [], 'g': []}
    for path, (ret, action) in self._results.iteritems():
        if action is not None:
            queued[action].append((path, None, "merge result"))
    return queued
581
581
def recordactions(self):
    """Record remove/add/get actions in the dirstate."""
    # a non-null second parent means this is a branch merge
    branchmerge = self._repo.dirstate.p2() != nullid
    recordupdates(self._repo, self.actions(), branchmerge)
586
586
def queueremove(self, f):
    """Queue a file for removal from the dirstate.

    Meant for use by custom merge drivers."""
    self._results[f] = 0, 'r'
592
592
def queueadd(self, f):
    """Queue a file for addition to the dirstate.

    Meant for use by custom merge drivers."""
    self._results[f] = 0, 'a'
598
598
def queueget(self, f):
    """Queue a file to be marked modified in the dirstate.

    Meant for use by custom merge drivers."""
    self._results[f] = 0, 'g'
604
604
def _getcheckunknownconfig(repo, section, name):
    """Read and validate a merge.checkunknown-style config value.

    Raises ConfigError unless the value is one of 'abort', 'ignore'
    or 'warn'; the default when unset is 'abort'.
    """
    config = repo.ui.config(section, name, default='abort')
    valid = ['abort', 'ignore', 'warn']
    if config not in valid:
        validstr = ', '.join("'" + v + "'" for v in valid)
        raise error.ConfigError(_("%s.%s not valid "
                                  "('%s' is none of %s)")
                                % (section, name, config, validstr))
    return config
614
614
def _checkunknownfile(repo, wctx, mctx, f, f2=None):
    """True when untracked working-copy file `f` differs from `f2` in mctx.

    `f2` defaults to `f`; only auditable, untracked, on-disk files are
    considered, everything else is reported as not conflicting.
    """
    if f2 is None:
        f2 = f
    return (repo.wvfs.audit.check(f)
            and repo.wvfs.isfileorlink(f)
            and repo.dirstate.normalize(f) not in repo.dirstate
            and mctx[f2].cmp(wctx[f]))
622
622
def _checkunknownfiles(repo, wctx, mctx, force, actions, mergeforce):
    """
    Considers any actions that care about the presence of conflicting unknown
    files. For some actions, the result is to abort; for others, it is to
    choose a different action.

    Note: the original code carried an `elif config == 'abort'` branch after
    `elif mergeforce or config == 'abort'`; it was unreachable (the earlier
    condition already captures config == 'abort') and has been removed here
    with no change in behavior.
    """
    conflicts = set()
    warnconflicts = set()
    abortconflicts = set()
    unknownconfig = _getcheckunknownconfig(repo, 'merge', 'checkunknown')
    ignoredconfig = _getcheckunknownconfig(repo, 'merge', 'checkignored')
    if not force:
        def collectconflicts(conflicts, config):
            if config == 'abort':
                abortconflicts.update(conflicts)
            elif config == 'warn':
                warnconflicts.update(conflicts)

        for f, (m, args, msg) in actions.iteritems():
            if m in ('c', 'dc'):
                if _checkunknownfile(repo, wctx, mctx, f):
                    conflicts.add(f)
            elif m == 'dg':
                if _checkunknownfile(repo, wctx, mctx, f, args[0]):
                    conflicts.add(f)

        ignoredconflicts = set([c for c in conflicts
                                if repo.dirstate._ignore(c)])
        unknownconflicts = conflicts - ignoredconflicts
        collectconflicts(ignoredconflicts, ignoredconfig)
        collectconflicts(unknownconflicts, unknownconfig)
    else:
        for f, (m, args, msg) in actions.iteritems():
            if m == 'cm':
                fl2, anc = args
                different = _checkunknownfile(repo, wctx, mctx, f)
                if repo.dirstate._ignore(f):
                    config = ignoredconfig
                else:
                    config = unknownconfig

                # The behavior when force is True is described by this table:
                #  config  different  mergeforce  |    action    backup
                #    *         n          *       |      get        n
                #    *         y          y       |     merge       -
                #   abort      y          n       |     merge       -   (1)
                #   warn       y          n       |  warn + get     y
                #  ignore      y          n       |      get        y
                #
                # (1) this is probably the wrong behavior here -- we should
                #     probably abort, but some actions like rebases currently
                #     don't like an abort happening in the middle of
                #     merge.update.
                if not different:
                    actions[f] = ('g', (fl2, False), "remote created")
                elif mergeforce or config == 'abort':
                    actions[f] = ('m', (f, f, None, False, anc),
                                  "remote differs from untracked local")
                else:
                    if config == 'warn':
                        warnconflicts.add(f)
                    actions[f] = ('g', (fl2, True), "remote created")

    for f in sorted(abortconflicts):
        repo.ui.warn(_("%s: untracked file differs\n") % f)
    if abortconflicts:
        raise error.Abort(_("untracked files in working directory "
                            "differ from files in requested revision"))

    for f in sorted(warnconflicts):
        repo.ui.warn(_("%s: replacing untracked file\n") % f)

    # rewrite 'c' (create) actions into 'g' (get), backing up when the
    # file was found to conflict with an untracked file
    for f, (m, args, msg) in actions.iteritems():
        backup = f in conflicts
        if m == 'c':
            flags, = args
            actions[f] = ('g', (flags, backup), msg)
702
702
703 def _forgetremoved(wctx, mctx, branchmerge):
703 def _forgetremoved(wctx, mctx, branchmerge):
704 """
704 """
705 Forget removed files
705 Forget removed files
706
706
707 If we're jumping between revisions (as opposed to merging), and if
707 If we're jumping between revisions (as opposed to merging), and if
708 neither the working directory nor the target rev has the file,
708 neither the working directory nor the target rev has the file,
709 then we need to remove it from the dirstate, to prevent the
709 then we need to remove it from the dirstate, to prevent the
710 dirstate from listing the file when it is no longer in the
710 dirstate from listing the file when it is no longer in the
711 manifest.
711 manifest.
712
712
713 If we're merging, and the other revision has removed a file
713 If we're merging, and the other revision has removed a file
714 that is not present in the working directory, we need to mark it
714 that is not present in the working directory, we need to mark it
715 as removed.
715 as removed.
716 """
716 """
717
717
718 actions = {}
718 actions = {}
719 m = 'f'
719 m = 'f'
720 if branchmerge:
720 if branchmerge:
721 m = 'r'
721 m = 'r'
722 for f in wctx.deleted():
722 for f in wctx.deleted():
723 if f not in mctx:
723 if f not in mctx:
724 actions[f] = m, None, "forget deleted"
724 actions[f] = m, None, "forget deleted"
725
725
726 if not branchmerge:
726 if not branchmerge:
727 for f in wctx.removed():
727 for f in wctx.removed():
728 if f not in mctx:
728 if f not in mctx:
729 actions[f] = 'f', None, "forget removed"
729 actions[f] = 'f', None, "forget removed"
730
730
731 return actions
731 return actions
732
732
733 def _checkcollision(repo, wmf, actions):
733 def _checkcollision(repo, wmf, actions):
734 # build provisional merged manifest up
734 # build provisional merged manifest up
735 pmmf = set(wmf)
735 pmmf = set(wmf)
736
736
737 if actions:
737 if actions:
738 # k, dr, e and rd are no-op
738 # k, dr, e and rd are no-op
739 for m in 'a', 'am', 'f', 'g', 'cd', 'dc':
739 for m in 'a', 'am', 'f', 'g', 'cd', 'dc':
740 for f, args, msg in actions[m]:
740 for f, args, msg in actions[m]:
741 pmmf.add(f)
741 pmmf.add(f)
742 for f, args, msg in actions['r']:
742 for f, args, msg in actions['r']:
743 pmmf.discard(f)
743 pmmf.discard(f)
744 for f, args, msg in actions['dm']:
744 for f, args, msg in actions['dm']:
745 f2, flags = args
745 f2, flags = args
746 pmmf.discard(f2)
746 pmmf.discard(f2)
747 pmmf.add(f)
747 pmmf.add(f)
748 for f, args, msg in actions['dg']:
748 for f, args, msg in actions['dg']:
749 pmmf.add(f)
749 pmmf.add(f)
750 for f, args, msg in actions['m']:
750 for f, args, msg in actions['m']:
751 f1, f2, fa, move, anc = args
751 f1, f2, fa, move, anc = args
752 if move:
752 if move:
753 pmmf.discard(f1)
753 pmmf.discard(f1)
754 pmmf.add(f)
754 pmmf.add(f)
755
755
756 # check case-folding collision in provisional merged manifest
756 # check case-folding collision in provisional merged manifest
757 foldmap = {}
757 foldmap = {}
758 for f in sorted(pmmf):
758 for f in sorted(pmmf):
759 fold = util.normcase(f)
759 fold = util.normcase(f)
760 if fold in foldmap:
760 if fold in foldmap:
761 raise error.Abort(_("case-folding collision between %s and %s")
761 raise error.Abort(_("case-folding collision between %s and %s")
762 % (f, foldmap[fold]))
762 % (f, foldmap[fold]))
763 foldmap[fold] = f
763 foldmap[fold] = f
764
764
765 # check case-folding of directories
765 # check case-folding of directories
766 foldprefix = unfoldprefix = lastfull = ''
766 foldprefix = unfoldprefix = lastfull = ''
767 for fold, f in sorted(foldmap.items()):
767 for fold, f in sorted(foldmap.items()):
768 if fold.startswith(foldprefix) and not f.startswith(unfoldprefix):
768 if fold.startswith(foldprefix) and not f.startswith(unfoldprefix):
769 # the folded prefix matches but actual casing is different
769 # the folded prefix matches but actual casing is different
770 raise error.Abort(_("case-folding collision between "
770 raise error.Abort(_("case-folding collision between "
771 "%s and directory of %s") % (lastfull, f))
771 "%s and directory of %s") % (lastfull, f))
772 foldprefix = fold + '/'
772 foldprefix = fold + '/'
773 unfoldprefix = f + '/'
773 unfoldprefix = f + '/'
774 lastfull = f
774 lastfull = f
775
775
776 def driverpreprocess(repo, ms, wctx, labels=None):
776 def driverpreprocess(repo, ms, wctx, labels=None):
777 """run the preprocess step of the merge driver, if any
777 """run the preprocess step of the merge driver, if any
778
778
779 This is currently not implemented -- it's an extension point."""
779 This is currently not implemented -- it's an extension point."""
780 return True
780 return True
781
781
782 def driverconclude(repo, ms, wctx, labels=None):
782 def driverconclude(repo, ms, wctx, labels=None):
783 """run the conclude step of the merge driver, if any
783 """run the conclude step of the merge driver, if any
784
784
785 This is currently not implemented -- it's an extension point."""
785 This is currently not implemented -- it's an extension point."""
786 return True
786 return True
787
787
788 def manifestmerge(repo, wctx, p2, pa, branchmerge, force, matcher,
788 def manifestmerge(repo, wctx, p2, pa, branchmerge, force, matcher,
789 acceptremote, followcopies, forcefulldiff=False):
789 acceptremote, followcopies, forcefulldiff=False):
790 """
790 """
791 Merge wctx and p2 with ancestor pa and generate merge action list
791 Merge wctx and p2 with ancestor pa and generate merge action list
792
792
793 branchmerge and force are as passed in to update
793 branchmerge and force are as passed in to update
794 matcher = matcher to filter file lists
794 matcher = matcher to filter file lists
795 acceptremote = accept the incoming changes without prompting
795 acceptremote = accept the incoming changes without prompting
796 """
796 """
797 if matcher is not None and matcher.always():
797 if matcher is not None and matcher.always():
798 matcher = None
798 matcher = None
799
799
800 copy, movewithdir, diverge, renamedelete, dirmove = {}, {}, {}, {}, {}
800 copy, movewithdir, diverge, renamedelete, dirmove = {}, {}, {}, {}, {}
801
801
802 # manifests fetched in order are going to be faster, so prime the caches
802 # manifests fetched in order are going to be faster, so prime the caches
803 [x.manifest() for x in
803 [x.manifest() for x in
804 sorted(wctx.parents() + [p2, pa], key=scmutil.intrev)]
804 sorted(wctx.parents() + [p2, pa], key=scmutil.intrev)]
805
805
806 if followcopies:
806 if followcopies:
807 ret = copies.mergecopies(repo, wctx, p2, pa)
807 ret = copies.mergecopies(repo, wctx, p2, pa)
808 copy, movewithdir, diverge, renamedelete, dirmove = ret
808 copy, movewithdir, diverge, renamedelete, dirmove = ret
809
809
810 boolbm = pycompat.bytestr(bool(branchmerge))
810 boolbm = pycompat.bytestr(bool(branchmerge))
811 boolf = pycompat.bytestr(bool(force))
811 boolf = pycompat.bytestr(bool(force))
812 boolm = pycompat.bytestr(bool(matcher))
812 boolm = pycompat.bytestr(bool(matcher))
813 repo.ui.note(_("resolving manifests\n"))
813 repo.ui.note(_("resolving manifests\n"))
814 repo.ui.debug(" branchmerge: %s, force: %s, partial: %s\n"
814 repo.ui.debug(" branchmerge: %s, force: %s, partial: %s\n"
815 % (boolbm, boolf, boolm))
815 % (boolbm, boolf, boolm))
816 repo.ui.debug(" ancestor: %s, local: %s, remote: %s\n" % (pa, wctx, p2))
816 repo.ui.debug(" ancestor: %s, local: %s, remote: %s\n" % (pa, wctx, p2))
817
817
818 m1, m2, ma = wctx.manifest(), p2.manifest(), pa.manifest()
818 m1, m2, ma = wctx.manifest(), p2.manifest(), pa.manifest()
819 copied = set(copy.values())
819 copied = set(copy.values())
820 copied.update(movewithdir.values())
820 copied.update(movewithdir.values())
821
821
822 if '.hgsubstate' in m1:
822 if '.hgsubstate' in m1:
823 # check whether sub state is modified
823 # check whether sub state is modified
824 if any(wctx.sub(s).dirty() for s in wctx.substate):
824 if any(wctx.sub(s).dirty() for s in wctx.substate):
825 m1['.hgsubstate'] = modifiednodeid
825 m1['.hgsubstate'] = modifiednodeid
826
826
827 # Don't use m2-vs-ma optimization if:
827 # Don't use m2-vs-ma optimization if:
828 # - ma is the same as m1 or m2, which we're just going to diff again later
828 # - ma is the same as m1 or m2, which we're just going to diff again later
829 # - The caller specifically asks for a full diff, which is useful during bid
829 # - The caller specifically asks for a full diff, which is useful during bid
830 # merge.
830 # merge.
831 if (pa not in ([wctx, p2] + wctx.parents()) and not forcefulldiff):
831 if (pa not in ([wctx, p2] + wctx.parents()) and not forcefulldiff):
832 # Identify which files are relevant to the merge, so we can limit the
832 # Identify which files are relevant to the merge, so we can limit the
833 # total m1-vs-m2 diff to just those files. This has significant
833 # total m1-vs-m2 diff to just those files. This has significant
834 # performance benefits in large repositories.
834 # performance benefits in large repositories.
835 relevantfiles = set(ma.diff(m2).keys())
835 relevantfiles = set(ma.diff(m2).keys())
836
836
837 # For copied and moved files, we need to add the source file too.
837 # For copied and moved files, we need to add the source file too.
838 for copykey, copyvalue in copy.iteritems():
838 for copykey, copyvalue in copy.iteritems():
839 if copyvalue in relevantfiles:
839 if copyvalue in relevantfiles:
840 relevantfiles.add(copykey)
840 relevantfiles.add(copykey)
841 for movedirkey in movewithdir:
841 for movedirkey in movewithdir:
842 relevantfiles.add(movedirkey)
842 relevantfiles.add(movedirkey)
843 filesmatcher = scmutil.matchfiles(repo, relevantfiles)
843 filesmatcher = scmutil.matchfiles(repo, relevantfiles)
844 matcher = matchmod.intersectmatchers(matcher, filesmatcher)
844 matcher = matchmod.intersectmatchers(matcher, filesmatcher)
845
845
846 diff = m1.diff(m2, match=matcher)
846 diff = m1.diff(m2, match=matcher)
847
847
848 if matcher is None:
848 if matcher is None:
849 matcher = matchmod.always('', '')
849 matcher = matchmod.always('', '')
850
850
851 actions = {}
851 actions = {}
852 for f, ((n1, fl1), (n2, fl2)) in diff.iteritems():
852 for f, ((n1, fl1), (n2, fl2)) in diff.iteritems():
853 if n1 and n2: # file exists on both local and remote side
853 if n1 and n2: # file exists on both local and remote side
854 if f not in ma:
854 if f not in ma:
855 fa = copy.get(f, None)
855 fa = copy.get(f, None)
856 if fa is not None:
856 if fa is not None:
857 actions[f] = ('m', (f, f, fa, False, pa.node()),
857 actions[f] = ('m', (f, f, fa, False, pa.node()),
858 "both renamed from " + fa)
858 "both renamed from " + fa)
859 else:
859 else:
860 actions[f] = ('m', (f, f, None, False, pa.node()),
860 actions[f] = ('m', (f, f, None, False, pa.node()),
861 "both created")
861 "both created")
862 else:
862 else:
863 a = ma[f]
863 a = ma[f]
864 fla = ma.flags(f)
864 fla = ma.flags(f)
865 nol = 'l' not in fl1 + fl2 + fla
865 nol = 'l' not in fl1 + fl2 + fla
866 if n2 == a and fl2 == fla:
866 if n2 == a and fl2 == fla:
867 actions[f] = ('k' , (), "remote unchanged")
867 actions[f] = ('k' , (), "remote unchanged")
868 elif n1 == a and fl1 == fla: # local unchanged - use remote
868 elif n1 == a and fl1 == fla: # local unchanged - use remote
869 if n1 == n2: # optimization: keep local content
869 if n1 == n2: # optimization: keep local content
870 actions[f] = ('e', (fl2,), "update permissions")
870 actions[f] = ('e', (fl2,), "update permissions")
871 else:
871 else:
872 actions[f] = ('g', (fl2, False), "remote is newer")
872 actions[f] = ('g', (fl2, False), "remote is newer")
873 elif nol and n2 == a: # remote only changed 'x'
873 elif nol and n2 == a: # remote only changed 'x'
874 actions[f] = ('e', (fl2,), "update permissions")
874 actions[f] = ('e', (fl2,), "update permissions")
875 elif nol and n1 == a: # local only changed 'x'
875 elif nol and n1 == a: # local only changed 'x'
876 actions[f] = ('g', (fl1, False), "remote is newer")
876 actions[f] = ('g', (fl1, False), "remote is newer")
877 else: # both changed something
877 else: # both changed something
878 actions[f] = ('m', (f, f, f, False, pa.node()),
878 actions[f] = ('m', (f, f, f, False, pa.node()),
879 "versions differ")
879 "versions differ")
880 elif n1: # file exists only on local side
880 elif n1: # file exists only on local side
881 if f in copied:
881 if f in copied:
882 pass # we'll deal with it on m2 side
882 pass # we'll deal with it on m2 side
883 elif f in movewithdir: # directory rename, move local
883 elif f in movewithdir: # directory rename, move local
884 f2 = movewithdir[f]
884 f2 = movewithdir[f]
885 if f2 in m2:
885 if f2 in m2:
886 actions[f2] = ('m', (f, f2, None, True, pa.node()),
886 actions[f2] = ('m', (f, f2, None, True, pa.node()),
887 "remote directory rename, both created")
887 "remote directory rename, both created")
888 else:
888 else:
889 actions[f2] = ('dm', (f, fl1),
889 actions[f2] = ('dm', (f, fl1),
890 "remote directory rename - move from " + f)
890 "remote directory rename - move from " + f)
891 elif f in copy:
891 elif f in copy:
892 f2 = copy[f]
892 f2 = copy[f]
893 actions[f] = ('m', (f, f2, f2, False, pa.node()),
893 actions[f] = ('m', (f, f2, f2, False, pa.node()),
894 "local copied/moved from " + f2)
894 "local copied/moved from " + f2)
895 elif f in ma: # clean, a different, no remote
895 elif f in ma: # clean, a different, no remote
896 if n1 != ma[f]:
896 if n1 != ma[f]:
897 if acceptremote:
897 if acceptremote:
898 actions[f] = ('r', None, "remote delete")
898 actions[f] = ('r', None, "remote delete")
899 else:
899 else:
900 actions[f] = ('cd', (f, None, f, False, pa.node()),
900 actions[f] = ('cd', (f, None, f, False, pa.node()),
901 "prompt changed/deleted")
901 "prompt changed/deleted")
902 elif n1 == addednodeid:
902 elif n1 == addednodeid:
903 # This extra 'a' is added by working copy manifest to mark
903 # This extra 'a' is added by working copy manifest to mark
904 # the file as locally added. We should forget it instead of
904 # the file as locally added. We should forget it instead of
905 # deleting it.
905 # deleting it.
906 actions[f] = ('f', None, "remote deleted")
906 actions[f] = ('f', None, "remote deleted")
907 else:
907 else:
908 actions[f] = ('r', None, "other deleted")
908 actions[f] = ('r', None, "other deleted")
909 elif n2: # file exists only on remote side
909 elif n2: # file exists only on remote side
910 if f in copied:
910 if f in copied:
911 pass # we'll deal with it on m1 side
911 pass # we'll deal with it on m1 side
912 elif f in movewithdir:
912 elif f in movewithdir:
913 f2 = movewithdir[f]
913 f2 = movewithdir[f]
914 if f2 in m1:
914 if f2 in m1:
915 actions[f2] = ('m', (f2, f, None, False, pa.node()),
915 actions[f2] = ('m', (f2, f, None, False, pa.node()),
916 "local directory rename, both created")
916 "local directory rename, both created")
917 else:
917 else:
918 actions[f2] = ('dg', (f, fl2),
918 actions[f2] = ('dg', (f, fl2),
919 "local directory rename - get from " + f)
919 "local directory rename - get from " + f)
920 elif f in copy:
920 elif f in copy:
921 f2 = copy[f]
921 f2 = copy[f]
922 if f2 in m2:
922 if f2 in m2:
923 actions[f] = ('m', (f2, f, f2, False, pa.node()),
923 actions[f] = ('m', (f2, f, f2, False, pa.node()),
924 "remote copied from " + f2)
924 "remote copied from " + f2)
925 else:
925 else:
926 actions[f] = ('m', (f2, f, f2, True, pa.node()),
926 actions[f] = ('m', (f2, f, f2, True, pa.node()),
927 "remote moved from " + f2)
927 "remote moved from " + f2)
928 elif f not in ma:
928 elif f not in ma:
929 # local unknown, remote created: the logic is described by the
929 # local unknown, remote created: the logic is described by the
930 # following table:
930 # following table:
931 #
931 #
932 # force branchmerge different | action
932 # force branchmerge different | action
933 # n * * | create
933 # n * * | create
934 # y n * | create
934 # y n * | create
935 # y y n | create
935 # y y n | create
936 # y y y | merge
936 # y y y | merge
937 #
937 #
938 # Checking whether the files are different is expensive, so we
938 # Checking whether the files are different is expensive, so we
939 # don't do that when we can avoid it.
939 # don't do that when we can avoid it.
940 if not force:
940 if not force:
941 actions[f] = ('c', (fl2,), "remote created")
941 actions[f] = ('c', (fl2,), "remote created")
942 elif not branchmerge:
942 elif not branchmerge:
943 actions[f] = ('c', (fl2,), "remote created")
943 actions[f] = ('c', (fl2,), "remote created")
944 else:
944 else:
945 actions[f] = ('cm', (fl2, pa.node()),
945 actions[f] = ('cm', (fl2, pa.node()),
946 "remote created, get or merge")
946 "remote created, get or merge")
947 elif n2 != ma[f]:
947 elif n2 != ma[f]:
948 df = None
948 df = None
949 for d in dirmove:
949 for d in dirmove:
950 if f.startswith(d):
950 if f.startswith(d):
951 # new file added in a directory that was moved
951 # new file added in a directory that was moved
952 df = dirmove[d] + f[len(d):]
952 df = dirmove[d] + f[len(d):]
953 break
953 break
954 if df is not None and df in m1:
954 if df is not None and df in m1:
955 actions[df] = ('m', (df, f, f, False, pa.node()),
955 actions[df] = ('m', (df, f, f, False, pa.node()),
956 "local directory rename - respect move from " + f)
956 "local directory rename - respect move from " + f)
957 elif acceptremote:
957 elif acceptremote:
958 actions[f] = ('c', (fl2,), "remote recreating")
958 actions[f] = ('c', (fl2,), "remote recreating")
959 else:
959 else:
960 actions[f] = ('dc', (None, f, f, False, pa.node()),
960 actions[f] = ('dc', (None, f, f, False, pa.node()),
961 "prompt deleted/changed")
961 "prompt deleted/changed")
962
962
963 return actions, diverge, renamedelete
963 return actions, diverge, renamedelete
964
964
965 def _resolvetrivial(repo, wctx, mctx, ancestor, actions):
965 def _resolvetrivial(repo, wctx, mctx, ancestor, actions):
966 """Resolves false conflicts where the nodeid changed but the content
966 """Resolves false conflicts where the nodeid changed but the content
967 remained the same."""
967 remained the same."""
968
968
969 for f, (m, args, msg) in actions.items():
969 for f, (m, args, msg) in actions.items():
970 if m == 'cd' and f in ancestor and not wctx[f].cmp(ancestor[f]):
970 if m == 'cd' and f in ancestor and not wctx[f].cmp(ancestor[f]):
971 # local did change but ended up with same content
971 # local did change but ended up with same content
972 actions[f] = 'r', None, "prompt same"
972 actions[f] = 'r', None, "prompt same"
973 elif m == 'dc' and f in ancestor and not mctx[f].cmp(ancestor[f]):
973 elif m == 'dc' and f in ancestor and not mctx[f].cmp(ancestor[f]):
974 # remote did change but ended up with same content
974 # remote did change but ended up with same content
975 del actions[f] # don't get = keep local deleted
975 del actions[f] # don't get = keep local deleted
976
976
977 def calculateupdates(repo, wctx, mctx, ancestors, branchmerge, force,
977 def calculateupdates(repo, wctx, mctx, ancestors, branchmerge, force,
978 acceptremote, followcopies, matcher=None,
978 acceptremote, followcopies, matcher=None,
979 mergeforce=False):
979 mergeforce=False):
980 "Calculate the actions needed to merge mctx into wctx using ancestors"
980 "Calculate the actions needed to merge mctx into wctx using ancestors"
981 if len(ancestors) == 1: # default
981 if len(ancestors) == 1: # default
982 actions, diverge, renamedelete = manifestmerge(
982 actions, diverge, renamedelete = manifestmerge(
983 repo, wctx, mctx, ancestors[0], branchmerge, force, matcher,
983 repo, wctx, mctx, ancestors[0], branchmerge, force, matcher,
984 acceptremote, followcopies)
984 acceptremote, followcopies)
985 _checkunknownfiles(repo, wctx, mctx, force, actions, mergeforce)
985 _checkunknownfiles(repo, wctx, mctx, force, actions, mergeforce)
986
986
987 else: # only when merge.preferancestor=* - the default
987 else: # only when merge.preferancestor=* - the default
988 repo.ui.note(
988 repo.ui.note(
989 _("note: merging %s and %s using bids from ancestors %s\n") %
989 _("note: merging %s and %s using bids from ancestors %s\n") %
990 (wctx, mctx, _(' and ').join(str(anc) for anc in ancestors)))
990 (wctx, mctx, _(' and ').join(str(anc) for anc in ancestors)))
991
991
992 # Call for bids
992 # Call for bids
993 fbids = {} # mapping filename to bids (action method to list af actions)
993 fbids = {} # mapping filename to bids (action method to list af actions)
994 diverge, renamedelete = None, None
994 diverge, renamedelete = None, None
995 for ancestor in ancestors:
995 for ancestor in ancestors:
996 repo.ui.note(_('\ncalculating bids for ancestor %s\n') % ancestor)
996 repo.ui.note(_('\ncalculating bids for ancestor %s\n') % ancestor)
997 actions, diverge1, renamedelete1 = manifestmerge(
997 actions, diverge1, renamedelete1 = manifestmerge(
998 repo, wctx, mctx, ancestor, branchmerge, force, matcher,
998 repo, wctx, mctx, ancestor, branchmerge, force, matcher,
999 acceptremote, followcopies, forcefulldiff=True)
999 acceptremote, followcopies, forcefulldiff=True)
1000 _checkunknownfiles(repo, wctx, mctx, force, actions, mergeforce)
1000 _checkunknownfiles(repo, wctx, mctx, force, actions, mergeforce)
1001
1001
1002 # Track the shortest set of warning on the theory that bid
1002 # Track the shortest set of warning on the theory that bid
1003 # merge will correctly incorporate more information
1003 # merge will correctly incorporate more information
1004 if diverge is None or len(diverge1) < len(diverge):
1004 if diverge is None or len(diverge1) < len(diverge):
1005 diverge = diverge1
1005 diverge = diverge1
1006 if renamedelete is None or len(renamedelete) < len(renamedelete1):
1006 if renamedelete is None or len(renamedelete) < len(renamedelete1):
1007 renamedelete = renamedelete1
1007 renamedelete = renamedelete1
1008
1008
1009 for f, a in sorted(actions.iteritems()):
1009 for f, a in sorted(actions.iteritems()):
1010 m, args, msg = a
1010 m, args, msg = a
1011 repo.ui.debug(' %s: %s -> %s\n' % (f, msg, m))
1011 repo.ui.debug(' %s: %s -> %s\n' % (f, msg, m))
1012 if f in fbids:
1012 if f in fbids:
1013 d = fbids[f]
1013 d = fbids[f]
1014 if m in d:
1014 if m in d:
1015 d[m].append(a)
1015 d[m].append(a)
1016 else:
1016 else:
1017 d[m] = [a]
1017 d[m] = [a]
1018 else:
1018 else:
1019 fbids[f] = {m: [a]}
1019 fbids[f] = {m: [a]}
1020
1020
1021 # Pick the best bid for each file
1021 # Pick the best bid for each file
1022 repo.ui.note(_('\nauction for merging merge bids\n'))
1022 repo.ui.note(_('\nauction for merging merge bids\n'))
1023 actions = {}
1023 actions = {}
1024 dms = [] # filenames that have dm actions
1024 dms = [] # filenames that have dm actions
1025 for f, bids in sorted(fbids.items()):
1025 for f, bids in sorted(fbids.items()):
1026 # bids is a mapping from action method to list af actions
1026 # bids is a mapping from action method to list af actions
1027 # Consensus?
1027 # Consensus?
1028 if len(bids) == 1: # all bids are the same kind of method
1028 if len(bids) == 1: # all bids are the same kind of method
1029 m, l = bids.items()[0]
1029 m, l = bids.items()[0]
1030 if all(a == l[0] for a in l[1:]): # len(bids) is > 1
1030 if all(a == l[0] for a in l[1:]): # len(bids) is > 1
1031 repo.ui.note(_(" %s: consensus for %s\n") % (f, m))
1031 repo.ui.note(_(" %s: consensus for %s\n") % (f, m))
1032 actions[f] = l[0]
1032 actions[f] = l[0]
1033 if m == 'dm':
1033 if m == 'dm':
1034 dms.append(f)
1034 dms.append(f)
1035 continue
1035 continue
1036 # If keep is an option, just do it.
1036 # If keep is an option, just do it.
1037 if 'k' in bids:
1037 if 'k' in bids:
1038 repo.ui.note(_(" %s: picking 'keep' action\n") % f)
1038 repo.ui.note(_(" %s: picking 'keep' action\n") % f)
1039 actions[f] = bids['k'][0]
1039 actions[f] = bids['k'][0]
1040 continue
1040 continue
1041 # If there are gets and they all agree [how could they not?], do it.
1041 # If there are gets and they all agree [how could they not?], do it.
1042 if 'g' in bids:
1042 if 'g' in bids:
1043 ga0 = bids['g'][0]
1043 ga0 = bids['g'][0]
1044 if all(a == ga0 for a in bids['g'][1:]):
1044 if all(a == ga0 for a in bids['g'][1:]):
1045 repo.ui.note(_(" %s: picking 'get' action\n") % f)
1045 repo.ui.note(_(" %s: picking 'get' action\n") % f)
1046 actions[f] = ga0
1046 actions[f] = ga0
1047 continue
1047 continue
1048 # TODO: Consider other simple actions such as mode changes
1048 # TODO: Consider other simple actions such as mode changes
1049 # Handle inefficient democrazy.
1049 # Handle inefficient democrazy.
1050 repo.ui.note(_(' %s: multiple bids for merge action:\n') % f)
1050 repo.ui.note(_(' %s: multiple bids for merge action:\n') % f)
1051 for m, l in sorted(bids.items()):
1051 for m, l in sorted(bids.items()):
1052 for _f, args, msg in l:
1052 for _f, args, msg in l:
1053 repo.ui.note(' %s -> %s\n' % (msg, m))
1053 repo.ui.note(' %s -> %s\n' % (msg, m))
1054 # Pick random action. TODO: Instead, prompt user when resolving
1054 # Pick random action. TODO: Instead, prompt user when resolving
1055 m, l = bids.items()[0]
1055 m, l = bids.items()[0]
1056 repo.ui.warn(_(' %s: ambiguous merge - picked %s action\n') %
1056 repo.ui.warn(_(' %s: ambiguous merge - picked %s action\n') %
1057 (f, m))
1057 (f, m))
1058 actions[f] = l[0]
1058 actions[f] = l[0]
1059 if m == 'dm':
1059 if m == 'dm':
1060 dms.append(f)
1060 dms.append(f)
1061 continue
1061 continue
1062 # Work around 'dm' that can cause multiple actions for the same file
1062 # Work around 'dm' that can cause multiple actions for the same file
1063 for f in dms:
1063 for f in dms:
1064 dm, (f0, flags), msg = actions[f]
1064 dm, (f0, flags), msg = actions[f]
1065 assert dm == 'dm', dm
1065 assert dm == 'dm', dm
1066 if f0 in actions and actions[f0][0] == 'r':
1066 if f0 in actions and actions[f0][0] == 'r':
1067 # We have one bid for removing a file and another for moving it.
1067 # We have one bid for removing a file and another for moving it.
1068 # These two could be merged as first move and then delete ...
1068 # These two could be merged as first move and then delete ...
1069 # but instead drop moving and just delete.
1069 # but instead drop moving and just delete.
1070 del actions[f]
1070 del actions[f]
1071 repo.ui.note(_('end of auction\n\n'))
1071 repo.ui.note(_('end of auction\n\n'))
1072
1072
1073 _resolvetrivial(repo, wctx, mctx, ancestors[0], actions)
1073 _resolvetrivial(repo, wctx, mctx, ancestors[0], actions)
1074
1074
1075 if wctx.rev() is None:
1075 if wctx.rev() is None:
1076 fractions = _forgetremoved(wctx, mctx, branchmerge)
1076 fractions = _forgetremoved(wctx, mctx, branchmerge)
1077 actions.update(fractions)
1077 actions.update(fractions)
1078
1078
1079 return actions, diverge, renamedelete
1079 return actions, diverge, renamedelete
1080
1080
1081 def batchremove(repo, wctx, actions):
1081 def batchremove(repo, wctx, actions):
1082 """apply removes to the working directory
1082 """apply removes to the working directory
1083
1083
1084 yields tuples for progress updates
1084 yields tuples for progress updates
1085 """
1085 """
1086 verbose = repo.ui.verbose
1086 verbose = repo.ui.verbose
1087 audit = repo.wvfs.audit
1087 audit = repo.wvfs.audit
1088 try:
1088 try:
1089 cwd = pycompat.getcwd()
1089 cwd = pycompat.getcwd()
1090 except OSError as err:
1090 except OSError as err:
1091 if err.errno != errno.ENOENT:
1091 if err.errno != errno.ENOENT:
1092 raise
1092 raise
1093 cwd = None
1093 cwd = None
1094 i = 0
1094 i = 0
1095 for f, args, msg in actions:
1095 for f, args, msg in actions:
1096 repo.ui.debug(" %s: %s -> r\n" % (f, msg))
1096 repo.ui.debug(" %s: %s -> r\n" % (f, msg))
1097 if verbose:
1097 if verbose:
1098 repo.ui.note(_("removing %s\n") % f)
1098 repo.ui.note(_("removing %s\n") % f)
1099 audit(f)
1099 audit(f)
1100 try:
1100 try:
1101 wctx[f].remove(ignoremissing=True)
1101 wctx[f].remove(ignoremissing=True)
1102 except OSError as inst:
1102 except OSError as inst:
1103 repo.ui.warn(_("update failed to remove %s: %s!\n") %
1103 repo.ui.warn(_("update failed to remove %s: %s!\n") %
1104 (f, inst.strerror))
1104 (f, inst.strerror))
1105 if i == 100:
1105 if i == 100:
1106 yield i, f
1106 yield i, f
1107 i = 0
1107 i = 0
1108 i += 1
1108 i += 1
1109 if i > 0:
1109 if i > 0:
1110 yield i, f
1110 yield i, f
1111 if cwd:
1111 if cwd:
1112 # cwd was present before we started to remove files
1112 # cwd was present before we started to remove files
1113 # let's check if it is present after we removed them
1113 # let's check if it is present after we removed them
1114 try:
1114 try:
1115 pycompat.getcwd()
1115 pycompat.getcwd()
1116 except OSError as err:
1116 except OSError as err:
1117 if err.errno != errno.ENOENT:
1117 if err.errno != errno.ENOENT:
1118 raise
1118 raise
1119 # Print a warning if cwd was deleted
1119 # Print a warning if cwd was deleted
1120 repo.ui.warn(_("current directory was removed\n"
1120 repo.ui.warn(_("current directory was removed\n"
1121 "(consider changing to repo root: %s)\n") %
1121 "(consider changing to repo root: %s)\n") %
1122 repo.root)
1122 repo.root)
1123
1123
1124 def batchget(repo, mctx, wctx, actions):
1124 def batchget(repo, mctx, wctx, actions):
1125 """apply gets to the working directory
1125 """apply gets to the working directory
1126
1126
1127 mctx is the context to get from
1127 mctx is the context to get from
1128
1128
1129 yields tuples for progress updates
1129 yields tuples for progress updates
1130 """
1130 """
1131 verbose = repo.ui.verbose
1131 verbose = repo.ui.verbose
1132 fctx = mctx.filectx
1132 fctx = mctx.filectx
1133 wwrite = repo.wwrite
1134 ui = repo.ui
1133 ui = repo.ui
1135 i = 0
1134 i = 0
1136 with repo.wvfs.backgroundclosing(ui, expectedcount=len(actions)):
1135 with repo.wvfs.backgroundclosing(ui, expectedcount=len(actions)):
1137 for f, (flags, backup), msg in actions:
1136 for f, (flags, backup), msg in actions:
1138 repo.ui.debug(" %s: %s -> g\n" % (f, msg))
1137 repo.ui.debug(" %s: %s -> g\n" % (f, msg))
1139 if verbose:
1138 if verbose:
1140 repo.ui.note(_("getting %s\n") % f)
1139 repo.ui.note(_("getting %s\n") % f)
1141
1140
1142 if backup:
1141 if backup:
1143 absf = repo.wjoin(f)
1142 absf = repo.wjoin(f)
1144 orig = scmutil.origpath(ui, repo, absf)
1143 orig = scmutil.origpath(ui, repo, absf)
1145 try:
1144 try:
1146 if repo.wvfs.isfileorlink(f):
1145 if repo.wvfs.isfileorlink(f):
1147 util.rename(absf, orig)
1146 util.rename(absf, orig)
1148 except OSError as e:
1147 except OSError as e:
1149 if e.errno != errno.ENOENT:
1148 if e.errno != errno.ENOENT:
1150 raise
1149 raise
1151
1150
1152 if repo.wvfs.isdir(f) and not repo.wvfs.islink(f):
1151 if repo.wvfs.isdir(f) and not repo.wvfs.islink(f):
1153 repo.wvfs.removedirs(f)
1152 repo.wvfs.removedirs(f)
1154 wwrite(f, fctx(f).data(), flags, backgroundclose=True)
1153 wctx[f].write(fctx(f).data(), flags, backgroundclose=True)
1155 if i == 100:
1154 if i == 100:
1156 yield i, f
1155 yield i, f
1157 i = 0
1156 i = 0
1158 i += 1
1157 i += 1
1159 if i > 0:
1158 if i > 0:
1160 yield i, f
1159 yield i, f
1161
1160
1162 def applyupdates(repo, actions, wctx, mctx, overwrite, labels=None):
1161 def applyupdates(repo, actions, wctx, mctx, overwrite, labels=None):
1163 """apply the merge action list to the working directory
1162 """apply the merge action list to the working directory
1164
1163
1165 wctx is the working copy context
1164 wctx is the working copy context
1166 mctx is the context to be merged into the working copy
1165 mctx is the context to be merged into the working copy
1167
1166
1168 Return a tuple of counts (updated, merged, removed, unresolved) that
1167 Return a tuple of counts (updated, merged, removed, unresolved) that
1169 describes how many files were affected by the update.
1168 describes how many files were affected by the update.
1170 """
1169 """
1171
1170
1172 updated, merged, removed = 0, 0, 0
1171 updated, merged, removed = 0, 0, 0
1173 ms = mergestate.clean(repo, wctx.p1().node(), mctx.node(), labels)
1172 ms = mergestate.clean(repo, wctx.p1().node(), mctx.node(), labels)
1174 moves = []
1173 moves = []
1175 for m, l in actions.items():
1174 for m, l in actions.items():
1176 l.sort()
1175 l.sort()
1177
1176
1178 # 'cd' and 'dc' actions are treated like other merge conflicts
1177 # 'cd' and 'dc' actions are treated like other merge conflicts
1179 mergeactions = sorted(actions['cd'])
1178 mergeactions = sorted(actions['cd'])
1180 mergeactions.extend(sorted(actions['dc']))
1179 mergeactions.extend(sorted(actions['dc']))
1181 mergeactions.extend(actions['m'])
1180 mergeactions.extend(actions['m'])
1182 for f, args, msg in mergeactions:
1181 for f, args, msg in mergeactions:
1183 f1, f2, fa, move, anc = args
1182 f1, f2, fa, move, anc = args
1184 if f == '.hgsubstate': # merged internally
1183 if f == '.hgsubstate': # merged internally
1185 continue
1184 continue
1186 if f1 is None:
1185 if f1 is None:
1187 fcl = filemerge.absentfilectx(wctx, fa)
1186 fcl = filemerge.absentfilectx(wctx, fa)
1188 else:
1187 else:
1189 repo.ui.debug(" preserving %s for resolve of %s\n" % (f1, f))
1188 repo.ui.debug(" preserving %s for resolve of %s\n" % (f1, f))
1190 fcl = wctx[f1]
1189 fcl = wctx[f1]
1191 if f2 is None:
1190 if f2 is None:
1192 fco = filemerge.absentfilectx(mctx, fa)
1191 fco = filemerge.absentfilectx(mctx, fa)
1193 else:
1192 else:
1194 fco = mctx[f2]
1193 fco = mctx[f2]
1195 actx = repo[anc]
1194 actx = repo[anc]
1196 if fa in actx:
1195 if fa in actx:
1197 fca = actx[fa]
1196 fca = actx[fa]
1198 else:
1197 else:
1199 # TODO: move to absentfilectx
1198 # TODO: move to absentfilectx
1200 fca = repo.filectx(f1, fileid=nullrev)
1199 fca = repo.filectx(f1, fileid=nullrev)
1201 ms.add(fcl, fco, fca, f)
1200 ms.add(fcl, fco, fca, f)
1202 if f1 != f and move:
1201 if f1 != f and move:
1203 moves.append(f1)
1202 moves.append(f1)
1204
1203
1205 audit = repo.wvfs.audit
1204 audit = repo.wvfs.audit
1206 _updating = _('updating')
1205 _updating = _('updating')
1207 _files = _('files')
1206 _files = _('files')
1208 progress = repo.ui.progress
1207 progress = repo.ui.progress
1209
1208
1210 # remove renamed files after safely stored
1209 # remove renamed files after safely stored
1211 for f in moves:
1210 for f in moves:
1212 if os.path.lexists(repo.wjoin(f)):
1211 if os.path.lexists(repo.wjoin(f)):
1213 repo.ui.debug("removing %s\n" % f)
1212 repo.ui.debug("removing %s\n" % f)
1214 audit(f)
1213 audit(f)
1215 wctx[f].remove()
1214 wctx[f].remove()
1216
1215
1217 numupdates = sum(len(l) for m, l in actions.items() if m != 'k')
1216 numupdates = sum(len(l) for m, l in actions.items() if m != 'k')
1218
1217
1219 if [a for a in actions['r'] if a[0] == '.hgsubstate']:
1218 if [a for a in actions['r'] if a[0] == '.hgsubstate']:
1220 subrepo.submerge(repo, wctx, mctx, wctx, overwrite, labels)
1219 subrepo.submerge(repo, wctx, mctx, wctx, overwrite, labels)
1221
1220
1222 # remove in parallel (must come first)
1221 # remove in parallel (must come first)
1223 z = 0
1222 z = 0
1224 prog = worker.worker(repo.ui, 0.001, batchremove, (repo, wctx),
1223 prog = worker.worker(repo.ui, 0.001, batchremove, (repo, wctx),
1225 actions['r'])
1224 actions['r'])
1226 for i, item in prog:
1225 for i, item in prog:
1227 z += i
1226 z += i
1228 progress(_updating, z, item=item, total=numupdates, unit=_files)
1227 progress(_updating, z, item=item, total=numupdates, unit=_files)
1229 removed = len(actions['r'])
1228 removed = len(actions['r'])
1230
1229
1231 # get in parallel
1230 # get in parallel
1232 prog = worker.worker(repo.ui, 0.001, batchget, (repo, mctx, wctx),
1231 prog = worker.worker(repo.ui, 0.001, batchget, (repo, mctx, wctx),
1233 actions['g'])
1232 actions['g'])
1234 for i, item in prog:
1233 for i, item in prog:
1235 z += i
1234 z += i
1236 progress(_updating, z, item=item, total=numupdates, unit=_files)
1235 progress(_updating, z, item=item, total=numupdates, unit=_files)
1237 updated = len(actions['g'])
1236 updated = len(actions['g'])
1238
1237
1239 if [a for a in actions['g'] if a[0] == '.hgsubstate']:
1238 if [a for a in actions['g'] if a[0] == '.hgsubstate']:
1240 subrepo.submerge(repo, wctx, mctx, wctx, overwrite, labels)
1239 subrepo.submerge(repo, wctx, mctx, wctx, overwrite, labels)
1241
1240
1242 # forget (manifest only, just log it) (must come first)
1241 # forget (manifest only, just log it) (must come first)
1243 for f, args, msg in actions['f']:
1242 for f, args, msg in actions['f']:
1244 repo.ui.debug(" %s: %s -> f\n" % (f, msg))
1243 repo.ui.debug(" %s: %s -> f\n" % (f, msg))
1245 z += 1
1244 z += 1
1246 progress(_updating, z, item=f, total=numupdates, unit=_files)
1245 progress(_updating, z, item=f, total=numupdates, unit=_files)
1247
1246
1248 # re-add (manifest only, just log it)
1247 # re-add (manifest only, just log it)
1249 for f, args, msg in actions['a']:
1248 for f, args, msg in actions['a']:
1250 repo.ui.debug(" %s: %s -> a\n" % (f, msg))
1249 repo.ui.debug(" %s: %s -> a\n" % (f, msg))
1251 z += 1
1250 z += 1
1252 progress(_updating, z, item=f, total=numupdates, unit=_files)
1251 progress(_updating, z, item=f, total=numupdates, unit=_files)
1253
1252
1254 # re-add/mark as modified (manifest only, just log it)
1253 # re-add/mark as modified (manifest only, just log it)
1255 for f, args, msg in actions['am']:
1254 for f, args, msg in actions['am']:
1256 repo.ui.debug(" %s: %s -> am\n" % (f, msg))
1255 repo.ui.debug(" %s: %s -> am\n" % (f, msg))
1257 z += 1
1256 z += 1
1258 progress(_updating, z, item=f, total=numupdates, unit=_files)
1257 progress(_updating, z, item=f, total=numupdates, unit=_files)
1259
1258
1260 # keep (noop, just log it)
1259 # keep (noop, just log it)
1261 for f, args, msg in actions['k']:
1260 for f, args, msg in actions['k']:
1262 repo.ui.debug(" %s: %s -> k\n" % (f, msg))
1261 repo.ui.debug(" %s: %s -> k\n" % (f, msg))
1263 # no progress
1262 # no progress
1264
1263
1265 # directory rename, move local
1264 # directory rename, move local
1266 for f, args, msg in actions['dm']:
1265 for f, args, msg in actions['dm']:
1267 repo.ui.debug(" %s: %s -> dm\n" % (f, msg))
1266 repo.ui.debug(" %s: %s -> dm\n" % (f, msg))
1268 z += 1
1267 z += 1
1269 progress(_updating, z, item=f, total=numupdates, unit=_files)
1268 progress(_updating, z, item=f, total=numupdates, unit=_files)
1270 f0, flags = args
1269 f0, flags = args
1271 repo.ui.note(_("moving %s to %s\n") % (f0, f))
1270 repo.ui.note(_("moving %s to %s\n") % (f0, f))
1272 audit(f)
1271 audit(f)
1273 wctx[f].write(wctx.filectx(f0).data(), flags)
1272 wctx[f].write(wctx.filectx(f0).data(), flags)
1274 wctx[f0].remove()
1273 wctx[f0].remove()
1275 updated += 1
1274 updated += 1
1276
1275
1277 # local directory rename, get
1276 # local directory rename, get
1278 for f, args, msg in actions['dg']:
1277 for f, args, msg in actions['dg']:
1279 repo.ui.debug(" %s: %s -> dg\n" % (f, msg))
1278 repo.ui.debug(" %s: %s -> dg\n" % (f, msg))
1280 z += 1
1279 z += 1
1281 progress(_updating, z, item=f, total=numupdates, unit=_files)
1280 progress(_updating, z, item=f, total=numupdates, unit=_files)
1282 f0, flags = args
1281 f0, flags = args
1283 repo.ui.note(_("getting %s to %s\n") % (f0, f))
1282 repo.ui.note(_("getting %s to %s\n") % (f0, f))
1284 wctx[f].write(mctx.filectx(f0).data(), flags)
1283 wctx[f].write(mctx.filectx(f0).data(), flags)
1285 updated += 1
1284 updated += 1
1286
1285
1287 # exec
1286 # exec
1288 for f, args, msg in actions['e']:
1287 for f, args, msg in actions['e']:
1289 repo.ui.debug(" %s: %s -> e\n" % (f, msg))
1288 repo.ui.debug(" %s: %s -> e\n" % (f, msg))
1290 z += 1
1289 z += 1
1291 progress(_updating, z, item=f, total=numupdates, unit=_files)
1290 progress(_updating, z, item=f, total=numupdates, unit=_files)
1292 flags, = args
1291 flags, = args
1293 audit(f)
1292 audit(f)
1294 wctx[f].setflags('l' in flags, 'x' in flags)
1293 wctx[f].setflags('l' in flags, 'x' in flags)
1295 updated += 1
1294 updated += 1
1296
1295
1297 # the ordering is important here -- ms.mergedriver will raise if the merge
1296 # the ordering is important here -- ms.mergedriver will raise if the merge
1298 # driver has changed, and we want to be able to bypass it when overwrite is
1297 # driver has changed, and we want to be able to bypass it when overwrite is
1299 # True
1298 # True
1300 usemergedriver = not overwrite and mergeactions and ms.mergedriver
1299 usemergedriver = not overwrite and mergeactions and ms.mergedriver
1301
1300
1302 if usemergedriver:
1301 if usemergedriver:
1303 ms.commit()
1302 ms.commit()
1304 proceed = driverpreprocess(repo, ms, wctx, labels=labels)
1303 proceed = driverpreprocess(repo, ms, wctx, labels=labels)
1305 # the driver might leave some files unresolved
1304 # the driver might leave some files unresolved
1306 unresolvedf = set(ms.unresolved())
1305 unresolvedf = set(ms.unresolved())
1307 if not proceed:
1306 if not proceed:
1308 # XXX setting unresolved to at least 1 is a hack to make sure we
1307 # XXX setting unresolved to at least 1 is a hack to make sure we
1309 # error out
1308 # error out
1310 return updated, merged, removed, max(len(unresolvedf), 1)
1309 return updated, merged, removed, max(len(unresolvedf), 1)
1311 newactions = []
1310 newactions = []
1312 for f, args, msg in mergeactions:
1311 for f, args, msg in mergeactions:
1313 if f in unresolvedf:
1312 if f in unresolvedf:
1314 newactions.append((f, args, msg))
1313 newactions.append((f, args, msg))
1315 mergeactions = newactions
1314 mergeactions = newactions
1316
1315
1317 # premerge
1316 # premerge
1318 tocomplete = []
1317 tocomplete = []
1319 for f, args, msg in mergeactions:
1318 for f, args, msg in mergeactions:
1320 repo.ui.debug(" %s: %s -> m (premerge)\n" % (f, msg))
1319 repo.ui.debug(" %s: %s -> m (premerge)\n" % (f, msg))
1321 z += 1
1320 z += 1
1322 progress(_updating, z, item=f, total=numupdates, unit=_files)
1321 progress(_updating, z, item=f, total=numupdates, unit=_files)
1323 if f == '.hgsubstate': # subrepo states need updating
1322 if f == '.hgsubstate': # subrepo states need updating
1324 subrepo.submerge(repo, wctx, mctx, wctx.ancestor(mctx),
1323 subrepo.submerge(repo, wctx, mctx, wctx.ancestor(mctx),
1325 overwrite, labels)
1324 overwrite, labels)
1326 continue
1325 continue
1327 audit(f)
1326 audit(f)
1328 complete, r = ms.preresolve(f, wctx)
1327 complete, r = ms.preresolve(f, wctx)
1329 if not complete:
1328 if not complete:
1330 numupdates += 1
1329 numupdates += 1
1331 tocomplete.append((f, args, msg))
1330 tocomplete.append((f, args, msg))
1332
1331
1333 # merge
1332 # merge
1334 for f, args, msg in tocomplete:
1333 for f, args, msg in tocomplete:
1335 repo.ui.debug(" %s: %s -> m (merge)\n" % (f, msg))
1334 repo.ui.debug(" %s: %s -> m (merge)\n" % (f, msg))
1336 z += 1
1335 z += 1
1337 progress(_updating, z, item=f, total=numupdates, unit=_files)
1336 progress(_updating, z, item=f, total=numupdates, unit=_files)
1338 ms.resolve(f, wctx)
1337 ms.resolve(f, wctx)
1339
1338
1340 ms.commit()
1339 ms.commit()
1341
1340
1342 unresolved = ms.unresolvedcount()
1341 unresolved = ms.unresolvedcount()
1343
1342
1344 if usemergedriver and not unresolved and ms.mdstate() != 's':
1343 if usemergedriver and not unresolved and ms.mdstate() != 's':
1345 if not driverconclude(repo, ms, wctx, labels=labels):
1344 if not driverconclude(repo, ms, wctx, labels=labels):
1346 # XXX setting unresolved to at least 1 is a hack to make sure we
1345 # XXX setting unresolved to at least 1 is a hack to make sure we
1347 # error out
1346 # error out
1348 unresolved = max(unresolved, 1)
1347 unresolved = max(unresolved, 1)
1349
1348
1350 ms.commit()
1349 ms.commit()
1351
1350
1352 msupdated, msmerged, msremoved = ms.counts()
1351 msupdated, msmerged, msremoved = ms.counts()
1353 updated += msupdated
1352 updated += msupdated
1354 merged += msmerged
1353 merged += msmerged
1355 removed += msremoved
1354 removed += msremoved
1356
1355
1357 extraactions = ms.actions()
1356 extraactions = ms.actions()
1358 if extraactions:
1357 if extraactions:
1359 mfiles = set(a[0] for a in actions['m'])
1358 mfiles = set(a[0] for a in actions['m'])
1360 for k, acts in extraactions.iteritems():
1359 for k, acts in extraactions.iteritems():
1361 actions[k].extend(acts)
1360 actions[k].extend(acts)
1362 # Remove these files from actions['m'] as well. This is important
1361 # Remove these files from actions['m'] as well. This is important
1363 # because in recordupdates, files in actions['m'] are processed
1362 # because in recordupdates, files in actions['m'] are processed
1364 # after files in other actions, and the merge driver might add
1363 # after files in other actions, and the merge driver might add
1365 # files to those actions via extraactions above. This can lead to a
1364 # files to those actions via extraactions above. This can lead to a
1366 # file being recorded twice, with poor results. This is especially
1365 # file being recorded twice, with poor results. This is especially
1367 # problematic for actions['r'] (currently only possible with the
1366 # problematic for actions['r'] (currently only possible with the
1368 # merge driver in the initial merge process; interrupted merges
1367 # merge driver in the initial merge process; interrupted merges
1369 # don't go through this flow).
1368 # don't go through this flow).
1370 #
1369 #
1371 # The real fix here is to have indexes by both file and action so
1370 # The real fix here is to have indexes by both file and action so
1372 # that when the action for a file is changed it is automatically
1371 # that when the action for a file is changed it is automatically
1373 # reflected in the other action lists. But that involves a more
1372 # reflected in the other action lists. But that involves a more
1374 # complex data structure, so this will do for now.
1373 # complex data structure, so this will do for now.
1375 #
1374 #
1376 # We don't need to do the same operation for 'dc' and 'cd' because
1375 # We don't need to do the same operation for 'dc' and 'cd' because
1377 # those lists aren't consulted again.
1376 # those lists aren't consulted again.
1378 mfiles.difference_update(a[0] for a in acts)
1377 mfiles.difference_update(a[0] for a in acts)
1379
1378
1380 actions['m'] = [a for a in actions['m'] if a[0] in mfiles]
1379 actions['m'] = [a for a in actions['m'] if a[0] in mfiles]
1381
1380
1382 progress(_updating, None, total=numupdates, unit=_files)
1381 progress(_updating, None, total=numupdates, unit=_files)
1383
1382
1384 return updated, merged, removed, unresolved
1383 return updated, merged, removed, unresolved
1385
1384
1386 def recordupdates(repo, actions, branchmerge):
1385 def recordupdates(repo, actions, branchmerge):
1387 "record merge actions to the dirstate"
1386 "record merge actions to the dirstate"
1388 # remove (must come first)
1387 # remove (must come first)
1389 for f, args, msg in actions.get('r', []):
1388 for f, args, msg in actions.get('r', []):
1390 if branchmerge:
1389 if branchmerge:
1391 repo.dirstate.remove(f)
1390 repo.dirstate.remove(f)
1392 else:
1391 else:
1393 repo.dirstate.drop(f)
1392 repo.dirstate.drop(f)
1394
1393
1395 # forget (must come first)
1394 # forget (must come first)
1396 for f, args, msg in actions.get('f', []):
1395 for f, args, msg in actions.get('f', []):
1397 repo.dirstate.drop(f)
1396 repo.dirstate.drop(f)
1398
1397
1399 # re-add
1398 # re-add
1400 for f, args, msg in actions.get('a', []):
1399 for f, args, msg in actions.get('a', []):
1401 repo.dirstate.add(f)
1400 repo.dirstate.add(f)
1402
1401
1403 # re-add/mark as modified
1402 # re-add/mark as modified
1404 for f, args, msg in actions.get('am', []):
1403 for f, args, msg in actions.get('am', []):
1405 if branchmerge:
1404 if branchmerge:
1406 repo.dirstate.normallookup(f)
1405 repo.dirstate.normallookup(f)
1407 else:
1406 else:
1408 repo.dirstate.add(f)
1407 repo.dirstate.add(f)
1409
1408
1410 # exec change
1409 # exec change
1411 for f, args, msg in actions.get('e', []):
1410 for f, args, msg in actions.get('e', []):
1412 repo.dirstate.normallookup(f)
1411 repo.dirstate.normallookup(f)
1413
1412
1414 # keep
1413 # keep
1415 for f, args, msg in actions.get('k', []):
1414 for f, args, msg in actions.get('k', []):
1416 pass
1415 pass
1417
1416
1418 # get
1417 # get
1419 for f, args, msg in actions.get('g', []):
1418 for f, args, msg in actions.get('g', []):
1420 if branchmerge:
1419 if branchmerge:
1421 repo.dirstate.otherparent(f)
1420 repo.dirstate.otherparent(f)
1422 else:
1421 else:
1423 repo.dirstate.normal(f)
1422 repo.dirstate.normal(f)
1424
1423
1425 # merge
1424 # merge
1426 for f, args, msg in actions.get('m', []):
1425 for f, args, msg in actions.get('m', []):
1427 f1, f2, fa, move, anc = args
1426 f1, f2, fa, move, anc = args
1428 if branchmerge:
1427 if branchmerge:
1429 # We've done a branch merge, mark this file as merged
1428 # We've done a branch merge, mark this file as merged
1430 # so that we properly record the merger later
1429 # so that we properly record the merger later
1431 repo.dirstate.merge(f)
1430 repo.dirstate.merge(f)
1432 if f1 != f2: # copy/rename
1431 if f1 != f2: # copy/rename
1433 if move:
1432 if move:
1434 repo.dirstate.remove(f1)
1433 repo.dirstate.remove(f1)
1435 if f1 != f:
1434 if f1 != f:
1436 repo.dirstate.copy(f1, f)
1435 repo.dirstate.copy(f1, f)
1437 else:
1436 else:
1438 repo.dirstate.copy(f2, f)
1437 repo.dirstate.copy(f2, f)
1439 else:
1438 else:
1440 # We've update-merged a locally modified file, so
1439 # We've update-merged a locally modified file, so
1441 # we set the dirstate to emulate a normal checkout
1440 # we set the dirstate to emulate a normal checkout
1442 # of that file some time in the past. Thus our
1441 # of that file some time in the past. Thus our
1443 # merge will appear as a normal local file
1442 # merge will appear as a normal local file
1444 # modification.
1443 # modification.
1445 if f2 == f: # file not locally copied/moved
1444 if f2 == f: # file not locally copied/moved
1446 repo.dirstate.normallookup(f)
1445 repo.dirstate.normallookup(f)
1447 if move:
1446 if move:
1448 repo.dirstate.drop(f1)
1447 repo.dirstate.drop(f1)
1449
1448
1450 # directory rename, move local
1449 # directory rename, move local
1451 for f, args, msg in actions.get('dm', []):
1450 for f, args, msg in actions.get('dm', []):
1452 f0, flag = args
1451 f0, flag = args
1453 if branchmerge:
1452 if branchmerge:
1454 repo.dirstate.add(f)
1453 repo.dirstate.add(f)
1455 repo.dirstate.remove(f0)
1454 repo.dirstate.remove(f0)
1456 repo.dirstate.copy(f0, f)
1455 repo.dirstate.copy(f0, f)
1457 else:
1456 else:
1458 repo.dirstate.normal(f)
1457 repo.dirstate.normal(f)
1459 repo.dirstate.drop(f0)
1458 repo.dirstate.drop(f0)
1460
1459
1461 # directory rename, get
1460 # directory rename, get
1462 for f, args, msg in actions.get('dg', []):
1461 for f, args, msg in actions.get('dg', []):
1463 f0, flag = args
1462 f0, flag = args
1464 if branchmerge:
1463 if branchmerge:
1465 repo.dirstate.add(f)
1464 repo.dirstate.add(f)
1466 repo.dirstate.copy(f0, f)
1465 repo.dirstate.copy(f0, f)
1467 else:
1466 else:
1468 repo.dirstate.normal(f)
1467 repo.dirstate.normal(f)
1469
1468
1470 def update(repo, node, branchmerge, force, ancestor=None,
1469 def update(repo, node, branchmerge, force, ancestor=None,
1471 mergeancestor=False, labels=None, matcher=None, mergeforce=False,
1470 mergeancestor=False, labels=None, matcher=None, mergeforce=False,
1472 updatecheck=None):
1471 updatecheck=None):
1473 """
1472 """
1474 Perform a merge between the working directory and the given node
1473 Perform a merge between the working directory and the given node
1475
1474
1476 node = the node to update to
1475 node = the node to update to
1477 branchmerge = whether to merge between branches
1476 branchmerge = whether to merge between branches
1478 force = whether to force branch merging or file overwriting
1477 force = whether to force branch merging or file overwriting
1479 matcher = a matcher to filter file lists (dirstate not updated)
1478 matcher = a matcher to filter file lists (dirstate not updated)
1480 mergeancestor = whether it is merging with an ancestor. If true,
1479 mergeancestor = whether it is merging with an ancestor. If true,
1481 we should accept the incoming changes for any prompts that occur.
1480 we should accept the incoming changes for any prompts that occur.
1482 If false, merging with an ancestor (fast-forward) is only allowed
1481 If false, merging with an ancestor (fast-forward) is only allowed
1483 between different named branches. This flag is used by rebase extension
1482 between different named branches. This flag is used by rebase extension
1484 as a temporary fix and should be avoided in general.
1483 as a temporary fix and should be avoided in general.
1485 labels = labels to use for base, local and other
1484 labels = labels to use for base, local and other
1486 mergeforce = whether the merge was run with 'merge --force' (deprecated): if
1485 mergeforce = whether the merge was run with 'merge --force' (deprecated): if
1487 this is True, then 'force' should be True as well.
1486 this is True, then 'force' should be True as well.
1488
1487
1489 The table below shows all the behaviors of the update command
1488 The table below shows all the behaviors of the update command
1490 given the -c and -C or no options, whether the working directory
1489 given the -c and -C or no options, whether the working directory
1491 is dirty, whether a revision is specified, and the relationship of
1490 is dirty, whether a revision is specified, and the relationship of
1492 the parent rev to the target rev (linear or not). Match from top first. The
1491 the parent rev to the target rev (linear or not). Match from top first. The
1493 -n option doesn't exist on the command line, but represents the
1492 -n option doesn't exist on the command line, but represents the
1494 experimental.updatecheck=noconflict option.
1493 experimental.updatecheck=noconflict option.
1495
1494
1496 This logic is tested by test-update-branches.t.
1495 This logic is tested by test-update-branches.t.
1497
1496
1498 -c -C -n -m dirty rev linear | result
1497 -c -C -n -m dirty rev linear | result
1499 y y * * * * * | (1)
1498 y y * * * * * | (1)
1500 y * y * * * * | (1)
1499 y * y * * * * | (1)
1501 y * * y * * * | (1)
1500 y * * y * * * | (1)
1502 * y y * * * * | (1)
1501 * y y * * * * | (1)
1503 * y * y * * * | (1)
1502 * y * y * * * | (1)
1504 * * y y * * * | (1)
1503 * * y y * * * | (1)
1505 * * * * * n n | x
1504 * * * * * n n | x
1506 * * * * n * * | ok
1505 * * * * n * * | ok
1507 n n n n y * y | merge
1506 n n n n y * y | merge
1508 n n n n y y n | (2)
1507 n n n n y y n | (2)
1509 n n n y y * * | merge
1508 n n n y y * * | merge
1510 n n y n y * * | merge if no conflict
1509 n n y n y * * | merge if no conflict
1511 n y n n y * * | discard
1510 n y n n y * * | discard
1512 y n n n y * * | (3)
1511 y n n n y * * | (3)
1513
1512
1514 x = can't happen
1513 x = can't happen
1515 * = don't-care
1514 * = don't-care
1516 1 = incompatible options (checked in commands.py)
1515 1 = incompatible options (checked in commands.py)
1517 2 = abort: uncommitted changes (commit or update --clean to discard changes)
1516 2 = abort: uncommitted changes (commit or update --clean to discard changes)
1518 3 = abort: uncommitted changes (checked in commands.py)
1517 3 = abort: uncommitted changes (checked in commands.py)
1519
1518
1520 Return the same tuple as applyupdates().
1519 Return the same tuple as applyupdates().
1521 """
1520 """
1522
1521
1523 # This function used to find the default destination if node was None, but
1522 # This function used to find the default destination if node was None, but
1524 # that's now in destutil.py.
1523 # that's now in destutil.py.
1525 assert node is not None
1524 assert node is not None
1526 if not branchmerge and not force:
1525 if not branchmerge and not force:
1527 # TODO: remove the default once all callers that pass branchmerge=False
1526 # TODO: remove the default once all callers that pass branchmerge=False
1528 # and force=False pass a value for updatecheck. We may want to allow
1527 # and force=False pass a value for updatecheck. We may want to allow
1529 # updatecheck='abort' to better suppport some of these callers.
1528 # updatecheck='abort' to better suppport some of these callers.
1530 if updatecheck is None:
1529 if updatecheck is None:
1531 updatecheck = 'linear'
1530 updatecheck = 'linear'
1532 assert updatecheck in ('none', 'linear', 'noconflict')
1531 assert updatecheck in ('none', 'linear', 'noconflict')
1533 # If we're doing a partial update, we need to skip updating
1532 # If we're doing a partial update, we need to skip updating
1534 # the dirstate, so make a note of any partial-ness to the
1533 # the dirstate, so make a note of any partial-ness to the
1535 # update here.
1534 # update here.
1536 if matcher is None or matcher.always():
1535 if matcher is None or matcher.always():
1537 partial = False
1536 partial = False
1538 else:
1537 else:
1539 partial = True
1538 partial = True
1540 with repo.wlock():
1539 with repo.wlock():
1541 wc = repo[None]
1540 wc = repo[None]
1542 pl = wc.parents()
1541 pl = wc.parents()
1543 p1 = pl[0]
1542 p1 = pl[0]
1544 pas = [None]
1543 pas = [None]
1545 if ancestor is not None:
1544 if ancestor is not None:
1546 pas = [repo[ancestor]]
1545 pas = [repo[ancestor]]
1547
1546
1548 overwrite = force and not branchmerge
1547 overwrite = force and not branchmerge
1549
1548
1550 p2 = repo[node]
1549 p2 = repo[node]
1551 if pas[0] is None:
1550 if pas[0] is None:
1552 if repo.ui.configlist('merge', 'preferancestor', ['*']) == ['*']:
1551 if repo.ui.configlist('merge', 'preferancestor', ['*']) == ['*']:
1553 cahs = repo.changelog.commonancestorsheads(p1.node(), p2.node())
1552 cahs = repo.changelog.commonancestorsheads(p1.node(), p2.node())
1554 pas = [repo[anc] for anc in (sorted(cahs) or [nullid])]
1553 pas = [repo[anc] for anc in (sorted(cahs) or [nullid])]
1555 else:
1554 else:
1556 pas = [p1.ancestor(p2, warn=branchmerge)]
1555 pas = [p1.ancestor(p2, warn=branchmerge)]
1557
1556
1558 fp1, fp2, xp1, xp2 = p1.node(), p2.node(), str(p1), str(p2)
1557 fp1, fp2, xp1, xp2 = p1.node(), p2.node(), str(p1), str(p2)
1559
1558
1560 ### check phase
1559 ### check phase
1561 if not overwrite:
1560 if not overwrite:
1562 if len(pl) > 1:
1561 if len(pl) > 1:
1563 raise error.Abort(_("outstanding uncommitted merge"))
1562 raise error.Abort(_("outstanding uncommitted merge"))
1564 ms = mergestate.read(repo)
1563 ms = mergestate.read(repo)
1565 if list(ms.unresolved()):
1564 if list(ms.unresolved()):
1566 raise error.Abort(_("outstanding merge conflicts"))
1565 raise error.Abort(_("outstanding merge conflicts"))
1567 if branchmerge:
1566 if branchmerge:
1568 if pas == [p2]:
1567 if pas == [p2]:
1569 raise error.Abort(_("merging with a working directory ancestor"
1568 raise error.Abort(_("merging with a working directory ancestor"
1570 " has no effect"))
1569 " has no effect"))
1571 elif pas == [p1]:
1570 elif pas == [p1]:
1572 if not mergeancestor and wc.branch() == p2.branch():
1571 if not mergeancestor and wc.branch() == p2.branch():
1573 raise error.Abort(_("nothing to merge"),
1572 raise error.Abort(_("nothing to merge"),
1574 hint=_("use 'hg update' "
1573 hint=_("use 'hg update' "
1575 "or check 'hg heads'"))
1574 "or check 'hg heads'"))
1576 if not force and (wc.files() or wc.deleted()):
1575 if not force and (wc.files() or wc.deleted()):
1577 raise error.Abort(_("uncommitted changes"),
1576 raise error.Abort(_("uncommitted changes"),
1578 hint=_("use 'hg status' to list changes"))
1577 hint=_("use 'hg status' to list changes"))
1579 for s in sorted(wc.substate):
1578 for s in sorted(wc.substate):
1580 wc.sub(s).bailifchanged()
1579 wc.sub(s).bailifchanged()
1581
1580
1582 elif not overwrite:
1581 elif not overwrite:
1583 if p1 == p2: # no-op update
1582 if p1 == p2: # no-op update
1584 # call the hooks and exit early
1583 # call the hooks and exit early
1585 repo.hook('preupdate', throw=True, parent1=xp2, parent2='')
1584 repo.hook('preupdate', throw=True, parent1=xp2, parent2='')
1586 repo.hook('update', parent1=xp2, parent2='', error=0)
1585 repo.hook('update', parent1=xp2, parent2='', error=0)
1587 return 0, 0, 0, 0
1586 return 0, 0, 0, 0
1588
1587
1589 if (updatecheck == 'linear' and
1588 if (updatecheck == 'linear' and
1590 pas not in ([p1], [p2])): # nonlinear
1589 pas not in ([p1], [p2])): # nonlinear
1591 dirty = wc.dirty(missing=True)
1590 dirty = wc.dirty(missing=True)
1592 if dirty:
1591 if dirty:
1593 # Branching is a bit strange to ensure we do the minimal
1592 # Branching is a bit strange to ensure we do the minimal
1594 # amount of call to obsolete.foreground.
1593 # amount of call to obsolete.foreground.
1595 foreground = obsolete.foreground(repo, [p1.node()])
1594 foreground = obsolete.foreground(repo, [p1.node()])
1596 # note: the <node> variable contains a random identifier
1595 # note: the <node> variable contains a random identifier
1597 if repo[node].node() in foreground:
1596 if repo[node].node() in foreground:
1598 pass # allow updating to successors
1597 pass # allow updating to successors
1599 else:
1598 else:
1600 msg = _("uncommitted changes")
1599 msg = _("uncommitted changes")
1601 hint = _("commit or update --clean to discard changes")
1600 hint = _("commit or update --clean to discard changes")
1602 raise error.UpdateAbort(msg, hint=hint)
1601 raise error.UpdateAbort(msg, hint=hint)
1603 else:
1602 else:
1604 # Allow jumping branches if clean and specific rev given
1603 # Allow jumping branches if clean and specific rev given
1605 pass
1604 pass
1606
1605
1607 if overwrite:
1606 if overwrite:
1608 pas = [wc]
1607 pas = [wc]
1609 elif not branchmerge:
1608 elif not branchmerge:
1610 pas = [p1]
1609 pas = [p1]
1611
1610
1612 # deprecated config: merge.followcopies
1611 # deprecated config: merge.followcopies
1613 followcopies = repo.ui.configbool('merge', 'followcopies', True)
1612 followcopies = repo.ui.configbool('merge', 'followcopies', True)
1614 if overwrite:
1613 if overwrite:
1615 followcopies = False
1614 followcopies = False
1616 elif not pas[0]:
1615 elif not pas[0]:
1617 followcopies = False
1616 followcopies = False
1618 if not branchmerge and not wc.dirty(missing=True):
1617 if not branchmerge and not wc.dirty(missing=True):
1619 followcopies = False
1618 followcopies = False
1620
1619
1621 ### calculate phase
1620 ### calculate phase
1622 actionbyfile, diverge, renamedelete = calculateupdates(
1621 actionbyfile, diverge, renamedelete = calculateupdates(
1623 repo, wc, p2, pas, branchmerge, force, mergeancestor,
1622 repo, wc, p2, pas, branchmerge, force, mergeancestor,
1624 followcopies, matcher=matcher, mergeforce=mergeforce)
1623 followcopies, matcher=matcher, mergeforce=mergeforce)
1625
1624
1626 if updatecheck == 'noconflict':
1625 if updatecheck == 'noconflict':
1627 for f, (m, args, msg) in actionbyfile.iteritems():
1626 for f, (m, args, msg) in actionbyfile.iteritems():
1628 if m not in ('g', 'k', 'e', 'r'):
1627 if m not in ('g', 'k', 'e', 'r'):
1629 msg = _("conflicting changes")
1628 msg = _("conflicting changes")
1630 hint = _("commit or update --clean to discard changes")
1629 hint = _("commit or update --clean to discard changes")
1631 raise error.Abort(msg, hint=hint)
1630 raise error.Abort(msg, hint=hint)
1632
1631
1633 # Prompt and create actions. Most of this is in the resolve phase
1632 # Prompt and create actions. Most of this is in the resolve phase
1634 # already, but we can't handle .hgsubstate in filemerge or
1633 # already, but we can't handle .hgsubstate in filemerge or
1635 # subrepo.submerge yet so we have to keep prompting for it.
1634 # subrepo.submerge yet so we have to keep prompting for it.
1636 if '.hgsubstate' in actionbyfile:
1635 if '.hgsubstate' in actionbyfile:
1637 f = '.hgsubstate'
1636 f = '.hgsubstate'
1638 m, args, msg = actionbyfile[f]
1637 m, args, msg = actionbyfile[f]
1639 prompts = filemerge.partextras(labels)
1638 prompts = filemerge.partextras(labels)
1640 prompts['f'] = f
1639 prompts['f'] = f
1641 if m == 'cd':
1640 if m == 'cd':
1642 if repo.ui.promptchoice(
1641 if repo.ui.promptchoice(
1643 _("local%(l)s changed %(f)s which other%(o)s deleted\n"
1642 _("local%(l)s changed %(f)s which other%(o)s deleted\n"
1644 "use (c)hanged version or (d)elete?"
1643 "use (c)hanged version or (d)elete?"
1645 "$$ &Changed $$ &Delete") % prompts, 0):
1644 "$$ &Changed $$ &Delete") % prompts, 0):
1646 actionbyfile[f] = ('r', None, "prompt delete")
1645 actionbyfile[f] = ('r', None, "prompt delete")
1647 elif f in p1:
1646 elif f in p1:
1648 actionbyfile[f] = ('am', None, "prompt keep")
1647 actionbyfile[f] = ('am', None, "prompt keep")
1649 else:
1648 else:
1650 actionbyfile[f] = ('a', None, "prompt keep")
1649 actionbyfile[f] = ('a', None, "prompt keep")
1651 elif m == 'dc':
1650 elif m == 'dc':
1652 f1, f2, fa, move, anc = args
1651 f1, f2, fa, move, anc = args
1653 flags = p2[f2].flags()
1652 flags = p2[f2].flags()
1654 if repo.ui.promptchoice(
1653 if repo.ui.promptchoice(
1655 _("other%(o)s changed %(f)s which local%(l)s deleted\n"
1654 _("other%(o)s changed %(f)s which local%(l)s deleted\n"
1656 "use (c)hanged version or leave (d)eleted?"
1655 "use (c)hanged version or leave (d)eleted?"
1657 "$$ &Changed $$ &Deleted") % prompts, 0) == 0:
1656 "$$ &Changed $$ &Deleted") % prompts, 0) == 0:
1658 actionbyfile[f] = ('g', (flags, False), "prompt recreating")
1657 actionbyfile[f] = ('g', (flags, False), "prompt recreating")
1659 else:
1658 else:
1660 del actionbyfile[f]
1659 del actionbyfile[f]
1661
1660
1662 # Convert to dictionary-of-lists format
1661 # Convert to dictionary-of-lists format
1663 actions = dict((m, []) for m in 'a am f g cd dc r dm dg m e k'.split())
1662 actions = dict((m, []) for m in 'a am f g cd dc r dm dg m e k'.split())
1664 for f, (m, args, msg) in actionbyfile.iteritems():
1663 for f, (m, args, msg) in actionbyfile.iteritems():
1665 if m not in actions:
1664 if m not in actions:
1666 actions[m] = []
1665 actions[m] = []
1667 actions[m].append((f, args, msg))
1666 actions[m].append((f, args, msg))
1668
1667
1669 if not util.fscasesensitive(repo.path):
1668 if not util.fscasesensitive(repo.path):
1670 # check collision between files only in p2 for clean update
1669 # check collision between files only in p2 for clean update
1671 if (not branchmerge and
1670 if (not branchmerge and
1672 (force or not wc.dirty(missing=True, branch=False))):
1671 (force or not wc.dirty(missing=True, branch=False))):
1673 _checkcollision(repo, p2.manifest(), None)
1672 _checkcollision(repo, p2.manifest(), None)
1674 else:
1673 else:
1675 _checkcollision(repo, wc.manifest(), actions)
1674 _checkcollision(repo, wc.manifest(), actions)
1676
1675
1677 # divergent renames
1676 # divergent renames
1678 for f, fl in sorted(diverge.iteritems()):
1677 for f, fl in sorted(diverge.iteritems()):
1679 repo.ui.warn(_("note: possible conflict - %s was renamed "
1678 repo.ui.warn(_("note: possible conflict - %s was renamed "
1680 "multiple times to:\n") % f)
1679 "multiple times to:\n") % f)
1681 for nf in fl:
1680 for nf in fl:
1682 repo.ui.warn(" %s\n" % nf)
1681 repo.ui.warn(" %s\n" % nf)
1683
1682
1684 # rename and delete
1683 # rename and delete
1685 for f, fl in sorted(renamedelete.iteritems()):
1684 for f, fl in sorted(renamedelete.iteritems()):
1686 repo.ui.warn(_("note: possible conflict - %s was deleted "
1685 repo.ui.warn(_("note: possible conflict - %s was deleted "
1687 "and renamed to:\n") % f)
1686 "and renamed to:\n") % f)
1688 for nf in fl:
1687 for nf in fl:
1689 repo.ui.warn(" %s\n" % nf)
1688 repo.ui.warn(" %s\n" % nf)
1690
1689
1691 ### apply phase
1690 ### apply phase
1692 if not branchmerge: # just jump to the new rev
1691 if not branchmerge: # just jump to the new rev
1693 fp1, fp2, xp1, xp2 = fp2, nullid, xp2, ''
1692 fp1, fp2, xp1, xp2 = fp2, nullid, xp2, ''
1694 if not partial:
1693 if not partial:
1695 repo.hook('preupdate', throw=True, parent1=xp1, parent2=xp2)
1694 repo.hook('preupdate', throw=True, parent1=xp1, parent2=xp2)
1696 # note that we're in the middle of an update
1695 # note that we're in the middle of an update
1697 repo.vfs.write('updatestate', p2.hex())
1696 repo.vfs.write('updatestate', p2.hex())
1698
1697
1699 stats = applyupdates(repo, actions, wc, p2, overwrite, labels=labels)
1698 stats = applyupdates(repo, actions, wc, p2, overwrite, labels=labels)
1700
1699
1701 if not partial:
1700 if not partial:
1702 with repo.dirstate.parentchange():
1701 with repo.dirstate.parentchange():
1703 repo.setparents(fp1, fp2)
1702 repo.setparents(fp1, fp2)
1704 recordupdates(repo, actions, branchmerge)
1703 recordupdates(repo, actions, branchmerge)
1705 # update completed, clear state
1704 # update completed, clear state
1706 util.unlink(repo.vfs.join('updatestate'))
1705 util.unlink(repo.vfs.join('updatestate'))
1707
1706
1708 if not branchmerge:
1707 if not branchmerge:
1709 repo.dirstate.setbranch(p2.branch())
1708 repo.dirstate.setbranch(p2.branch())
1710
1709
1711 if not partial:
1710 if not partial:
1712 repo.hook('update', parent1=xp1, parent2=xp2, error=stats[3])
1711 repo.hook('update', parent1=xp1, parent2=xp2, error=stats[3])
1713 return stats
1712 return stats
1714
1713
1715 def graft(repo, ctx, pctx, labels, keepparent=False):
1714 def graft(repo, ctx, pctx, labels, keepparent=False):
1716 """Do a graft-like merge.
1715 """Do a graft-like merge.
1717
1716
1718 This is a merge where the merge ancestor is chosen such that one
1717 This is a merge where the merge ancestor is chosen such that one
1719 or more changesets are grafted onto the current changeset. In
1718 or more changesets are grafted onto the current changeset. In
1720 addition to the merge, this fixes up the dirstate to include only
1719 addition to the merge, this fixes up the dirstate to include only
1721 a single parent (if keepparent is False) and tries to duplicate any
1720 a single parent (if keepparent is False) and tries to duplicate any
1722 renames/copies appropriately.
1721 renames/copies appropriately.
1723
1722
1724 ctx - changeset to rebase
1723 ctx - changeset to rebase
1725 pctx - merge base, usually ctx.p1()
1724 pctx - merge base, usually ctx.p1()
1726 labels - merge labels eg ['local', 'graft']
1725 labels - merge labels eg ['local', 'graft']
1727 keepparent - keep second parent if any
1726 keepparent - keep second parent if any
1728
1727
1729 """
1728 """
1730 # If we're grafting a descendant onto an ancestor, be sure to pass
1729 # If we're grafting a descendant onto an ancestor, be sure to pass
1731 # mergeancestor=True to update. This does two things: 1) allows the merge if
1730 # mergeancestor=True to update. This does two things: 1) allows the merge if
1732 # the destination is the same as the parent of the ctx (so we can use graft
1731 # the destination is the same as the parent of the ctx (so we can use graft
1733 # to copy commits), and 2) informs update that the incoming changes are
1732 # to copy commits), and 2) informs update that the incoming changes are
1734 # newer than the destination so it doesn't prompt about "remote changed foo
1733 # newer than the destination so it doesn't prompt about "remote changed foo
1735 # which local deleted".
1734 # which local deleted".
1736 mergeancestor = repo.changelog.isancestor(repo['.'].node(), ctx.node())
1735 mergeancestor = repo.changelog.isancestor(repo['.'].node(), ctx.node())
1737
1736
1738 stats = update(repo, ctx.node(), True, True, pctx.node(),
1737 stats = update(repo, ctx.node(), True, True, pctx.node(),
1739 mergeancestor=mergeancestor, labels=labels)
1738 mergeancestor=mergeancestor, labels=labels)
1740
1739
1741 pother = nullid
1740 pother = nullid
1742 parents = ctx.parents()
1741 parents = ctx.parents()
1743 if keepparent and len(parents) == 2 and pctx in parents:
1742 if keepparent and len(parents) == 2 and pctx in parents:
1744 parents.remove(pctx)
1743 parents.remove(pctx)
1745 pother = parents[0].node()
1744 pother = parents[0].node()
1746
1745
1747 with repo.dirstate.parentchange():
1746 with repo.dirstate.parentchange():
1748 repo.setparents(repo['.'].node(), pother)
1747 repo.setparents(repo['.'].node(), pother)
1749 repo.dirstate.write(repo.currenttransaction())
1748 repo.dirstate.write(repo.currenttransaction())
1750 # fix up dirstate for copies and renames
1749 # fix up dirstate for copies and renames
1751 copies.duplicatecopies(repo, ctx.rev(), pctx.rev())
1750 copies.duplicatecopies(repo, ctx.rev(), pctx.rev())
1752 return stats
1751 return stats
General Comments 0
You need to be logged in to leave comments. Login now