##// END OF EJS Templates
merge: make merge.preferancestor type and default consistent...
Matt Mackall -
r25844:18541e95 default
parent child Browse files
Show More
@@ -1,1928 +1,1929
1 # context.py - changeset and file context objects for mercurial
1 # context.py - changeset and file context objects for mercurial
2 #
2 #
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from node import nullid, nullrev, wdirid, short, hex, bin
8 from node import nullid, nullrev, wdirid, short, hex, bin
9 from i18n import _
9 from i18n import _
10 import mdiff, error, util, scmutil, subrepo, patch, encoding, phases
10 import mdiff, error, util, scmutil, subrepo, patch, encoding, phases
11 import match as matchmod
11 import match as matchmod
12 import os, errno, stat
12 import os, errno, stat
13 import obsolete as obsmod
13 import obsolete as obsmod
14 import repoview
14 import repoview
15 import fileset
15 import fileset
16 import revlog
16 import revlog
17
17
# Shorthand for the cached-property decorator used throughout this module.
propertycache = util.propertycache

# Phony node value to stand-in for new files in some uses of
# manifests. Manifests support 21-byte hashes for nodes which are
# dirty in the working copy.
_newnode = '!' * 21
24
24
class basectx(object):
    """A basectx object represents the common logic for its children:
    changectx: read-only context that is already present in the repo,
    workingctx: a context that represents the working directory and can
    be committed,
    memctx: a context that represents changes in-memory and can also
    be committed."""
    def __new__(cls, repo, changeid='', *args, **kwargs):
        # If we are handed an existing context, reuse it unchanged instead
        # of building a new wrapper around it.
        if isinstance(changeid, basectx):
            return changeid

        o = super(basectx, cls).__new__(cls)

        o._repo = repo
        o._rev = nullrev
        o._node = nullid

        return o

    def __str__(self):
        return short(self.node())

    def __int__(self):
        return self.rev()

    def __repr__(self):
        return "<%s %s>" % (type(self).__name__, str(self))

    def __eq__(self, other):
        try:
            return type(self) == type(other) and self._rev == other._rev
        except AttributeError:
            return False

    def __ne__(self, other):
        return not (self == other)

    def __contains__(self, key):
        return key in self._manifest

    def __getitem__(self, key):
        return self.filectx(key)

    def __iter__(self):
        return iter(self._manifest)

    def _manifestmatches(self, match, s):
        """generate a new manifest filtered by the match argument

        This method is for internal use only and mainly exists to provide an
        object oriented way for other contexts to customize the manifest
        generation.
        """
        return self.manifest().matches(match)

    def _matchstatus(self, other, match):
        """return match.always if match is none

        This internal method provides a way for child objects to override the
        match operator.
        """
        return match or matchmod.always(self._repo.root, self._repo.getcwd())

    def _buildstatus(self, other, s, match, listignored, listclean,
                     listunknown):
        """build a status with respect to another context"""
        # Load earliest manifest first for caching reasons. More specifically,
        # if you have revisions 1000 and 1001, 1001 is probably stored as a
        # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
        # 1000 and cache it so that when you read 1001, we just need to apply a
        # delta to what's in the cache. So that's one full reconstruction + one
        # delta application.
        if self.rev() is not None and self.rev() < other.rev():
            self.manifest()
        mf1 = other._manifestmatches(match, s)
        mf2 = self._manifestmatches(match, s)

        modified, added = [], []
        removed = []
        clean = []
        deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
        deletedset = set(deleted)
        d = mf1.diff(mf2, clean=listclean)
        for fn, value in d.iteritems():
            if fn in deletedset:
                continue
            if value is None:
                clean.append(fn)
                continue
            (node1, flag1), (node2, flag2) = value
            if node1 is None:
                added.append(fn)
            elif node2 is None:
                removed.append(fn)
            elif node2 != _newnode:
                # The file was not a new file in mf2, so an entry
                # from diff is really a difference.
                modified.append(fn)
            elif self[fn].cmp(other[fn]):
                # node2 was newnode, but the working file doesn't
                # match the one in mf1.
                modified.append(fn)
            else:
                clean.append(fn)

        if removed:
            # need to filter files if they are already reported as removed
            unknown = [fn for fn in unknown if fn not in mf1]
            ignored = [fn for fn in ignored if fn not in mf1]
            # if they're deleted, don't report them as removed
            removed = [fn for fn in removed if fn not in deletedset]

        return scmutil.status(modified, added, removed, deleted, unknown,
                              ignored, clean)

    @propertycache
    def substate(self):
        # Parsed .hgsub/.hgsubstate mapping for this context.
        return subrepo.state(self, self._repo.ui)

    def subrev(self, subpath):
        return self.substate[subpath][1]

    def rev(self):
        return self._rev
    def node(self):
        return self._node
    def hex(self):
        return hex(self.node())
    def manifest(self):
        return self._manifest
    def repo(self):
        return self._repo
    def phasestr(self):
        return phases.phasenames[self.phase()]
    def mutable(self):
        return self.phase() > phases.public

    def getfileset(self, expr):
        return fileset.getfileset(self, expr)

    def obsolete(self):
        """True if the changeset is obsolete"""
        return self.rev() in obsmod.getrevs(self._repo, 'obsolete')

    def extinct(self):
        """True if the changeset is extinct"""
        return self.rev() in obsmod.getrevs(self._repo, 'extinct')

    def unstable(self):
        """True if the changeset is not obsolete but it's ancestor are"""
        return self.rev() in obsmod.getrevs(self._repo, 'unstable')

    def bumped(self):
        """True if the changeset try to be a successor of a public changeset

        Only non-public and non-obsolete changesets may be bumped.
        """
        return self.rev() in obsmod.getrevs(self._repo, 'bumped')

    def divergent(self):
        """Is a successors of a changeset with multiple possible successors set

        Only non-public and non-obsolete changesets may be divergent.
        """
        return self.rev() in obsmod.getrevs(self._repo, 'divergent')

    def troubled(self):
        """True if the changeset is either unstable, bumped or divergent"""
        return self.unstable() or self.bumped() or self.divergent()

    def troubles(self):
        """return the list of troubles affecting this changesets.

        Troubles are returned as strings. possible values are:
        - unstable,
        - bumped,
        - divergent.
        """
        troubles = []
        if self.unstable():
            troubles.append('unstable')
        if self.bumped():
            troubles.append('bumped')
        if self.divergent():
            troubles.append('divergent')
        return troubles

    def parents(self):
        """return contexts for each parent changeset"""
        return self._parents

    def p1(self):
        return self._parents[0]

    def p2(self):
        if len(self._parents) == 2:
            return self._parents[1]
        # No second parent: stand in with the null revision's context.
        return changectx(self._repo, -1)

    def _fileinfo(self, path):
        # Return (filenode, flags) for path, preferring whichever manifest
        # representation is already cached on this object.
        if '_manifest' in self.__dict__:
            try:
                return self._manifest[path], self._manifest.flags(path)
            except KeyError:
                raise error.ManifestLookupError(self._node, path,
                                                _('not found in manifest'))
        if '_manifestdelta' in self.__dict__ or path in self.files():
            if path in self._manifestdelta:
                return (self._manifestdelta[path],
                        self._manifestdelta.flags(path))
        node, flag = self._repo.manifest.find(self._changeset[0], path)
        if not node:
            raise error.ManifestLookupError(self._node, path,
                                            _('not found in manifest'))

        return node, flag

    def filenode(self, path):
        return self._fileinfo(path)[0]

    def flags(self, path):
        try:
            return self._fileinfo(path)[1]
        except error.LookupError:
            return ''

    def sub(self, path):
        '''return a subrepo for the stored revision of path, never wdir()'''
        return subrepo.subrepo(self, path)

    def nullsub(self, path, pctx):
        return subrepo.nullsubrepo(self, path, pctx)

    def workingsub(self, path):
        '''return a subrepo for the stored revision, or wdir if this is a wdir
        context.
        '''
        return subrepo.subrepo(self, path, allowwdir=True)

    def match(self, pats=None, include=None, exclude=None, default='glob',
              listsubrepos=False, badfn=None):
        # NOTE(review): the original used a mutable default (pats=[]); use
        # None as the sentinel instead. Behavior is unchanged for callers.
        if pats is None:
            pats = []
        r = self._repo
        return matchmod.match(r.root, r.getcwd(), pats,
                              include, exclude, default,
                              auditor=r.auditor, ctx=self,
                              listsubrepos=listsubrepos, badfn=badfn)

    def diff(self, ctx2=None, match=None, **opts):
        """Returns a diff generator for the given contexts and matcher"""
        if ctx2 is None:
            ctx2 = self.p1()
        if ctx2 is not None:
            ctx2 = self._repo[ctx2]
        diffopts = patch.diffopts(self._repo.ui, opts)
        return patch.diff(self._repo, ctx2, self, match=match, opts=diffopts)

    def dirs(self):
        return self._manifest.dirs()

    def hasdir(self, dir):
        return self._manifest.hasdir(dir)

    def dirty(self, missing=False, merge=True, branch=True):
        # Read-only contexts are never dirty; subclasses override.
        return False

    def status(self, other=None, match=None, listignored=False,
               listclean=False, listunknown=False, listsubrepos=False):
        """return status of files between two nodes or node and working
        directory.

        If other is None, compare this node with working directory.

        returns (modified, added, removed, deleted, unknown, ignored, clean)
        """

        ctx1 = self
        ctx2 = self._repo[other]

        # This next code block is, admittedly, fragile logic that tests for
        # reversing the contexts and wouldn't need to exist if it weren't for
        # the fast (and common) code path of comparing the working directory
        # with its first parent.
        #
        # What we're aiming for here is the ability to call:
        #
        # workingctx.status(parentctx)
        #
        # If we always built the manifest for each context and compared those,
        # then we'd be done. But the special case of the above call means we
        # just copy the manifest of the parent.
        # (local renamed from 'reversed', which shadowed the builtin)
        swapped = False
        if (not isinstance(ctx1, changectx)
            and isinstance(ctx2, changectx)):
            swapped = True
            ctx1, ctx2 = ctx2, ctx1

        match = ctx2._matchstatus(ctx1, match)
        r = scmutil.status([], [], [], [], [], [], [])
        r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
                              listunknown)

        if swapped:
            # Reverse added and removed. Clear deleted, unknown and ignored as
            # these make no sense to reverse.
            r = scmutil.status(r.modified, r.removed, r.added, [], [], [],
                               r.clean)

        if listsubrepos:
            for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
                rev2 = ctx2.subrev(subpath)
                try:
                    submatch = matchmod.narrowmatcher(subpath, match)
                    s = sub.status(rev2, match=submatch, ignored=listignored,
                                   clean=listclean, unknown=listunknown,
                                   listsubrepos=True)
                    for rfiles, sfiles in zip(r, s):
                        rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
                except error.LookupError:
                    self._repo.ui.status(_("skipping missing "
                                           "subrepository: %s\n") % subpath)

        for l in r:
            l.sort()

        return r
350
350
351
351
def makememctx(repo, parents, text, user, date, branch, files, store,
               editor=None, extra=None):
    """Build an in-memory changeset (memctx) whose file data is read from
    *store* via ``store.getfile(path)``.

    ``branch``, when non-empty, is recorded in the changeset extras. Returns
    the new memctx; nothing is committed here.
    """
    def getfilectx(repo, memctx, path):
        # A None data payload from the store means the file is absent.
        data, mode, copied = store.getfile(path)
        if data is None:
            return None
        islink, isexec = mode
        return memfilectx(repo, path, data, islink=islink, isexec=isexec,
                          copied=copied, memctx=memctx)
    if extra is None:
        extra = {}
    if branch:
        extra['branch'] = encoding.fromlocal(branch)
    ctx = memctx(repo, parents, text, files, getfilectx, user,
                 date, extra, editor)
    return ctx
368
368
class changectx(basectx):
    """A changecontext object makes access to data related to a particular
    changeset convenient. It represents a read-only context already present in
    the repo."""
    def __init__(self, repo, changeid=''):
        """changeid is a revision number, node, or tag"""

        # since basectx.__new__ already took care of copying the object, we
        # don't need to do anything in __init__, so we just exit here
        if isinstance(changeid, basectx):
            return

        if changeid == '':
            changeid = '.'
        self._repo = repo

        try:
            if isinstance(changeid, int):
                self._node = repo.changelog.node(changeid)
                self._rev = changeid
                return
            if isinstance(changeid, long):
                # Python 2 long: normalize to the string lookup path below.
                changeid = str(changeid)
            if changeid == 'null':
                self._node = nullid
                self._rev = nullrev
                return
            if changeid == 'tip':
                self._node = repo.changelog.tip()
                self._rev = repo.changelog.rev(self._node)
                return
            if changeid == '.' or changeid == repo.dirstate.p1():
                # this is a hack to delay/avoid loading obsmarkers
                # when we know that '.' won't be hidden
                self._node = repo.dirstate.p1()
                self._rev = repo.unfiltered().changelog.rev(self._node)
                return
            if len(changeid) == 20:
                # Looks like a binary node id.
                try:
                    self._node = changeid
                    self._rev = repo.changelog.rev(changeid)
                    return
                except error.FilteredRepoLookupError:
                    raise
                except LookupError:
                    pass

            # Plain integer revision number given as a string.
            try:
                r = int(changeid)
                if str(r) != changeid:
                    raise ValueError
                l = len(repo.changelog)
                if r < 0:
                    r += l
                if r < 0 or r >= l:
                    raise ValueError
                self._rev = r
                self._node = repo.changelog.node(r)
                return
            except error.FilteredIndexError:
                raise
            except (ValueError, OverflowError, IndexError):
                pass

            if len(changeid) == 40:
                # Full 40-char hex node id.
                try:
                    self._node = bin(changeid)
                    self._rev = repo.changelog.rev(self._node)
                    return
                except error.FilteredLookupError:
                    raise
                except (TypeError, LookupError):
                    pass

            # lookup bookmarks through the name interface
            try:
                self._node = repo.names.singlenode(repo, changeid)
                self._rev = repo.changelog.rev(self._node)
                return
            except KeyError:
                pass
            except error.FilteredRepoLookupError:
                raise
            except error.RepoLookupError:
                pass

            # Last resort: unambiguous hex prefix match.
            self._node = repo.unfiltered().changelog._partialmatch(changeid)
            if self._node is not None:
                self._rev = repo.changelog.rev(self._node)
                return

            # lookup failed
            # check if it might have come from damaged dirstate
            #
            # XXX we could avoid the unfiltered if we had a recognizable
            # exception for filtered changeset access
            if changeid in repo.unfiltered().dirstate.parents():
                msg = _("working directory has unknown parent '%s'!")
                raise error.Abort(msg % short(changeid))
            try:
                if len(changeid) == 20:
                    changeid = hex(changeid)
            except TypeError:
                pass
        except (error.FilteredIndexError, error.FilteredLookupError,
                error.FilteredRepoLookupError):
            # The revision exists but is hidden/filtered in this repo view.
            if repo.filtername.startswith('visible'):
                msg = _("hidden revision '%s'") % changeid
                hint = _('use --hidden to access hidden revisions')
                raise error.FilteredRepoLookupError(msg, hint=hint)
            msg = _("filtered revision '%s' (not in '%s' subset)")
            msg %= (changeid, repo.filtername)
            raise error.FilteredRepoLookupError(msg)
        except IndexError:
            pass
        raise error.RepoLookupError(
            _("unknown revision '%s'") % changeid)
486
486
487 def __hash__(self):
487 def __hash__(self):
488 try:
488 try:
489 return hash(self._rev)
489 return hash(self._rev)
490 except AttributeError:
490 except AttributeError:
491 return id(self)
491 return id(self)
492
492
493 def __nonzero__(self):
493 def __nonzero__(self):
494 return self._rev != nullrev
494 return self._rev != nullrev
495
495
496 @propertycache
496 @propertycache
497 def _changeset(self):
497 def _changeset(self):
498 return self._repo.changelog.read(self.rev())
498 return self._repo.changelog.read(self.rev())
499
499
500 @propertycache
500 @propertycache
501 def _manifest(self):
501 def _manifest(self):
502 return self._repo.manifest.read(self._changeset[0])
502 return self._repo.manifest.read(self._changeset[0])
503
503
504 @propertycache
504 @propertycache
505 def _manifestdelta(self):
505 def _manifestdelta(self):
506 return self._repo.manifest.readdelta(self._changeset[0])
506 return self._repo.manifest.readdelta(self._changeset[0])
507
507
508 @propertycache
508 @propertycache
509 def _parents(self):
509 def _parents(self):
510 p = self._repo.changelog.parentrevs(self._rev)
510 p = self._repo.changelog.parentrevs(self._rev)
511 if p[1] == nullrev:
511 if p[1] == nullrev:
512 p = p[:-1]
512 p = p[:-1]
513 return [changectx(self._repo, x) for x in p]
513 return [changectx(self._repo, x) for x in p]
514
514
515 def changeset(self):
515 def changeset(self):
516 return self._changeset
516 return self._changeset
517 def manifestnode(self):
517 def manifestnode(self):
518 return self._changeset[0]
518 return self._changeset[0]
519
519
520 def user(self):
520 def user(self):
521 return self._changeset[1]
521 return self._changeset[1]
522 def date(self):
522 def date(self):
523 return self._changeset[2]
523 return self._changeset[2]
524 def files(self):
524 def files(self):
525 return self._changeset[3]
525 return self._changeset[3]
def description(self):
    """Commit message of this changeset (changelog field 4)."""
    entry = self._changeset
    return entry[4]
def branch(self):
    """Branch name from the extra dict, converted to local encoding."""
    extra = self._changeset[5]
    return encoding.tolocal(extra.get("branch"))
def closesbranch(self):
    """True when this changeset closes its branch ('close' in extra)."""
    extra = self._changeset[5]
    return 'close' in extra
def extra(self):
    """Extra metadata dict of this changeset (changelog field 5)."""
    entry = self._changeset
    return entry[5]
def tags(self):
    """All tag names pointing at this changeset's node."""
    repo = self._repo
    return repo.nodetags(self._node)
def bookmarks(self):
    """All bookmark names pointing at this changeset's node."""
    repo = self._repo
    return repo.nodebookmarks(self._node)
def phase(self):
    """Phase of this revision, as recorded by the repo's phase cache."""
    repo = self._repo
    return repo._phasecache.phase(repo, self._rev)
def hidden(self):
    """True when this revision is filtered out of the 'visible' view."""
    filtered = repoview.filterrevs(self._repo, 'visible')
    return self._rev in filtered
542
542
def children(self):
    """return contexts for each child changeset"""
    repo = self._repo
    return [changectx(repo, rev)
            for rev in repo.changelog.children(self._node)]
547
547
def ancestors(self):
    """Yield a context for every ancestor revision of this changeset."""
    repo = self._repo
    for rev in repo.changelog.ancestors([self._rev]):
        yield changectx(repo, rev)
551
551
def descendants(self):
    """Yield a context for every descendant revision of this changeset."""
    repo = self._repo
    for rev in repo.changelog.descendants([self._rev]):
        yield changectx(repo, rev)
555
555
def filectx(self, path, fileid=None, filelog=None):
    """get a file context from this changeset

    path: repo-relative file path
    fileid: optional file revision/node; defaults to this changeset's
            filenode for path
    filelog: optional pre-opened filelog, passed through to avoid a
             second open
    """
    if fileid is None:
        fileid = self.filenode(path)
    return filectx(self._repo, path, fileid=fileid,
                   changectx=self, filelog=filelog)
562
562
def ancestor(self, c2, warn=False):
    """return the "best" ancestor context of self and c2

    If there are multiple candidates, it will show a message and check
    merge.preferancestor configuration before falling back to the
    revlog ancestor."""
    # deal with workingctxs
    n2 = c2._node
    if n2 is None:
        n2 = c2._parents[0]._node
    cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
    if not cahs:
        anc = nullid
    elif len(cahs) == 1:
        anc = cahs[0]
    else:
        # experimental config: merge.preferancestor
        for r in self._repo.ui.configlist('merge', 'preferancestor', ['*']):
            try:
                ctx = changectx(self._repo, r)
            except error.RepoLookupError:
                # unknown revision in the config (including the '*'
                # default): keep scanning the remaining candidates
                continue
            anc = ctx.node()
            if anc in cahs:
                break
        # for/else: no configured candidate matched, ask the revlog
        else:
            anc = self._repo.changelog.ancestor(self._node, n2)
        if warn:
            self._repo.ui.status(
                (_("note: using %s as ancestor of %s and %s\n") %
                 (short(anc), short(self._node), short(n2))) +
                ''.join(_(" alternatively, use --config "
                          "merge.preferancestor=%s\n") %
                        short(n) for n in sorted(cahs) if n != anc))
    return changectx(self._repo, anc)
597
598
def descendant(self, other):
    """True if other is descendant of this changeset"""
    cl = self._repo.changelog
    return cl.descendant(self._rev, other._rev)
601
602
def walk(self, match):
    '''Generates matching file names.'''

    # Wrap match.bad method to have message with nodeid
    def bad(fn, msg):
        # The manifest doesn't know about subrepos, so don't complain about
        # paths into valid subrepos.
        if any(fn == s or fn.startswith(s + '/')
               for s in self.substate):
            return
        match.bad(fn, _('no such file in rev %s') % self)

    m = matchmod.badmatch(match, bad)
    return self._manifest.walk(m)
616
617
def matches(self, match):
    """Alias for walk(): generate file names matching 'match'."""
    return self.walk(match)
619
620
class basefilectx(object):
    """A filecontext object represents the common logic for its children:
    filectx: read-only access to a filerevision that is already present
             in the repo,
    workingfilectx: a filecontext that represents files from the working
                    directory,
    memfilectx: a filecontext that represents files in-memory."""
    def __new__(cls, repo, path, *args, **kwargs):
        # accept (and ignore) the shared constructor arguments so all
        # subclasses can be instantiated with a uniform signature
        return super(basefilectx, cls).__new__(cls)

    @propertycache
    def _filelog(self):
        # filelog for this path, opened lazily from the repo
        return self._repo.file(self._path)

    @propertycache
    def _changeid(self):
        # changelog revision this file revision is attached to; the
        # lookup order mirrors how the filectx was constructed
        if '_changeid' in self.__dict__:
            return self._changeid
        elif '_changectx' in self.__dict__:
            return self._changectx.rev()
        elif '_descendantrev' in self.__dict__:
            # this file context was created from a revision with a known
            # descendant, we can (lazily) correct for linkrev aliases
            return self._adjustlinkrev(self._path, self._filelog,
                                       self._filenode, self._descendantrev)
        else:
            return self._filelog.linkrev(self._filerev)

    @propertycache
    def _filenode(self):
        # file node, either looked up from an explicit file id or taken
        # from the associated changeset's manifest
        if '_fileid' in self.__dict__:
            return self._filelog.lookup(self._fileid)
        else:
            return self._changectx.filenode(self._path)

    @propertycache
    def _filerev(self):
        # filelog revision number for this file node
        return self._filelog.rev(self._filenode)

    @propertycache
    def _repopath(self):
        return self._path

    def __nonzero__(self):
        try:
            self._filenode
            return True
        except error.LookupError:
            # file is missing
            return False

    def __str__(self):
        return "%s@%s" % (self.path(), self._changectx)

    def __repr__(self):
        return "<%s %s>" % (type(self).__name__, str(self))

    def __hash__(self):
        # hash on (path, filenode); fall back to identity while either
        # is still unresolved
        try:
            return hash((self._path, self._filenode))
        except AttributeError:
            return id(self)

    def __eq__(self, other):
        try:
            return (type(self) == type(other) and self._path == other._path
                    and self._filenode == other._filenode)
        except AttributeError:
            return False

    def __ne__(self, other):
        return not (self == other)

    # simple accessors delegating to the filelog or the owning changectx
    def filerev(self):
        return self._filerev
    def filenode(self):
        return self._filenode
    def flags(self):
        return self._changectx.flags(self._path)
    def filelog(self):
        return self._filelog
    def rev(self):
        return self._changeid
    def linkrev(self):
        return self._filelog.linkrev(self._filerev)
    def node(self):
        return self._changectx.node()
    def hex(self):
        return self._changectx.hex()
    def user(self):
        return self._changectx.user()
    def date(self):
        return self._changectx.date()
    def files(self):
        return self._changectx.files()
    def description(self):
        return self._changectx.description()
    def branch(self):
        return self._changectx.branch()
    def extra(self):
        return self._changectx.extra()
    def phase(self):
        return self._changectx.phase()
    def phasestr(self):
        return self._changectx.phasestr()
    def manifest(self):
        return self._changectx.manifest()
    def changectx(self):
        return self._changectx
    def repo(self):
        return self._repo

    def path(self):
        return self._path

    def isbinary(self):
        try:
            return util.binary(self.data())
        except IOError:
            return False
    def isexec(self):
        return 'x' in self.flags()
    def islink(self):
        return 'l' in self.flags()

    def cmp(self, fctx):
        """compare with other file context

        returns True if different than fctx.
        """
        # only pay the cost of reading fctx's data when a size heuristic
        # says the contents could actually be equal
        if (fctx._filerev is None
            and (self._repo._encodefilterpats
                 # if file data starts with '\1\n', empty metadata block is
                 # prepended, which adds 4 bytes to filelog.size().
                 or self.size() - 4 == fctx.size())
            or self.size() == fctx.size()):
            return self._filelog.cmp(self._filenode, fctx.data())

        return True

    def _adjustlinkrev(self, path, filelog, fnode, srcrev, inclusive=False):
        """return the first ancestor of <srcrev> introducing <fnode>

        If the linkrev of the file revision does not point to an ancestor of
        srcrev, we'll walk down the ancestors until we find one introducing
        this file revision.

        :repo: a localrepository object (used to access changelog and manifest)
        :path: the file path
        :fnode: the nodeid of the file revision
        :filelog: the filelog of this path
        :srcrev: the changeset revision we search ancestors from
        :inclusive: if true, the src revision will also be checked
        """
        repo = self._repo
        cl = repo.unfiltered().changelog
        ma = repo.manifest
        # fetch the linkrev
        fr = filelog.rev(fnode)
        lkr = filelog.linkrev(fr)
        # hack to reuse ancestor computation when searching for renames
        memberanc = getattr(self, '_ancestrycontext', None)
        iteranc = None
        if srcrev is None:
            # wctx case, used by workingfilectx during mergecopy
            revs = [p.rev() for p in self._repo[None].parents()]
            inclusive = True # we skipped the real (revless) source
        else:
            revs = [srcrev]
        if memberanc is None:
            memberanc = iteranc = cl.ancestors(revs, lkr,
                                               inclusive=inclusive)
        # check if this linkrev is an ancestor of srcrev
        if lkr not in memberanc:
            if iteranc is None:
                iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
            for a in iteranc:
                ac = cl.read(a) # get changeset data (we avoid object creation)
                if path in ac[3]: # checking the 'files' field.
                    # The file has been touched, check if the content is
                    # similar to the one we search for.
                    if fnode == ma.readfast(ac[0]).get(path):
                        return a
            # In theory, we should never get out of that loop without a result.
            # But if manifest uses a buggy file revision (not children of the
            # one it replaces) we could. Such a buggy situation will likely
            # result in a crash somewhere else at some point.
        return lkr

    def introrev(self):
        """return the rev of the changeset which introduced this file revision

        This method is different from linkrev because it takes into account
        the changeset the filectx was created from. It ensures the returned
        revision is one of its ancestors. This prevents bugs from
        'linkrev-shadowing' when a file revision is used by multiple
        changesets.
        """
        lkr = self.linkrev()
        attrs = vars(self)
        noctx = not ('_changeid' in attrs or '_changectx' in attrs)
        if noctx or self.rev() == lkr:
            return self.linkrev()
        return self._adjustlinkrev(self._path, self._filelog, self._filenode,
                                   self.rev(), inclusive=True)

    def _parentfilectx(self, path, fileid, filelog):
        """create parent filectx keeping ancestry info for _adjustlinkrev()"""
        fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
        if '_changeid' in vars(self) or '_changectx' in vars(self):
            # If self is associated with a changeset (probably explicitly
            # fed), ensure the created filectx is associated with a
            # changeset that is an ancestor of self.changectx.
            # This lets us later use _adjustlinkrev to get a correct link.
            fctx._descendantrev = self.rev()
            fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
        elif '_descendantrev' in vars(self):
            # Otherwise propagate _descendantrev if we have one associated.
            fctx._descendantrev = self._descendantrev
            fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
        return fctx

    def parents(self):
        """Return parent file contexts, folding in rename information."""
        _path = self._path
        fl = self._filelog
        parents = self._filelog.parents(self._filenode)
        pl = [(_path, node, fl) for node in parents if node != nullid]

        r = fl.renamed(self._filenode)
        if r:
            # - In the simple rename case, both parent are nullid, pl is empty.
            # - In case of merge, only one of the parent is null id and should
            # be replaced with the rename information. This parent is -always-
            # the first one.
            #
            # As null id have always been filtered out in the previous list
            # comprehension, inserting to 0 will always result in "replacing
            # first nullid parent with rename information.
            pl.insert(0, (r[0], r[1], self._repo.file(r[0])))

        return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]

    def p1(self):
        return self.parents()[0]

    def p2(self):
        # second parent, or a null filectx (fileid=-1) when there is none
        p = self.parents()
        if len(p) == 2:
            return p[1]
        return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)

    def annotate(self, follow=False, linenumber=None, diffopts=None):
        '''returns a list of tuples of (ctx, line) for each line
        in the file, where ctx is the filectx of the node where
        that line was last changed.
        This returns tuples of ((ctx, linenumber), line) for each line,
        if "linenumber" parameter is NOT "None".
        In such tuples, linenumber means one at the first appearance
        in the managed file.
        To reduce annotation cost,
        this returns fixed value(False is used) as linenumber,
        if "linenumber" parameter is "False".'''

        # decorate() pairs every line of a text with its annotation token
        if linenumber is None:
            def decorate(text, rev):
                return ([rev] * len(text.splitlines()), text)
        elif linenumber:
            def decorate(text, rev):
                size = len(text.splitlines())
                return ([(rev, i) for i in xrange(1, size + 1)], text)
        else:
            def decorate(text, rev):
                return ([(rev, False)] * len(text.splitlines()), text)

        def pair(parent, child):
            # copy annotations for unchanged ('=') blocks from the parent
            blocks = mdiff.allblocks(parent[1], child[1], opts=diffopts,
                                     refine=True)
            for (a1, a2, b1, b2), t in blocks:
                # Changed blocks ('!') or blocks made only of blank lines ('~')
                # belong to the child.
                if t == '=':
                    child[0][b1:b2] = parent[0][a1:a2]
            return child

        getlog = util.lrucachefunc(lambda x: self._repo.file(x))

        def parents(f):
            # Cut _descendantrev here to mitigate the penalty of lazy linkrev
            # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
            # from the topmost introrev (= srcrev) down to p.linkrev() if it
            # isn't an ancestor of the srcrev.
            f._changeid
            pl = f.parents()

            # Don't return renamed parents if we aren't following.
            if not follow:
                pl = [p for p in pl if p.path() == f.path()]

            # renamed filectx won't have a filelog yet, so set it
            # from the cache to save time
            for p in pl:
                if not '_filelog' in p.__dict__:
                    p._filelog = getlog(p.path())

            return pl

        # use linkrev to find the first changeset where self appeared
        base = self
        introrev = self.introrev()
        if self.rev() != introrev:
            base = self.filectx(self.filenode(), changeid=introrev)
        if getattr(base, '_ancestrycontext', None) is None:
            cl = self._repo.changelog
            if introrev is None:
                # wctx is not inclusive, but works because _ancestrycontext
                # is used to test filelog revisions
                ac = cl.ancestors([p.rev() for p in base.parents()],
                                  inclusive=True)
            else:
                ac = cl.ancestors([introrev], inclusive=True)
            base._ancestrycontext = ac

        # This algorithm would prefer to be recursive, but Python is a
        # bit recursion-hostile. Instead we do an iterative
        # depth-first search.

        visit = [base]
        hist = {}
        pcache = {}
        needed = {base: 1}
        while visit:
            f = visit[-1]
            pcached = f in pcache
            if not pcached:
                pcache[f] = parents(f)

            ready = True
            pl = pcache[f]
            for p in pl:
                if p not in hist:
                    ready = False
                    visit.append(p)
                if not pcached:
                    needed[p] = needed.get(p, 0) + 1
            if ready:
                visit.pop()
                reusable = f in hist
                if reusable:
                    curr = hist[f]
                else:
                    curr = decorate(f.data(), f)
                for p in pl:
                    if not reusable:
                        curr = pair(hist[p], curr)
                    # release parent annotations as soon as nothing else
                    # needs them, to keep memory bounded
                    if needed[p] == 1:
                        del hist[p]
                        del needed[p]
                    else:
                        needed[p] -= 1

                hist[f] = curr
                pcache[f] = []

        return zip(hist[base][0], hist[base][1].splitlines(True))

    def ancestors(self, followfirst=False):
        """Yield ancestor file contexts, newest (linkrev, filenode) first."""
        visit = {}
        c = self
        if followfirst:
            cut = 1
        else:
            cut = None

        while True:
            for parent in c.parents()[:cut]:
                visit[(parent.linkrev(), parent.filenode())] = parent
            if not visit:
                break
            c = visit.pop(max(visit))
            yield c
1000
1001
1001 class filectx(basefilectx):
1002 class filectx(basefilectx):
1002 """A filecontext object makes access to data related to a particular
1003 """A filecontext object makes access to data related to a particular
1003 filerevision convenient."""
1004 filerevision convenient."""
def __init__(self, repo, path, changeid=None, fileid=None,
             filelog=None, changectx=None):
    """changeid can be a changeset revision, node, or tag.
    fileid can be a file revision or node.

    At least one of changeid, fileid or changectx must be supplied so
    the context can resolve which file revision it refers to; the
    others are stored only if given, and resolved lazily otherwise.
    """
    self._repo = repo
    self._path = path

    assert (changeid is not None
            or fileid is not None
            or changectx is not None), \
            ("bad args: changeid=%r, fileid=%r, changectx=%r"
             % (changeid, fileid, changectx))

    if filelog is not None:
        self._filelog = filelog

    if changeid is not None:
        self._changeid = changeid
    if changectx is not None:
        self._changectx = changectx
    if fileid is not None:
        self._fileid = fileid
1026
1027
1027 @propertycache
1028 @propertycache
1028 def _changectx(self):
1029 def _changectx(self):
1029 try:
1030 try:
1030 return changectx(self._repo, self._changeid)
1031 return changectx(self._repo, self._changeid)
1031 except error.FilteredRepoLookupError:
1032 except error.FilteredRepoLookupError:
1032 # Linkrev may point to any revision in the repository. When the
1033 # Linkrev may point to any revision in the repository. When the
1033 # repository is filtered this may lead to `filectx` trying to build
1034 # repository is filtered this may lead to `filectx` trying to build
1034 # `changectx` for filtered revision. In such case we fallback to
1035 # `changectx` for filtered revision. In such case we fallback to
1035 # creating `changectx` on the unfiltered version of the reposition.
1036 # creating `changectx` on the unfiltered version of the reposition.
1036 # This fallback should not be an issue because `changectx` from
1037 # This fallback should not be an issue because `changectx` from
1037 # `filectx` are not used in complex operations that care about
1038 # `filectx` are not used in complex operations that care about
1038 # filtering.
1039 # filtering.
1039 #
1040 #
1040 # This fallback is a cheap and dirty fix that prevent several
1041 # This fallback is a cheap and dirty fix that prevent several
1041 # crashes. It does not ensure the behavior is correct. However the
1042 # crashes. It does not ensure the behavior is correct. However the
1042 # behavior was not correct before filtering either and "incorrect
1043 # behavior was not correct before filtering either and "incorrect
1043 # behavior" is seen as better as "crash"
1044 # behavior" is seen as better as "crash"
1044 #
1045 #
1045 # Linkrevs have several serious troubles with filtering that are
1046 # Linkrevs have several serious troubles with filtering that are
1046 # complicated to solve. Proper handling of the issue here should be
1047 # complicated to solve. Proper handling of the issue here should be
1047 # considered when solving linkrev issue are on the table.
1048 # considered when solving linkrev issue are on the table.
1048 return changectx(self._repo.unfiltered(), self._changeid)
1049 return changectx(self._repo.unfiltered(), self._changeid)
1049
1050
1050 def filectx(self, fileid, changeid=None):
1051 def filectx(self, fileid, changeid=None):
1051 '''opens an arbitrary revision of the file without
1052 '''opens an arbitrary revision of the file without
1052 opening a new filelog'''
1053 opening a new filelog'''
1053 return filectx(self._repo, self._path, fileid=fileid,
1054 return filectx(self._repo, self._path, fileid=fileid,
1054 filelog=self._filelog, changeid=changeid)
1055 filelog=self._filelog, changeid=changeid)
1055
1056
1056 def data(self):
1057 def data(self):
1057 try:
1058 try:
1058 return self._filelog.read(self._filenode)
1059 return self._filelog.read(self._filenode)
1059 except error.CensoredNodeError:
1060 except error.CensoredNodeError:
1060 if self._repo.ui.config("censor", "policy", "abort") == "ignore":
1061 if self._repo.ui.config("censor", "policy", "abort") == "ignore":
1061 return ""
1062 return ""
1062 raise util.Abort(_("censored node: %s") % short(self._filenode),
1063 raise util.Abort(_("censored node: %s") % short(self._filenode),
1063 hint=_("set censor.policy to ignore errors"))
1064 hint=_("set censor.policy to ignore errors"))
1064
1065
1065 def size(self):
1066 def size(self):
1066 return self._filelog.size(self._filerev)
1067 return self._filelog.size(self._filerev)
1067
1068
1068 def renamed(self):
1069 def renamed(self):
1069 """check if file was actually renamed in this changeset revision
1070 """check if file was actually renamed in this changeset revision
1070
1071
1071 If rename logged in file revision, we report copy for changeset only
1072 If rename logged in file revision, we report copy for changeset only
1072 if file revisions linkrev points back to the changeset in question
1073 if file revisions linkrev points back to the changeset in question
1073 or both changeset parents contain different file revisions.
1074 or both changeset parents contain different file revisions.
1074 """
1075 """
1075
1076
1076 renamed = self._filelog.renamed(self._filenode)
1077 renamed = self._filelog.renamed(self._filenode)
1077 if not renamed:
1078 if not renamed:
1078 return renamed
1079 return renamed
1079
1080
1080 if self.rev() == self.linkrev():
1081 if self.rev() == self.linkrev():
1081 return renamed
1082 return renamed
1082
1083
1083 name = self.path()
1084 name = self.path()
1084 fnode = self._filenode
1085 fnode = self._filenode
1085 for p in self._changectx.parents():
1086 for p in self._changectx.parents():
1086 try:
1087 try:
1087 if fnode == p.filenode(name):
1088 if fnode == p.filenode(name):
1088 return None
1089 return None
1089 except error.LookupError:
1090 except error.LookupError:
1090 pass
1091 pass
1091 return renamed
1092 return renamed
1092
1093
1093 def children(self):
1094 def children(self):
1094 # hard for renames
1095 # hard for renames
1095 c = self._filelog.children(self._filenode)
1096 c = self._filelog.children(self._filenode)
1096 return [filectx(self._repo, self._path, fileid=x,
1097 return [filectx(self._repo, self._path, fileid=x,
1097 filelog=self._filelog) for x in c]
1098 filelog=self._filelog) for x in c]
1098
1099
class committablectx(basectx):
    """A committablectx object provides common functionality for a context that
    wants the ability to commit, e.g. workingctx or memctx."""
    def __init__(self, repo, text="", user=None, date=None, extra=None,
                 changes=None):
        self._repo = repo
        # not yet committed, so no revision number or node id
        self._rev = None
        self._node = None
        self._text = text
        # only override the lazily-computed defaults when explicitly given
        if date:
            self._date = util.parsedate(date)
        if user:
            self._user = user
        if changes:
            self._status = changes

        self._extra = {}
        if extra:
            self._extra = extra.copy()
        if 'branch' not in self._extra:
            try:
                branch = encoding.fromlocal(self._repo.dirstate.branch())
            except UnicodeDecodeError:
                raise util.Abort(_('branch name not in UTF-8!'))
            self._extra['branch'] = branch
            if self._extra['branch'] == '':
                self._extra['branch'] = 'default'

    def __str__(self):
        # e.g. "abcdef123456+": first parent plus a "dirty" marker
        return str(self._parents[0]) + "+"

    def __nonzero__(self):
        # a committable context is always truthy (Python 2 truth hook)
        return True

    def _buildflagfunc(self):
        # Create a fallback function for getting file flags when the
        # filesystem doesn't support them

        copiesget = self._repo.dirstate.copies().get

        if len(self._parents) < 2:
            # when we have one parent, it's easy: copy from parent
            man = self._parents[0].manifest()
            def func(f):
                f = copiesget(f, f)
                return man.flags(f)
        else:
            # merges are tricky: we try to reconstruct the unstored
            # result from the merge (issue1802)
            p1, p2 = self._parents
            pa = p1.ancestor(p2)
            m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()

            def func(f):
                f = copiesget(f, f) # may be wrong for merges with copies
                fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
                if fl1 == fl2:
                    return fl1
                if fl1 == fla:
                    return fl2
                if fl2 == fla:
                    return fl1
                return '' # punt for conflicts

        return func

    @propertycache
    def _flagfunc(self):
        return self._repo.dirstate.flagfunc(self._buildflagfunc)

    @propertycache
    def _manifest(self):
        """generate a manifest corresponding to the values in self._status

        This reuse the file nodeid from parent, but we append an extra letter
        when modified. Modified files get an extra 'm' while added files get
        an extra 'a'. This is used by manifests merge to see that files
        are different and by update logic to avoid deleting newly added files.
        """

        man1 = self._parents[0].manifest()
        man = man1.copy()
        if len(self._parents) > 1:
            man2 = self.p2().manifest()
            def getman(f):
                # prefer the first parent's manifest when it knows the file
                if f in man1:
                    return man1
                return man2
        else:
            getman = lambda f: man1

        copied = self._repo.dirstate.copies()
        ff = self._flagfunc
        for i, l in (("a", self._status.added), ("m", self._status.modified)):
            for f in l:
                # look the nodeid up under the copy source, if any
                orig = copied.get(f, f)
                man[f] = getman(orig).get(orig, nullid) + i
                try:
                    man.setflag(f, ff(f))
                except OSError:
                    # file vanished from disk; leave flags as-is
                    pass

        for f in self._status.deleted + self._status.removed:
            if f in man:
                del man[f]

        return man

    @propertycache
    def _status(self):
        return self._repo.status()

    @propertycache
    def _user(self):
        return self._repo.ui.username()

    @propertycache
    def _date(self):
        return util.makedate()

    def subrev(self, subpath):
        # uncommitted contexts have no recorded subrepo revision
        return None

    def manifestnode(self):
        # no manifest has been written yet
        return None
    def user(self):
        return self._user or self._repo.ui.username()
    def date(self):
        return self._date
    def description(self):
        return self._text
    def files(self):
        return sorted(self._status.modified + self._status.added +
                      self._status.removed)

    def modified(self):
        return self._status.modified
    def added(self):
        return self._status.added
    def removed(self):
        return self._status.removed
    def deleted(self):
        return self._status.deleted
    def branch(self):
        return encoding.tolocal(self._extra['branch'])
    def closesbranch(self):
        return 'close' in self._extra
    def extra(self):
        return self._extra

    def tags(self):
        # an uncommitted context carries no tags
        return []

    def bookmarks(self):
        # inherit the union of the parents' bookmarks
        b = []
        for p in self.parents():
            b.extend(p.bookmarks())
        return b

    def phase(self):
        phase = phases.draft # default phase to draft
        for p in self.parents():
            # never have a lower phase than any parent
            phase = max(phase, p.phase())
        return phase

    def hidden(self):
        return False

    def children(self):
        # nothing can descend from an uncommitted context
        return []

    def flags(self, path):
        # prefer the cached manifest's flags when it has been computed
        if '_manifest' in self.__dict__:
            try:
                return self._manifest.flags(path)
            except KeyError:
                return ''

        try:
            return self._flagfunc(path)
        except OSError:
            return ''

    def ancestor(self, c2):
        """return the "best" ancestor context of self and c2"""
        return self._parents[0].ancestor(c2) # punt on two parents for now

    def walk(self, match):
        '''Generates matching file names.'''
        return sorted(self._repo.dirstate.walk(match, sorted(self.substate),
                                               True, False))

    def matches(self, match):
        return sorted(self._repo.dirstate.matches(match))

    def ancestors(self):
        # yield the parents themselves first, then all their ancestors
        for p in self._parents:
            yield p
        for a in self._repo.changelog.ancestors(
            [p.rev() for p in self._parents]):
            yield changectx(self._repo, a)

    def markcommitted(self, node):
        """Perform post-commit cleanup necessary after committing this ctx

        Specifically, this updates backing stores this working context
        wraps to reflect the fact that the changes reflected by this
        workingctx have been committed. For example, it marks
        modified and added files as normal in the dirstate.

        """

        self._repo.dirstate.beginparentchange()
        for f in self.modified() + self.added():
            self._repo.dirstate.normal(f)
        for f in self.removed():
            self._repo.dirstate.drop(f)
        self._repo.dirstate.setparents(node)
        self._repo.dirstate.endparentchange()

        # write changes out explicitly, because nesting wlock at
        # runtime may prevent 'wlock.release()' in 'repo.commit()'
        # from immediately doing so for subsequent changing files
        self._repo.dirstate.write()
1323
1324
1324 class workingctx(committablectx):
1325 class workingctx(committablectx):
1325 """A workingctx object makes access to data related to
1326 """A workingctx object makes access to data related to
1326 the current working directory convenient.
1327 the current working directory convenient.
1327 date - any valid date string or (unixtime, offset), or None.
1328 date - any valid date string or (unixtime, offset), or None.
1328 user - username string, or None.
1329 user - username string, or None.
1329 extra - a dictionary of extra values, or None.
1330 extra - a dictionary of extra values, or None.
1330 changes - a list of file lists as returned by localrepo.status()
1331 changes - a list of file lists as returned by localrepo.status()
1331 or None to use the repository status.
1332 or None to use the repository status.
1332 """
1333 """
def __init__(self, repo, text="", user=None, date=None, extra=None,
             changes=None):
    # all state handling is inherited from committablectx
    super(workingctx, self).__init__(repo, text, user, date, extra, changes)
1336
1337
def __iter__(self):
    """Iterate over tracked file names, skipping removed ('r') entries."""
    d = self._repo.dirstate
    for f in d:
        if d[f] != 'r':
            yield f
1342
1343
def __contains__(self, key):
    # a file is "in" the working context unless it is unknown ('?')
    # or marked removed ('r') in the dirstate
    return self._repo.dirstate[key] not in "?r"
1345
1346
def hex(self):
    # the working directory is represented by the special wdirid node
    return hex(wdirid)
1348
1349
@propertycache
def _parents(self):
    """Return the working directory's parent changectxs.

    A null second parent is dropped, so the list has one entry for a
    normal checkout and two during an uncommitted merge.
    """
    p = self._repo.dirstate.parents()
    if p[1] == nullid:
        p = p[:-1]
    return [changectx(self._repo, x) for x in p]
1355
1356
def filectx(self, path, filelog=None):
    """get a file context from the working directory"""
    return workingfilectx(self._repo, path, workingctx=self,
                          filelog=filelog)
1360
1361
def dirty(self, missing=False, merge=True, branch=True):
    "check whether a working directory is modified"
    # check subrepos first
    for s in sorted(self.substate):
        if self.sub(s).dirty():
            return True
    # check current working dir: an in-progress merge (second parent),
    # a branch differing from the first parent's, any file changes, and
    # optionally deleted (missing) files all count as dirty
    return ((merge and self.p2()) or
            (branch and self.branch() != self.p1().branch()) or
            self.modified() or self.added() or self.removed() or
            (missing and self.deleted()))
1372
1373
def add(self, list, prefix=""):
    """Schedule the given files for addition in the dirstate.

    Returns the list of files that could not be added.  `prefix` is
    prepended to file names only for user-facing messages.
    """
    join = lambda f: os.path.join(prefix, f)
    wlock = self._repo.wlock()
    ui, ds = self._repo.ui, self._repo.dirstate
    try:
        rejected = []
        lstat = self._repo.wvfs.lstat
        for f in list:
            # reject names that are unportable across filesystems
            scmutil.checkportable(ui, join(f))
            try:
                st = lstat(f)
            except OSError:
                ui.warn(_("%s does not exist!\n") % join(f))
                rejected.append(f)
                continue
            if st.st_size > 10000000:
                # warn, but still allow, very large files (>10MB)
                ui.warn(_("%s: up to %d MB of RAM may be required "
                          "to manage this file\n"
                          "(use 'hg revert %s' to cancel the "
                          "pending addition)\n")
                        % (f, 3 * st.st_size // 1000000, join(f)))
            if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
                ui.warn(_("%s not added: only files and symlinks "
                          "supported currently\n") % join(f))
                rejected.append(f)
            elif ds[f] in 'amn':
                # already added, merged or normal: nothing to do
                ui.warn(_("%s already tracked!\n") % join(f))
            elif ds[f] == 'r':
                # previously removed: resurrect instead of re-adding
                ds.normallookup(f)
            else:
                ds.add(f)
        return rejected
    finally:
        wlock.release()
1407
1408
def forget(self, files, prefix=""):
    """Stop tracking the given files without deleting them.

    Returns the list of files that were not tracked in the first place.
    `prefix` is prepended to file names only for user-facing messages.
    """
    join = lambda f: os.path.join(prefix, f)
    wlock = self._repo.wlock()
    try:
        rejected = []
        for f in files:
            if f not in self._repo.dirstate:
                self._repo.ui.warn(_("%s not tracked!\n") % join(f))
                rejected.append(f)
            elif self._repo.dirstate[f] != 'a':
                # tracked file: mark it removed
                self._repo.dirstate.remove(f)
            else:
                # scheduled-for-add file: just drop the pending add
                self._repo.dirstate.drop(f)
        return rejected
    finally:
        wlock.release()
1424
1425
def undelete(self, list):
    """Restore files marked removed from their parent revision's data."""
    pctxs = self.parents()
    wlock = self._repo.wlock()
    try:
        for f in list:
            if self._repo.dirstate[f] != 'r':
                self._repo.ui.warn(_("%s not removed!\n") % f)
            else:
                # take the content from whichever parent has the file
                fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f]
                t = fctx.data()
                self._repo.wwrite(f, t, fctx.flags())
                self._repo.dirstate.normal(f)
    finally:
        wlock.release()
1439
1440
def copy(self, source, dest):
    """Record in the dirstate that `dest` was copied from `source`.

    `dest` must already exist in the working directory as a regular
    file or symlink; otherwise a warning is issued and nothing is
    recorded.
    """
    try:
        st = self._repo.wvfs.lstat(dest)
    except OSError as err:
        if err.errno != errno.ENOENT:
            raise
        self._repo.ui.warn(_("%s does not exist!\n") % dest)
        return
    if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
        self._repo.ui.warn(_("copy failed: %s is not a file or a "
                             "symbolic link\n") % dest)
    else:
        wlock = self._repo.wlock()
        try:
            if self._repo.dirstate[dest] in '?':
                # untracked destination: start tracking it
                self._repo.dirstate.add(dest)
            elif self._repo.dirstate[dest] in 'r':
                # previously removed: resurrect it
                self._repo.dirstate.normallookup(dest)
            self._repo.dirstate.copy(source, dest)
        finally:
            wlock.release()
1461
1462
def match(self, pats=[], include=None, exclude=None, default='glob',
          listsubrepos=False, badfn=None):
    """Build a matcher for working-directory file names."""
    r = self._repo

    # Only a case insensitive filesystem needs magic to translate user input
    # to actual case in the filesystem.
    if not util.checkcase(r.root):
        return matchmod.icasefsmatcher(r.root, r.getcwd(), pats, include,
                                       exclude, default, r.auditor, self,
                                       listsubrepos=listsubrepos,
                                       badfn=badfn)
    return matchmod.match(r.root, r.getcwd(), pats,
                          include, exclude, default,
                          auditor=r.auditor, ctx=self,
                          listsubrepos=listsubrepos, badfn=badfn)
1477
1478
def _filtersuspectsymlink(self, files):
    """Drop files whose symlink placeholders hold non-symlink content."""
    # nothing to do when there are no files or symlinks work natively
    if not files or self._repo.dirstate._checklink:
        return files

    # Symlink placeholders may get non-symlink-like contents
    # via user error or dereferencing by NFS or Samba servers,
    # so we filter out any placeholders that don't look like a
    # symlink
    sane = []
    for f in files:
        if self.flags(f) == 'l':
            d = self[f].data()
            # a plausible symlink target: nonempty, short, single line,
            # and not binary
            if d == '' or len(d) >= 1024 or '\n' in d or util.binary(d):
                self._repo.ui.debug('ignoring suspect symlink placeholder'
                                    ' "%s"\n' % f)
                continue
        sane.append(f)
    return sane
1496
1497
def _checklookup(self, files):
    """Split possibly-clean files into really-modified and clean ones.

    Returns a pair (modified, fixup): `modified` are files whose
    content or flags actually differ from the first parent; `fixup`
    are files that turned out clean, whose dirstate entries are
    refreshed opportunistically (skipped if the wlock is contended).
    """
    # check for any possibly clean files
    if not files:
        return [], []

    modified = []
    fixup = []
    pctx = self._parents[0]
    # do a full compare of any files that might have changed
    for f in sorted(files):
        if (f not in pctx or self.flags(f) != pctx.flags(f)
            or pctx[f].cmp(self[f])):
            modified.append(f)
        else:
            fixup.append(f)

    # update dirstate for files that are actually clean
    if fixup:
        try:
            # updating the dirstate is optional
            # so we don't wait on the lock
            # wlock can invalidate the dirstate, so cache normal _after_
            # taking the lock
            wlock = self._repo.wlock(False)
            normal = self._repo.dirstate.normal
            try:
                for f in fixup:
                    normal(f)
                # write changes out explicitly, because nesting
                # wlock at runtime may prevent 'wlock.release()'
                # below from doing so for subsequent changing files
                self._repo.dirstate.write()
            finally:
                wlock.release()
        except error.LockError:
            # lock unavailable: skip the optional fixup silently
            pass
    return modified, fixup
1534
1535
def _manifestmatches(self, match, s):
    """Slow path for workingctx

    The fast path is when we compare the working directory to its parent
    which means this function is comparing with a non-parent; therefore we
    need to build a manifest and return what matches.
    """
    mf = self._repo['.']._manifestmatches(match, s)
    for f in s.modified + s.added:
        # _newnode is a phony nodeid standing in for uncommitted content
        mf[f] = _newnode
        mf.setflag(f, self.flags(f))
    for f in s.removed:
        if f in mf:
            del mf[f]
    return mf
1550
1551
1551 def _dirstatestatus(self, match=None, ignored=False, clean=False,
1552 def _dirstatestatus(self, match=None, ignored=False, clean=False,
1552 unknown=False):
1553 unknown=False):
1553 '''Gets the status from the dirstate -- internal use only.'''
1554 '''Gets the status from the dirstate -- internal use only.'''
1554 listignored, listclean, listunknown = ignored, clean, unknown
1555 listignored, listclean, listunknown = ignored, clean, unknown
1555 match = match or matchmod.always(self._repo.root, self._repo.getcwd())
1556 match = match or matchmod.always(self._repo.root, self._repo.getcwd())
1556 subrepos = []
1557 subrepos = []
1557 if '.hgsub' in self:
1558 if '.hgsub' in self:
1558 subrepos = sorted(self.substate)
1559 subrepos = sorted(self.substate)
1559 cmp, s = self._repo.dirstate.status(match, subrepos, listignored,
1560 cmp, s = self._repo.dirstate.status(match, subrepos, listignored,
1560 listclean, listunknown)
1561 listclean, listunknown)
1561
1562
1562 # check for any possibly clean files
1563 # check for any possibly clean files
1563 if cmp:
1564 if cmp:
1564 modified2, fixup = self._checklookup(cmp)
1565 modified2, fixup = self._checklookup(cmp)
1565 s.modified.extend(modified2)
1566 s.modified.extend(modified2)
1566
1567
1567 # update dirstate for files that are actually clean
1568 # update dirstate for files that are actually clean
1568 if fixup and listclean:
1569 if fixup and listclean:
1569 s.clean.extend(fixup)
1570 s.clean.extend(fixup)
1570
1571
1571 if match.always():
1572 if match.always():
1572 # cache for performance
1573 # cache for performance
1573 if s.unknown or s.ignored or s.clean:
1574 if s.unknown or s.ignored or s.clean:
1574 # "_status" is cached with list*=False in the normal route
1575 # "_status" is cached with list*=False in the normal route
1575 self._status = scmutil.status(s.modified, s.added, s.removed,
1576 self._status = scmutil.status(s.modified, s.added, s.removed,
1576 s.deleted, [], [], [])
1577 s.deleted, [], [], [])
1577 else:
1578 else:
1578 self._status = s
1579 self._status = s
1579
1580
1580 return s
1581 return s
1581
1582
1582 def _buildstatus(self, other, s, match, listignored, listclean,
1583 def _buildstatus(self, other, s, match, listignored, listclean,
1583 listunknown):
1584 listunknown):
1584 """build a status with respect to another context
1585 """build a status with respect to another context
1585
1586
1586 This includes logic for maintaining the fast path of status when
1587 This includes logic for maintaining the fast path of status when
1587 comparing the working directory against its parent, which is to skip
1588 comparing the working directory against its parent, which is to skip
1588 building a new manifest if self (working directory) is not comparing
1589 building a new manifest if self (working directory) is not comparing
1589 against its parent (repo['.']).
1590 against its parent (repo['.']).
1590 """
1591 """
1591 s = self._dirstatestatus(match, listignored, listclean, listunknown)
1592 s = self._dirstatestatus(match, listignored, listclean, listunknown)
1592 # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
1593 # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
1593 # might have accidentally ended up with the entire contents of the file
1594 # might have accidentally ended up with the entire contents of the file
1594 # they are supposed to be linking to.
1595 # they are supposed to be linking to.
1595 s.modified[:] = self._filtersuspectsymlink(s.modified)
1596 s.modified[:] = self._filtersuspectsymlink(s.modified)
1596 if other != self._repo['.']:
1597 if other != self._repo['.']:
1597 s = super(workingctx, self)._buildstatus(other, s, match,
1598 s = super(workingctx, self)._buildstatus(other, s, match,
1598 listignored, listclean,
1599 listignored, listclean,
1599 listunknown)
1600 listunknown)
1600 return s
1601 return s
1601
1602
1602 def _matchstatus(self, other, match):
1603 def _matchstatus(self, other, match):
1603 """override the match method with a filter for directory patterns
1604 """override the match method with a filter for directory patterns
1604
1605
1605 We use inheritance to customize the match.bad method only in cases of
1606 We use inheritance to customize the match.bad method only in cases of
1606 workingctx since it belongs only to the working directory when
1607 workingctx since it belongs only to the working directory when
1607 comparing against the parent changeset.
1608 comparing against the parent changeset.
1608
1609
1609 If we aren't comparing against the working directory's parent, then we
1610 If we aren't comparing against the working directory's parent, then we
1610 just use the default match object sent to us.
1611 just use the default match object sent to us.
1611 """
1612 """
1612 superself = super(workingctx, self)
1613 superself = super(workingctx, self)
1613 match = superself._matchstatus(other, match)
1614 match = superself._matchstatus(other, match)
1614 if other != self._repo['.']:
1615 if other != self._repo['.']:
1615 def bad(f, msg):
1616 def bad(f, msg):
1616 # 'f' may be a directory pattern from 'match.files()',
1617 # 'f' may be a directory pattern from 'match.files()',
1617 # so 'f not in ctx1' is not enough
1618 # so 'f not in ctx1' is not enough
1618 if f not in other and not other.hasdir(f):
1619 if f not in other and not other.hasdir(f):
1619 self._repo.ui.warn('%s: %s\n' %
1620 self._repo.ui.warn('%s: %s\n' %
1620 (self._repo.dirstate.pathto(f), msg))
1621 (self._repo.dirstate.pathto(f), msg))
1621 match.bad = bad
1622 match.bad = bad
1622 return match
1623 return match
1623
1624
1624 class committablefilectx(basefilectx):
1625 class committablefilectx(basefilectx):
1625 """A committablefilectx provides common functionality for a file context
1626 """A committablefilectx provides common functionality for a file context
1626 that wants the ability to commit, e.g. workingfilectx or memfilectx."""
1627 that wants the ability to commit, e.g. workingfilectx or memfilectx."""
1627 def __init__(self, repo, path, filelog=None, ctx=None):
1628 def __init__(self, repo, path, filelog=None, ctx=None):
1628 self._repo = repo
1629 self._repo = repo
1629 self._path = path
1630 self._path = path
1630 self._changeid = None
1631 self._changeid = None
1631 self._filerev = self._filenode = None
1632 self._filerev = self._filenode = None
1632
1633
1633 if filelog is not None:
1634 if filelog is not None:
1634 self._filelog = filelog
1635 self._filelog = filelog
1635 if ctx:
1636 if ctx:
1636 self._changectx = ctx
1637 self._changectx = ctx
1637
1638
1638 def __nonzero__(self):
1639 def __nonzero__(self):
1639 return True
1640 return True
1640
1641
1641 def linkrev(self):
1642 def linkrev(self):
1642 # linked to self._changectx no matter if file is modified or not
1643 # linked to self._changectx no matter if file is modified or not
1643 return self.rev()
1644 return self.rev()
1644
1645
1645 def parents(self):
1646 def parents(self):
1646 '''return parent filectxs, following copies if necessary'''
1647 '''return parent filectxs, following copies if necessary'''
1647 def filenode(ctx, path):
1648 def filenode(ctx, path):
1648 return ctx._manifest.get(path, nullid)
1649 return ctx._manifest.get(path, nullid)
1649
1650
1650 path = self._path
1651 path = self._path
1651 fl = self._filelog
1652 fl = self._filelog
1652 pcl = self._changectx._parents
1653 pcl = self._changectx._parents
1653 renamed = self.renamed()
1654 renamed = self.renamed()
1654
1655
1655 if renamed:
1656 if renamed:
1656 pl = [renamed + (None,)]
1657 pl = [renamed + (None,)]
1657 else:
1658 else:
1658 pl = [(path, filenode(pcl[0], path), fl)]
1659 pl = [(path, filenode(pcl[0], path), fl)]
1659
1660
1660 for pc in pcl[1:]:
1661 for pc in pcl[1:]:
1661 pl.append((path, filenode(pc, path), fl))
1662 pl.append((path, filenode(pc, path), fl))
1662
1663
1663 return [self._parentfilectx(p, fileid=n, filelog=l)
1664 return [self._parentfilectx(p, fileid=n, filelog=l)
1664 for p, n, l in pl if n != nullid]
1665 for p, n, l in pl if n != nullid]
1665
1666
1666 def children(self):
1667 def children(self):
1667 return []
1668 return []
1668
1669
1669 class workingfilectx(committablefilectx):
1670 class workingfilectx(committablefilectx):
1670 """A workingfilectx object makes access to data related to a particular
1671 """A workingfilectx object makes access to data related to a particular
1671 file in the working directory convenient."""
1672 file in the working directory convenient."""
1672 def __init__(self, repo, path, filelog=None, workingctx=None):
1673 def __init__(self, repo, path, filelog=None, workingctx=None):
1673 super(workingfilectx, self).__init__(repo, path, filelog, workingctx)
1674 super(workingfilectx, self).__init__(repo, path, filelog, workingctx)
1674
1675
1675 @propertycache
1676 @propertycache
1676 def _changectx(self):
1677 def _changectx(self):
1677 return workingctx(self._repo)
1678 return workingctx(self._repo)
1678
1679
1679 def data(self):
1680 def data(self):
1680 return self._repo.wread(self._path)
1681 return self._repo.wread(self._path)
1681 def renamed(self):
1682 def renamed(self):
1682 rp = self._repo.dirstate.copied(self._path)
1683 rp = self._repo.dirstate.copied(self._path)
1683 if not rp:
1684 if not rp:
1684 return None
1685 return None
1685 return rp, self._changectx._parents[0]._manifest.get(rp, nullid)
1686 return rp, self._changectx._parents[0]._manifest.get(rp, nullid)
1686
1687
1687 def size(self):
1688 def size(self):
1688 return self._repo.wvfs.lstat(self._path).st_size
1689 return self._repo.wvfs.lstat(self._path).st_size
1689 def date(self):
1690 def date(self):
1690 t, tz = self._changectx.date()
1691 t, tz = self._changectx.date()
1691 try:
1692 try:
1692 return (int(self._repo.wvfs.lstat(self._path).st_mtime), tz)
1693 return (int(self._repo.wvfs.lstat(self._path).st_mtime), tz)
1693 except OSError as err:
1694 except OSError as err:
1694 if err.errno != errno.ENOENT:
1695 if err.errno != errno.ENOENT:
1695 raise
1696 raise
1696 return (t, tz)
1697 return (t, tz)
1697
1698
1698 def cmp(self, fctx):
1699 def cmp(self, fctx):
1699 """compare with other file context
1700 """compare with other file context
1700
1701
1701 returns True if different than fctx.
1702 returns True if different than fctx.
1702 """
1703 """
1703 # fctx should be a filectx (not a workingfilectx)
1704 # fctx should be a filectx (not a workingfilectx)
1704 # invert comparison to reuse the same code path
1705 # invert comparison to reuse the same code path
1705 return fctx.cmp(self)
1706 return fctx.cmp(self)
1706
1707
1707 def remove(self, ignoremissing=False):
1708 def remove(self, ignoremissing=False):
1708 """wraps unlink for a repo's working directory"""
1709 """wraps unlink for a repo's working directory"""
1709 util.unlinkpath(self._repo.wjoin(self._path), ignoremissing)
1710 util.unlinkpath(self._repo.wjoin(self._path), ignoremissing)
1710
1711
1711 def write(self, data, flags):
1712 def write(self, data, flags):
1712 """wraps repo.wwrite"""
1713 """wraps repo.wwrite"""
1713 self._repo.wwrite(self._path, data, flags)
1714 self._repo.wwrite(self._path, data, flags)
1714
1715
1715 class workingcommitctx(workingctx):
1716 class workingcommitctx(workingctx):
1716 """A workingcommitctx object makes access to data related to
1717 """A workingcommitctx object makes access to data related to
1717 the revision being committed convenient.
1718 the revision being committed convenient.
1718
1719
1719 This hides changes in the working directory, if they aren't
1720 This hides changes in the working directory, if they aren't
1720 committed in this context.
1721 committed in this context.
1721 """
1722 """
1722 def __init__(self, repo, changes,
1723 def __init__(self, repo, changes,
1723 text="", user=None, date=None, extra=None):
1724 text="", user=None, date=None, extra=None):
1724 super(workingctx, self).__init__(repo, text, user, date, extra,
1725 super(workingctx, self).__init__(repo, text, user, date, extra,
1725 changes)
1726 changes)
1726
1727
1727 def _dirstatestatus(self, match=None, ignored=False, clean=False,
1728 def _dirstatestatus(self, match=None, ignored=False, clean=False,
1728 unknown=False):
1729 unknown=False):
1729 """Return matched files only in ``self._status``
1730 """Return matched files only in ``self._status``
1730
1731
1731 Uncommitted files appear "clean" via this context, even if
1732 Uncommitted files appear "clean" via this context, even if
1732 they aren't actually so in the working directory.
1733 they aren't actually so in the working directory.
1733 """
1734 """
1734 match = match or matchmod.always(self._repo.root, self._repo.getcwd())
1735 match = match or matchmod.always(self._repo.root, self._repo.getcwd())
1735 if clean:
1736 if clean:
1736 clean = [f for f in self._manifest if f not in self._changedset]
1737 clean = [f for f in self._manifest if f not in self._changedset]
1737 else:
1738 else:
1738 clean = []
1739 clean = []
1739 return scmutil.status([f for f in self._status.modified if match(f)],
1740 return scmutil.status([f for f in self._status.modified if match(f)],
1740 [f for f in self._status.added if match(f)],
1741 [f for f in self._status.added if match(f)],
1741 [f for f in self._status.removed if match(f)],
1742 [f for f in self._status.removed if match(f)],
1742 [], [], [], clean)
1743 [], [], [], clean)
1743
1744
1744 @propertycache
1745 @propertycache
1745 def _changedset(self):
1746 def _changedset(self):
1746 """Return the set of files changed in this context
1747 """Return the set of files changed in this context
1747 """
1748 """
1748 changed = set(self._status.modified)
1749 changed = set(self._status.modified)
1749 changed.update(self._status.added)
1750 changed.update(self._status.added)
1750 changed.update(self._status.removed)
1751 changed.update(self._status.removed)
1751 return changed
1752 return changed
1752
1753
1753 class memctx(committablectx):
1754 class memctx(committablectx):
1754 """Use memctx to perform in-memory commits via localrepo.commitctx().
1755 """Use memctx to perform in-memory commits via localrepo.commitctx().
1755
1756
1756 Revision information is supplied at initialization time while
1757 Revision information is supplied at initialization time while
1757 related files data and is made available through a callback
1758 related files data and is made available through a callback
1758 mechanism. 'repo' is the current localrepo, 'parents' is a
1759 mechanism. 'repo' is the current localrepo, 'parents' is a
1759 sequence of two parent revisions identifiers (pass None for every
1760 sequence of two parent revisions identifiers (pass None for every
1760 missing parent), 'text' is the commit message and 'files' lists
1761 missing parent), 'text' is the commit message and 'files' lists
1761 names of files touched by the revision (normalized and relative to
1762 names of files touched by the revision (normalized and relative to
1762 repository root).
1763 repository root).
1763
1764
1764 filectxfn(repo, memctx, path) is a callable receiving the
1765 filectxfn(repo, memctx, path) is a callable receiving the
1765 repository, the current memctx object and the normalized path of
1766 repository, the current memctx object and the normalized path of
1766 requested file, relative to repository root. It is fired by the
1767 requested file, relative to repository root. It is fired by the
1767 commit function for every file in 'files', but calls order is
1768 commit function for every file in 'files', but calls order is
1768 undefined. If the file is available in the revision being
1769 undefined. If the file is available in the revision being
1769 committed (updated or added), filectxfn returns a memfilectx
1770 committed (updated or added), filectxfn returns a memfilectx
1770 object. If the file was removed, filectxfn raises an
1771 object. If the file was removed, filectxfn raises an
1771 IOError. Moved files are represented by marking the source file
1772 IOError. Moved files are represented by marking the source file
1772 removed and the new file added with copy information (see
1773 removed and the new file added with copy information (see
1773 memfilectx).
1774 memfilectx).
1774
1775
1775 user receives the committer name and defaults to current
1776 user receives the committer name and defaults to current
1776 repository username, date is the commit date in any format
1777 repository username, date is the commit date in any format
1777 supported by util.parsedate() and defaults to current date, extra
1778 supported by util.parsedate() and defaults to current date, extra
1778 is a dictionary of metadata or is left empty.
1779 is a dictionary of metadata or is left empty.
1779 """
1780 """
1780
1781
1781 # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
1782 # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
1782 # Extensions that need to retain compatibility across Mercurial 3.1 can use
1783 # Extensions that need to retain compatibility across Mercurial 3.1 can use
1783 # this field to determine what to do in filectxfn.
1784 # this field to determine what to do in filectxfn.
1784 _returnnoneformissingfiles = True
1785 _returnnoneformissingfiles = True
1785
1786
1786 def __init__(self, repo, parents, text, files, filectxfn, user=None,
1787 def __init__(self, repo, parents, text, files, filectxfn, user=None,
1787 date=None, extra=None, editor=False):
1788 date=None, extra=None, editor=False):
1788 super(memctx, self).__init__(repo, text, user, date, extra)
1789 super(memctx, self).__init__(repo, text, user, date, extra)
1789 self._rev = None
1790 self._rev = None
1790 self._node = None
1791 self._node = None
1791 parents = [(p or nullid) for p in parents]
1792 parents = [(p or nullid) for p in parents]
1792 p1, p2 = parents
1793 p1, p2 = parents
1793 self._parents = [changectx(self._repo, p) for p in (p1, p2)]
1794 self._parents = [changectx(self._repo, p) for p in (p1, p2)]
1794 files = sorted(set(files))
1795 files = sorted(set(files))
1795 self._files = files
1796 self._files = files
1796 self.substate = {}
1797 self.substate = {}
1797
1798
1798 # if store is not callable, wrap it in a function
1799 # if store is not callable, wrap it in a function
1799 if not callable(filectxfn):
1800 if not callable(filectxfn):
1800 def getfilectx(repo, memctx, path):
1801 def getfilectx(repo, memctx, path):
1801 fctx = filectxfn[path]
1802 fctx = filectxfn[path]
1802 # this is weird but apparently we only keep track of one parent
1803 # this is weird but apparently we only keep track of one parent
1803 # (why not only store that instead of a tuple?)
1804 # (why not only store that instead of a tuple?)
1804 copied = fctx.renamed()
1805 copied = fctx.renamed()
1805 if copied:
1806 if copied:
1806 copied = copied[0]
1807 copied = copied[0]
1807 return memfilectx(repo, path, fctx.data(),
1808 return memfilectx(repo, path, fctx.data(),
1808 islink=fctx.islink(), isexec=fctx.isexec(),
1809 islink=fctx.islink(), isexec=fctx.isexec(),
1809 copied=copied, memctx=memctx)
1810 copied=copied, memctx=memctx)
1810 self._filectxfn = getfilectx
1811 self._filectxfn = getfilectx
1811 else:
1812 else:
1812 # "util.cachefunc" reduces invocation of possibly expensive
1813 # "util.cachefunc" reduces invocation of possibly expensive
1813 # "filectxfn" for performance (e.g. converting from another VCS)
1814 # "filectxfn" for performance (e.g. converting from another VCS)
1814 self._filectxfn = util.cachefunc(filectxfn)
1815 self._filectxfn = util.cachefunc(filectxfn)
1815
1816
1816 if extra:
1817 if extra:
1817 self._extra = extra.copy()
1818 self._extra = extra.copy()
1818 else:
1819 else:
1819 self._extra = {}
1820 self._extra = {}
1820
1821
1821 if self._extra.get('branch', '') == '':
1822 if self._extra.get('branch', '') == '':
1822 self._extra['branch'] = 'default'
1823 self._extra['branch'] = 'default'
1823
1824
1824 if editor:
1825 if editor:
1825 self._text = editor(self._repo, self, [])
1826 self._text = editor(self._repo, self, [])
1826 self._repo.savecommitmessage(self._text)
1827 self._repo.savecommitmessage(self._text)
1827
1828
1828 def filectx(self, path, filelog=None):
1829 def filectx(self, path, filelog=None):
1829 """get a file context from the working directory
1830 """get a file context from the working directory
1830
1831
1831 Returns None if file doesn't exist and should be removed."""
1832 Returns None if file doesn't exist and should be removed."""
1832 return self._filectxfn(self._repo, self, path)
1833 return self._filectxfn(self._repo, self, path)
1833
1834
1834 def commit(self):
1835 def commit(self):
1835 """commit context to the repo"""
1836 """commit context to the repo"""
1836 return self._repo.commitctx(self)
1837 return self._repo.commitctx(self)
1837
1838
1838 @propertycache
1839 @propertycache
1839 def _manifest(self):
1840 def _manifest(self):
1840 """generate a manifest based on the return values of filectxfn"""
1841 """generate a manifest based on the return values of filectxfn"""
1841
1842
1842 # keep this simple for now; just worry about p1
1843 # keep this simple for now; just worry about p1
1843 pctx = self._parents[0]
1844 pctx = self._parents[0]
1844 man = pctx.manifest().copy()
1845 man = pctx.manifest().copy()
1845
1846
1846 for f in self._status.modified:
1847 for f in self._status.modified:
1847 p1node = nullid
1848 p1node = nullid
1848 p2node = nullid
1849 p2node = nullid
1849 p = pctx[f].parents() # if file isn't in pctx, check p2?
1850 p = pctx[f].parents() # if file isn't in pctx, check p2?
1850 if len(p) > 0:
1851 if len(p) > 0:
1851 p1node = p[0].node()
1852 p1node = p[0].node()
1852 if len(p) > 1:
1853 if len(p) > 1:
1853 p2node = p[1].node()
1854 p2node = p[1].node()
1854 man[f] = revlog.hash(self[f].data(), p1node, p2node)
1855 man[f] = revlog.hash(self[f].data(), p1node, p2node)
1855
1856
1856 for f in self._status.added:
1857 for f in self._status.added:
1857 man[f] = revlog.hash(self[f].data(), nullid, nullid)
1858 man[f] = revlog.hash(self[f].data(), nullid, nullid)
1858
1859
1859 for f in self._status.removed:
1860 for f in self._status.removed:
1860 if f in man:
1861 if f in man:
1861 del man[f]
1862 del man[f]
1862
1863
1863 return man
1864 return man
1864
1865
1865 @propertycache
1866 @propertycache
1866 def _status(self):
1867 def _status(self):
1867 """Calculate exact status from ``files`` specified at construction
1868 """Calculate exact status from ``files`` specified at construction
1868 """
1869 """
1869 man1 = self.p1().manifest()
1870 man1 = self.p1().manifest()
1870 p2 = self._parents[1]
1871 p2 = self._parents[1]
1871 # "1 < len(self._parents)" can't be used for checking
1872 # "1 < len(self._parents)" can't be used for checking
1872 # existence of the 2nd parent, because "memctx._parents" is
1873 # existence of the 2nd parent, because "memctx._parents" is
1873 # explicitly initialized by the list, of which length is 2.
1874 # explicitly initialized by the list, of which length is 2.
1874 if p2.node() != nullid:
1875 if p2.node() != nullid:
1875 man2 = p2.manifest()
1876 man2 = p2.manifest()
1876 managing = lambda f: f in man1 or f in man2
1877 managing = lambda f: f in man1 or f in man2
1877 else:
1878 else:
1878 managing = lambda f: f in man1
1879 managing = lambda f: f in man1
1879
1880
1880 modified, added, removed = [], [], []
1881 modified, added, removed = [], [], []
1881 for f in self._files:
1882 for f in self._files:
1882 if not managing(f):
1883 if not managing(f):
1883 added.append(f)
1884 added.append(f)
1884 elif self[f]:
1885 elif self[f]:
1885 modified.append(f)
1886 modified.append(f)
1886 else:
1887 else:
1887 removed.append(f)
1888 removed.append(f)
1888
1889
1889 return scmutil.status(modified, added, removed, [], [], [], [])
1890 return scmutil.status(modified, added, removed, [], [], [], [])
1890
1891
1891 class memfilectx(committablefilectx):
1892 class memfilectx(committablefilectx):
1892 """memfilectx represents an in-memory file to commit.
1893 """memfilectx represents an in-memory file to commit.
1893
1894
1894 See memctx and committablefilectx for more details.
1895 See memctx and committablefilectx for more details.
1895 """
1896 """
1896 def __init__(self, repo, path, data, islink=False,
1897 def __init__(self, repo, path, data, islink=False,
1897 isexec=False, copied=None, memctx=None):
1898 isexec=False, copied=None, memctx=None):
1898 """
1899 """
1899 path is the normalized file path relative to repository root.
1900 path is the normalized file path relative to repository root.
1900 data is the file content as a string.
1901 data is the file content as a string.
1901 islink is True if the file is a symbolic link.
1902 islink is True if the file is a symbolic link.
1902 isexec is True if the file is executable.
1903 isexec is True if the file is executable.
1903 copied is the source file path if current file was copied in the
1904 copied is the source file path if current file was copied in the
1904 revision being committed, or None."""
1905 revision being committed, or None."""
1905 super(memfilectx, self).__init__(repo, path, None, memctx)
1906 super(memfilectx, self).__init__(repo, path, None, memctx)
1906 self._data = data
1907 self._data = data
1907 self._flags = (islink and 'l' or '') + (isexec and 'x' or '')
1908 self._flags = (islink and 'l' or '') + (isexec and 'x' or '')
1908 self._copied = None
1909 self._copied = None
1909 if copied:
1910 if copied:
1910 self._copied = (copied, nullid)
1911 self._copied = (copied, nullid)
1911
1912
1912 def data(self):
1913 def data(self):
1913 return self._data
1914 return self._data
1914 def size(self):
1915 def size(self):
1915 return len(self.data())
1916 return len(self.data())
1916 def flags(self):
1917 def flags(self):
1917 return self._flags
1918 return self._flags
1918 def renamed(self):
1919 def renamed(self):
1919 return self._copied
1920 return self._copied
1920
1921
1921 def remove(self, ignoremissing=False):
1922 def remove(self, ignoremissing=False):
1922 """wraps unlink for a repo's working directory"""
1923 """wraps unlink for a repo's working directory"""
1923 # need to figure out what to do here
1924 # need to figure out what to do here
1924 del self._changectx[self._path]
1925 del self._changectx[self._path]
1925
1926
1926 def write(self, data, flags):
1927 def write(self, data, flags):
1927 """wraps repo.wwrite"""
1928 """wraps repo.wwrite"""
1928 self._data = data
1929 self._data = data
@@ -1,1198 +1,1198
1 # merge.py - directory-level update/merge handling for Mercurial
1 # merge.py - directory-level update/merge handling for Mercurial
2 #
2 #
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 import struct
8 import struct
9
9
10 from node import nullid, nullrev, hex, bin
10 from node import nullid, nullrev, hex, bin
11 from i18n import _
11 from i18n import _
12 from mercurial import obsolete
12 from mercurial import obsolete
13 import error as errormod, util, filemerge, copies, subrepo, worker
13 import error as errormod, util, filemerge, copies, subrepo, worker
14 import errno, os, shutil
14 import errno, os, shutil
15
15
16 _pack = struct.pack
16 _pack = struct.pack
17 _unpack = struct.unpack
17 _unpack = struct.unpack
18
18
19 def _droponode(data):
19 def _droponode(data):
20 # used for compatibility for v1
20 # used for compatibility for v1
21 bits = data.split('\0')
21 bits = data.split('\0')
22 bits = bits[:-2] + bits[-1:]
22 bits = bits[:-2] + bits[-1:]
23 return '\0'.join(bits)
23 return '\0'.join(bits)
24
24
class mergestate(object):
    '''track 3-way merge state of individual files

    it is stored on disk when needed. Two files are used, one with an old
    format (v1), one with a new format (v2). Both contain similar data, but
    the new format can store new kinds of field.

    Current new format is a list of arbitrary records of the form:

    [type][length][content]

    Type is a single character, length is a 4 bytes integer, content is an
    arbitrary suites of bytes of length `length`.

    Type should be a letter. Capital letters are mandatory records,
    Mercurial should abort if they are unknown. Lower case records can be
    safely ignored.

    Currently known records:

    L: the node of the "local" part of the merge (hexified version)
    O: the node of the "other" part of the merge (hexified version)
    F: a file to be merged entry
    '''
    statepathv1 = 'merge/state'
    statepathv2 = 'merge/state2'

    def __init__(self, repo):
        """Load any merge state previously saved for *repo* from disk."""
        self._repo = repo
        self._dirty = False
        self._read()

    def reset(self, node=None, other=None):
        """Forget all merge state and the saved pre-merge file copies.

        When given, *node*/*other* become the new "local" and "other"
        changeset nodes of the (new) merge.
        """
        self._state = {}
        self._local = None
        self._other = None
        if node:
            self._local = node
            self._other = other
        # also drop the backed-up local file contents (best effort:
        # ignore_errors=True)
        shutil.rmtree(self._repo.join('merge'), True)
        self._dirty = False

    def _read(self):
        """Analyse each record content to restore a serialized state from disk

        This function process "record" entry produced by the de-serialization
        of on disk file.
        """
        self._state = {}
        self._local = None
        self._other = None
        records = self._readrecords()
        for rtype, record in records:
            if rtype == 'L':
                self._local = bin(record)
            elif rtype == 'O':
                self._other = bin(record)
            elif rtype == 'F':
                bits = record.split('\0')
                self._state[bits[0]] = bits[1:]
            elif not rtype.islower():
                # unknown mandatory (upper-case) record: refuse to guess
                raise util.Abort(_('unsupported merge state record: %s')
                                 % rtype)
        self._dirty = False

    def _readrecords(self):
        """Read merge state from disk and return a list of record (TYPE, data)

        We read data from both v1 and v2 files and decide which one to use.

        V1 has been used by version prior to 2.9.1 and contains less data than
        v2. We read both versions and check if no data in v2 contradicts
        v1. If there is not contradiction we can safely assume that both v1
        and v2 were written at the same time and use the extract data in v2. If
        there is contradiction we ignore v2 content as we assume an old version
        of Mercurial has overwritten the mergestate file and left an old v2
        file around.

        returns list of record [(TYPE, data), ...]"""
        v1records = self._readrecordsv1()
        v2records = self._readrecordsv2()
        oldv2 = set() # old format version of v2 record
        for rec in v2records:
            if rec[0] == 'L':
                oldv2.add(rec)
            elif rec[0] == 'F':
                # drop the onode data (not contained in v1)
                oldv2.add(('F', _droponode(rec[1])))
        for rec in v1records:
            if rec not in oldv2:
                # v1 file is newer than v2 file, use it
                # we have to infer the "other" changeset of the merge
                # we cannot do better than that with v1 of the format
                mctx = self._repo[None].parents()[-1]
                v1records.append(('O', mctx.hex()))
                # add place holder "other" file node information
                # nobody is using it yet so we do no need to fetch the data
                # if mctx was wrong `mctx[bits[-2]]` may fails.
                for idx, r in enumerate(v1records):
                    if r[0] == 'F':
                        bits = r[1].split('\0')
                        bits.insert(-2, '')
                        v1records[idx] = (r[0], '\0'.join(bits))
                return v1records
        # no contradiction found: v2 is trustworthy
        # (the loop above has no break, so the former "for/else" clause
        # was unconditional; a plain return says the same thing clearly)
        return v2records

    def _readrecordsv1(self):
        """read on disk merge state for version 1 file

        returns list of record [(TYPE, data), ...]

        Note: the "F" data from this file are one entry short
        (no "other file node" entry)
        """
        records = []
        try:
            f = self._repo.vfs(self.statepathv1)
            for i, l in enumerate(f):
                # first line is the local node, the rest are file entries;
                # [:-1] strips the trailing newline
                if i == 0:
                    records.append(('L', l[:-1]))
                else:
                    records.append(('F', l[:-1]))
            f.close()
        except IOError as err:
            # a missing state file simply means "no merge in progress"
            if err.errno != errno.ENOENT:
                raise
        return records

    def _readrecordsv2(self):
        """read on disk merge state for version 2 file

        returns list of record [(TYPE, data), ...]
        """
        records = []
        try:
            f = self._repo.vfs(self.statepathv2)
            data = f.read()
            off = 0
            end = len(data)
            while off < end:
                # wire format: 1-byte type, 4-byte big-endian length, payload
                rtype = data[off]
                off += 1
                length = _unpack('>I', data[off:(off + 4)])[0]
                off += 4
                record = data[off:(off + length)]
                off += length
                records.append((rtype, record))
            f.close()
        except IOError as err:
            if err.errno != errno.ENOENT:
                raise
        return records

    def active(self):
        """Whether mergestate is active.

        Returns True if there appears to be mergestate. This is a rough proxy
        for "is a merge in progress."
        """
        # Check local variables before looking at filesystem for performance
        # reasons.
        return bool(self._local) or bool(self._state) or \
               self._repo.vfs.exists(self.statepathv1) or \
               self._repo.vfs.exists(self.statepathv2)

    def commit(self):
        """Write current state on disk (if necessary)"""
        if self._dirty:
            records = []
            records.append(('L', hex(self._local)))
            records.append(('O', hex(self._other)))
            for d, v in self._state.iteritems():
                records.append(('F', '\0'.join([d] + v)))
            self._writerecords(records)
            self._dirty = False

    def _writerecords(self, records):
        """Write current state on disk (both v1 and v2)"""
        self._writerecordsv1(records)
        self._writerecordsv2(records)

    def _writerecordsv1(self, records):
        """Write current state on disk in a version 1 file

        v1 only carries the local node and the 'F' entries (with the
        "other node" field dropped).
        """
        f = self._repo.vfs(self.statepathv1, 'w')
        irecords = iter(records)
        # next() builtin instead of .next(): equivalent on Python >= 2.6
        # and forward-compatible with Python 3 (PEP 3114)
        lrecords = next(irecords)
        assert lrecords[0] == 'L'
        f.write(hex(self._local) + '\n')
        for rtype, data in irecords:
            if rtype == 'F':
                f.write('%s\n' % _droponode(data))
        f.close()

    def _writerecordsv2(self, records):
        """Write current state on disk in a version 2 file"""
        f = self._repo.vfs(self.statepathv2, 'w')
        for key, data in records:
            assert len(key) == 1
            format = '>sI%is' % len(data)
            f.write(_pack(format, key, len(data), data))
        f.close()

    def add(self, fcl, fco, fca, fd):
        """add a new (potentially?) conflicting file the merge state
        fcl: file context for local,
        fco: file context for remote,
        fca: file context for ancestors,
        fd: file path of the resulting merge.

        note: also write the local version to the `.hg/merge` directory.
        """
        # the backup copy of the local content is keyed by the hash of
        # its path ('localkey' rather than 'hash': avoid shadowing the
        # builtin)
        localkey = util.sha1(fcl.path()).hexdigest()
        self._repo.vfs.write('merge/' + localkey, fcl.data())
        self._state[fd] = ['u', localkey, fcl.path(),
                           fca.path(), hex(fca.filenode()),
                           fco.path(), hex(fco.filenode()),
                           fcl.flags()]
        self._dirty = True

    def __contains__(self, dfile):
        return dfile in self._state

    def __getitem__(self, dfile):
        # the first field of a state entry is the merge state character
        # ('u' unresolved, 'r' resolved)
        return self._state[dfile][0]

    def __iter__(self):
        return iter(sorted(self._state))

    def files(self):
        return self._state.keys()

    def mark(self, dfile, state):
        """Set the merge state character for *dfile* (e.g. 'u' or 'r')."""
        self._state[dfile][0] = state
        self._dirty = True

    def unresolved(self):
        """Obtain the paths of unresolved files."""

        for f, entry in self._state.items():
            if entry[0] == 'u':
                yield f

    def resolve(self, dfile, wctx, labels=None):
        """rerun merge process for file path `dfile`"""
        if self[dfile] == 'r':
            return 0
        stateentry = self._state[dfile]
        state, localkey, lfile, afile, anode, ofile, onode, flags = stateentry
        octx = self._repo[self._other]
        fcd = wctx[dfile]
        fco = octx[ofile]
        fca = self._repo.filectx(afile, fileid=anode)
        # "premerge" x flags
        flo = fco.flags()
        fla = fca.flags()
        if 'x' in flags + flo + fla and 'l' not in flags + flo + fla:
            if fca.node() == nullid:
                self._repo.ui.warn(_('warning: cannot merge flags for %s\n') %
                                   afile)
            elif flags == fla:
                flags = flo
        # restore local
        f = self._repo.vfs('merge/' + localkey)
        self._repo.wwrite(dfile, f.read(), flags)
        f.close()
        r = filemerge.filemerge(self._repo, self._local, lfile, fcd, fco, fca,
                                labels=labels)
        if r is None:
            # no real conflict
            del self._state[dfile]
            self._dirty = True
        elif not r:
            self.mark(dfile, 'r')
        return r
299
299
300 def _checkunknownfile(repo, wctx, mctx, f, f2=None):
300 def _checkunknownfile(repo, wctx, mctx, f, f2=None):
301 if f2 is None:
301 if f2 is None:
302 f2 = f
302 f2 = f
303 return (os.path.isfile(repo.wjoin(f))
303 return (os.path.isfile(repo.wjoin(f))
304 and repo.wvfs.audit.check(f)
304 and repo.wvfs.audit.check(f)
305 and repo.dirstate.normalize(f) not in repo.dirstate
305 and repo.dirstate.normalize(f) not in repo.dirstate
306 and mctx[f2].cmp(wctx[f]))
306 and mctx[f2].cmp(wctx[f]))
307
307
def _checkunknownfiles(repo, wctx, mctx, force, actions):
    """
    Considers any actions that care about the presence of conflicting unknown
    files. For some actions, the result is to abort; for others, it is to
    choose a different action.
    """
    conflicts = []
    if not force:
        # collect untracked files that would be clobbered by a create-style
        # action and differ from the incoming content
        for f, (m, args, msg) in actions.iteritems():
            if m in ('c', 'dc'):
                if _checkunknownfile(repo, wctx, mctx, f):
                    conflicts.append(f)
            elif m == 'dg' and _checkunknownfile(repo, wctx, mctx, f, args[0]):
                conflicts.append(f)

    for f in sorted(conflicts):
        repo.ui.warn(_("%s: untracked file differs\n") % f)
    if conflicts:
        raise util.Abort(_("untracked files in working directory differ "
                           "from files in requested revision"))

    # no (blocking) conflicts: downgrade 'c'/'cm' to concrete actions
    for f, (m, args, msg) in actions.iteritems():
        if m == 'c':
            actions[f] = ('g', args, msg)
        elif m == 'cm':
            fl2, anc = args
            if _checkunknownfile(repo, wctx, mctx, f):
                actions[f] = ('m', (f, f, None, False, anc),
                              "remote differs from untracked local")
            else:
                actions[f] = ('g', (fl2,), "remote created")
341
341
342 def _forgetremoved(wctx, mctx, branchmerge):
342 def _forgetremoved(wctx, mctx, branchmerge):
343 """
343 """
344 Forget removed files
344 Forget removed files
345
345
346 If we're jumping between revisions (as opposed to merging), and if
346 If we're jumping between revisions (as opposed to merging), and if
347 neither the working directory nor the target rev has the file,
347 neither the working directory nor the target rev has the file,
348 then we need to remove it from the dirstate, to prevent the
348 then we need to remove it from the dirstate, to prevent the
349 dirstate from listing the file when it is no longer in the
349 dirstate from listing the file when it is no longer in the
350 manifest.
350 manifest.
351
351
352 If we're merging, and the other revision has removed a file
352 If we're merging, and the other revision has removed a file
353 that is not present in the working directory, we need to mark it
353 that is not present in the working directory, we need to mark it
354 as removed.
354 as removed.
355 """
355 """
356
356
357 actions = {}
357 actions = {}
358 m = 'f'
358 m = 'f'
359 if branchmerge:
359 if branchmerge:
360 m = 'r'
360 m = 'r'
361 for f in wctx.deleted():
361 for f in wctx.deleted():
362 if f not in mctx:
362 if f not in mctx:
363 actions[f] = m, None, "forget deleted"
363 actions[f] = m, None, "forget deleted"
364
364
365 if not branchmerge:
365 if not branchmerge:
366 for f in wctx.removed():
366 for f in wctx.removed():
367 if f not in mctx:
367 if f not in mctx:
368 actions[f] = 'f', None, "forget removed"
368 actions[f] = 'f', None, "forget removed"
369
369
370 return actions
370 return actions
371
371
def _checkcollision(repo, wmf, actions):
    """Abort if the merged manifest would contain a case-folding collision.

    wmf is the working manifest; actions is the per-type action dict
    produced by calculateupdates.
    """
    # build provisional merged manifest up
    provisional = set(wmf)

    if actions:
        # k, dr, e and rd are no-op
        for kind in ('a', 'f', 'g', 'cd', 'dc'):
            for f, args, msg in actions[kind]:
                provisional.add(f)
        for f, args, msg in actions['r']:
            provisional.discard(f)
        for f, args, msg in actions['dm']:
            f2, flags = args
            # directory-rename move: the source goes away
            provisional.discard(f2)
            provisional.add(f)
        for f, args, msg in actions['dg']:
            provisional.add(f)
        for f, args, msg in actions['m']:
            f1, f2, fa, move, anc = args
            if move:
                provisional.discard(f1)
            provisional.add(f)

    # check case-folding collision in provisional merged manifest
    foldmap = {}
    for f in sorted(provisional):
        fold = util.normcase(f)
        if fold in foldmap:
            raise util.Abort(_("case-folding collision between %s and %s")
                             % (f, foldmap[fold]))
        foldmap[fold] = f
403
403
404 def manifestmerge(repo, wctx, p2, pa, branchmerge, force, partial,
404 def manifestmerge(repo, wctx, p2, pa, branchmerge, force, partial,
405 acceptremote, followcopies):
405 acceptremote, followcopies):
406 """
406 """
407 Merge p1 and p2 with ancestor pa and generate merge action list
407 Merge p1 and p2 with ancestor pa and generate merge action list
408
408
409 branchmerge and force are as passed in to update
409 branchmerge and force are as passed in to update
410 partial = function to filter file lists
410 partial = function to filter file lists
411 acceptremote = accept the incoming changes without prompting
411 acceptremote = accept the incoming changes without prompting
412 """
412 """
413
413
414 copy, movewithdir, diverge, renamedelete = {}, {}, {}, {}
414 copy, movewithdir, diverge, renamedelete = {}, {}, {}, {}
415
415
416 # manifests fetched in order are going to be faster, so prime the caches
416 # manifests fetched in order are going to be faster, so prime the caches
417 [x.manifest() for x in
417 [x.manifest() for x in
418 sorted(wctx.parents() + [p2, pa], key=lambda x: x.rev())]
418 sorted(wctx.parents() + [p2, pa], key=lambda x: x.rev())]
419
419
420 if followcopies:
420 if followcopies:
421 ret = copies.mergecopies(repo, wctx, p2, pa)
421 ret = copies.mergecopies(repo, wctx, p2, pa)
422 copy, movewithdir, diverge, renamedelete = ret
422 copy, movewithdir, diverge, renamedelete = ret
423
423
424 repo.ui.note(_("resolving manifests\n"))
424 repo.ui.note(_("resolving manifests\n"))
425 repo.ui.debug(" branchmerge: %s, force: %s, partial: %s\n"
425 repo.ui.debug(" branchmerge: %s, force: %s, partial: %s\n"
426 % (bool(branchmerge), bool(force), bool(partial)))
426 % (bool(branchmerge), bool(force), bool(partial)))
427 repo.ui.debug(" ancestor: %s, local: %s, remote: %s\n" % (pa, wctx, p2))
427 repo.ui.debug(" ancestor: %s, local: %s, remote: %s\n" % (pa, wctx, p2))
428
428
429 m1, m2, ma = wctx.manifest(), p2.manifest(), pa.manifest()
429 m1, m2, ma = wctx.manifest(), p2.manifest(), pa.manifest()
430 copied = set(copy.values())
430 copied = set(copy.values())
431 copied.update(movewithdir.values())
431 copied.update(movewithdir.values())
432
432
433 if '.hgsubstate' in m1:
433 if '.hgsubstate' in m1:
434 # check whether sub state is modified
434 # check whether sub state is modified
435 for s in sorted(wctx.substate):
435 for s in sorted(wctx.substate):
436 if wctx.sub(s).dirty():
436 if wctx.sub(s).dirty():
437 m1['.hgsubstate'] += '+'
437 m1['.hgsubstate'] += '+'
438 break
438 break
439
439
440 # Compare manifests
440 # Compare manifests
441 diff = m1.diff(m2)
441 diff = m1.diff(m2)
442
442
443 actions = {}
443 actions = {}
444 for f, ((n1, fl1), (n2, fl2)) in diff.iteritems():
444 for f, ((n1, fl1), (n2, fl2)) in diff.iteritems():
445 if partial and not partial(f):
445 if partial and not partial(f):
446 continue
446 continue
447 if n1 and n2: # file exists on both local and remote side
447 if n1 and n2: # file exists on both local and remote side
448 if f not in ma:
448 if f not in ma:
449 fa = copy.get(f, None)
449 fa = copy.get(f, None)
450 if fa is not None:
450 if fa is not None:
451 actions[f] = ('m', (f, f, fa, False, pa.node()),
451 actions[f] = ('m', (f, f, fa, False, pa.node()),
452 "both renamed from " + fa)
452 "both renamed from " + fa)
453 else:
453 else:
454 actions[f] = ('m', (f, f, None, False, pa.node()),
454 actions[f] = ('m', (f, f, None, False, pa.node()),
455 "both created")
455 "both created")
456 else:
456 else:
457 a = ma[f]
457 a = ma[f]
458 fla = ma.flags(f)
458 fla = ma.flags(f)
459 nol = 'l' not in fl1 + fl2 + fla
459 nol = 'l' not in fl1 + fl2 + fla
460 if n2 == a and fl2 == fla:
460 if n2 == a and fl2 == fla:
461 actions[f] = ('k' , (), "remote unchanged")
461 actions[f] = ('k' , (), "remote unchanged")
462 elif n1 == a and fl1 == fla: # local unchanged - use remote
462 elif n1 == a and fl1 == fla: # local unchanged - use remote
463 if n1 == n2: # optimization: keep local content
463 if n1 == n2: # optimization: keep local content
464 actions[f] = ('e', (fl2,), "update permissions")
464 actions[f] = ('e', (fl2,), "update permissions")
465 else:
465 else:
466 actions[f] = ('g', (fl2,), "remote is newer")
466 actions[f] = ('g', (fl2,), "remote is newer")
467 elif nol and n2 == a: # remote only changed 'x'
467 elif nol and n2 == a: # remote only changed 'x'
468 actions[f] = ('e', (fl2,), "update permissions")
468 actions[f] = ('e', (fl2,), "update permissions")
469 elif nol and n1 == a: # local only changed 'x'
469 elif nol and n1 == a: # local only changed 'x'
470 actions[f] = ('g', (fl1,), "remote is newer")
470 actions[f] = ('g', (fl1,), "remote is newer")
471 else: # both changed something
471 else: # both changed something
472 actions[f] = ('m', (f, f, f, False, pa.node()),
472 actions[f] = ('m', (f, f, f, False, pa.node()),
473 "versions differ")
473 "versions differ")
474 elif n1: # file exists only on local side
474 elif n1: # file exists only on local side
475 if f in copied:
475 if f in copied:
476 pass # we'll deal with it on m2 side
476 pass # we'll deal with it on m2 side
477 elif f in movewithdir: # directory rename, move local
477 elif f in movewithdir: # directory rename, move local
478 f2 = movewithdir[f]
478 f2 = movewithdir[f]
479 if f2 in m2:
479 if f2 in m2:
480 actions[f2] = ('m', (f, f2, None, True, pa.node()),
480 actions[f2] = ('m', (f, f2, None, True, pa.node()),
481 "remote directory rename, both created")
481 "remote directory rename, both created")
482 else:
482 else:
483 actions[f2] = ('dm', (f, fl1),
483 actions[f2] = ('dm', (f, fl1),
484 "remote directory rename - move from " + f)
484 "remote directory rename - move from " + f)
485 elif f in copy:
485 elif f in copy:
486 f2 = copy[f]
486 f2 = copy[f]
487 actions[f] = ('m', (f, f2, f2, False, pa.node()),
487 actions[f] = ('m', (f, f2, f2, False, pa.node()),
488 "local copied/moved from " + f2)
488 "local copied/moved from " + f2)
489 elif f in ma: # clean, a different, no remote
489 elif f in ma: # clean, a different, no remote
490 if n1 != ma[f]:
490 if n1 != ma[f]:
491 if acceptremote:
491 if acceptremote:
492 actions[f] = ('r', None, "remote delete")
492 actions[f] = ('r', None, "remote delete")
493 else:
493 else:
494 actions[f] = ('cd', None, "prompt changed/deleted")
494 actions[f] = ('cd', None, "prompt changed/deleted")
495 elif n1[20:] == 'a':
495 elif n1[20:] == 'a':
496 # This extra 'a' is added by working copy manifest to mark
496 # This extra 'a' is added by working copy manifest to mark
497 # the file as locally added. We should forget it instead of
497 # the file as locally added. We should forget it instead of
498 # deleting it.
498 # deleting it.
499 actions[f] = ('f', None, "remote deleted")
499 actions[f] = ('f', None, "remote deleted")
500 else:
500 else:
501 actions[f] = ('r', None, "other deleted")
501 actions[f] = ('r', None, "other deleted")
502 elif n2: # file exists only on remote side
502 elif n2: # file exists only on remote side
503 if f in copied:
503 if f in copied:
504 pass # we'll deal with it on m1 side
504 pass # we'll deal with it on m1 side
505 elif f in movewithdir:
505 elif f in movewithdir:
506 f2 = movewithdir[f]
506 f2 = movewithdir[f]
507 if f2 in m1:
507 if f2 in m1:
508 actions[f2] = ('m', (f2, f, None, False, pa.node()),
508 actions[f2] = ('m', (f2, f, None, False, pa.node()),
509 "local directory rename, both created")
509 "local directory rename, both created")
510 else:
510 else:
511 actions[f2] = ('dg', (f, fl2),
511 actions[f2] = ('dg', (f, fl2),
512 "local directory rename - get from " + f)
512 "local directory rename - get from " + f)
513 elif f in copy:
513 elif f in copy:
514 f2 = copy[f]
514 f2 = copy[f]
515 if f2 in m2:
515 if f2 in m2:
516 actions[f] = ('m', (f2, f, f2, False, pa.node()),
516 actions[f] = ('m', (f2, f, f2, False, pa.node()),
517 "remote copied from " + f2)
517 "remote copied from " + f2)
518 else:
518 else:
519 actions[f] = ('m', (f2, f, f2, True, pa.node()),
519 actions[f] = ('m', (f2, f, f2, True, pa.node()),
520 "remote moved from " + f2)
520 "remote moved from " + f2)
521 elif f not in ma:
521 elif f not in ma:
522 # local unknown, remote created: the logic is described by the
522 # local unknown, remote created: the logic is described by the
523 # following table:
523 # following table:
524 #
524 #
525 # force branchmerge different | action
525 # force branchmerge different | action
526 # n * * | create
526 # n * * | create
527 # y n * | create
527 # y n * | create
528 # y y n | create
528 # y y n | create
529 # y y y | merge
529 # y y y | merge
530 #
530 #
531 # Checking whether the files are different is expensive, so we
531 # Checking whether the files are different is expensive, so we
532 # don't do that when we can avoid it.
532 # don't do that when we can avoid it.
533 if not force:
533 if not force:
534 actions[f] = ('c', (fl2,), "remote created")
534 actions[f] = ('c', (fl2,), "remote created")
535 elif not branchmerge:
535 elif not branchmerge:
536 actions[f] = ('c', (fl2,), "remote created")
536 actions[f] = ('c', (fl2,), "remote created")
537 else:
537 else:
538 actions[f] = ('cm', (fl2, pa.node()),
538 actions[f] = ('cm', (fl2, pa.node()),
539 "remote created, get or merge")
539 "remote created, get or merge")
540 elif n2 != ma[f]:
540 elif n2 != ma[f]:
541 if acceptremote:
541 if acceptremote:
542 actions[f] = ('c', (fl2,), "remote recreating")
542 actions[f] = ('c', (fl2,), "remote recreating")
543 else:
543 else:
544 actions[f] = ('dc', (fl2,), "prompt deleted/changed")
544 actions[f] = ('dc', (fl2,), "prompt deleted/changed")
545
545
546 return actions, diverge, renamedelete
546 return actions, diverge, renamedelete
547
547
548 def _resolvetrivial(repo, wctx, mctx, ancestor, actions):
548 def _resolvetrivial(repo, wctx, mctx, ancestor, actions):
549 """Resolves false conflicts where the nodeid changed but the content
549 """Resolves false conflicts where the nodeid changed but the content
550 remained the same."""
550 remained the same."""
551
551
552 for f, (m, args, msg) in actions.items():
552 for f, (m, args, msg) in actions.items():
553 if m == 'cd' and f in ancestor and not wctx[f].cmp(ancestor[f]):
553 if m == 'cd' and f in ancestor and not wctx[f].cmp(ancestor[f]):
554 # local did change but ended up with same content
554 # local did change but ended up with same content
555 actions[f] = 'r', None, "prompt same"
555 actions[f] = 'r', None, "prompt same"
556 elif m == 'dc' and f in ancestor and not mctx[f].cmp(ancestor[f]):
556 elif m == 'dc' and f in ancestor and not mctx[f].cmp(ancestor[f]):
557 # remote did change but ended up with same content
557 # remote did change but ended up with same content
558 del actions[f] # don't get = keep local deleted
558 del actions[f] # don't get = keep local deleted
559
559
def calculateupdates(repo, wctx, mctx, ancestors, branchmerge, force, partial,
                     acceptremote, followcopies):
    """Calculate the actions needed to merge mctx into wctx using ancestors.

    With a single ancestor (the common case) this is a thin wrapper around
    manifestmerge().  With several ancestors (merge.preferancestor=*) each
    ancestor produces its own candidate action list ("bid") per file and an
    auction picks one bid per file.

    Returns a ``(actions, diverge, renamedelete)`` tuple, where ``actions``
    maps filename to an ``(action-type, args, message)`` tuple.
    """

    if len(ancestors) == 1: # default
        actions, diverge, renamedelete = manifestmerge(
            repo, wctx, mctx, ancestors[0], branchmerge, force, partial,
            acceptremote, followcopies)
        _checkunknownfiles(repo, wctx, mctx, force, actions)

    else: # only when merge.preferancestor=* - the default
        repo.ui.note(
            _("note: merging %s and %s using bids from ancestors %s\n") %
            (wctx, mctx, _(' and ').join(str(anc) for anc in ancestors)))

        # Call for bids
        # fbids: filename -> {action-type: [action, ...]}, accumulated over
        # one manifestmerge() run per ancestor.
        fbids = {} # mapping filename to bids (action method to list af actions)
        diverge, renamedelete = None, None
        for ancestor in ancestors:
            repo.ui.note(_('\ncalculating bids for ancestor %s\n') % ancestor)
            actions, diverge1, renamedelete1 = manifestmerge(
                repo, wctx, mctx, ancestor, branchmerge, force, partial,
                acceptremote, followcopies)
            _checkunknownfiles(repo, wctx, mctx, force, actions)
            if diverge is None: # and renamedelete is None.
                # Arbitrarily pick warnings from first iteration
                diverge = diverge1
                renamedelete = renamedelete1
            for f, a in sorted(actions.iteritems()):
                m, args, msg = a
                repo.ui.debug(' %s: %s -> %s\n' % (f, msg, m))
                if f in fbids:
                    d = fbids[f]
                    if m in d:
                        d[m].append(a)
                    else:
                        d[m] = [a]
                else:
                    fbids[f] = {m: [a]}

        # Pick the best bid for each file
        repo.ui.note(_('\nauction for merging merge bids\n'))
        # Rebuild ``actions`` from scratch; the per-ancestor dicts above are
        # only used as bid sources.
        actions = {}
        for f, bids in sorted(fbids.items()):
            # bids is a mapping from action method to list af actions
            # Consensus?
            if len(bids) == 1: # all bids are the same kind of method
                m, l = bids.items()[0]
                if all(a == l[0] for a in l[1:]): # len(bids) is > 1
                    repo.ui.note(" %s: consensus for %s\n" % (f, m))
                    actions[f] = l[0]
                    continue
            # If keep is an option, just do it.
            if 'k' in bids:
                repo.ui.note(" %s: picking 'keep' action\n" % f)
                actions[f] = bids['k'][0]
                continue
            # If there are gets and they all agree [how could they not?], do it.
            if 'g' in bids:
                ga0 = bids['g'][0]
                if all(a == ga0 for a in bids['g'][1:]):
                    repo.ui.note(" %s: picking 'get' action\n" % f)
                    actions[f] = ga0
                    continue
            # TODO: Consider other simple actions such as mode changes
            # Handle inefficient democrazy.
            repo.ui.note(_(' %s: multiple bids for merge action:\n') % f)
            for m, l in sorted(bids.items()):
                for _f, args, msg in l:
                    repo.ui.note(' %s -> %s\n' % (msg, m))
            # Pick random action. TODO: Instead, prompt user when resolving
            # "random" here means "first by dict iteration order".
            m, l = bids.items()[0]
            repo.ui.warn(_(' %s: ambiguous merge - picked %s action\n') %
                         (f, m))
            actions[f] = l[0]
            continue
        repo.ui.note(_('end of auction\n\n'))

    # Downgrade change/delete conflicts that are content-neutral relative to
    # the first ancestor.
    _resolvetrivial(repo, wctx, mctx, ancestors[0], actions)

    if wctx.rev() is None:
        # Working directory target: also forget files removed on the other
        # side rather than deleting them.
        fractions = _forgetremoved(wctx, mctx, branchmerge)
        actions.update(fractions)

    return actions, diverge, renamedelete
645
645
def batchremove(repo, actions):
    """Apply remove actions to the working directory.

    ``actions`` is an iterable of ``(file, args, msg)`` tuples.  Yields
    ``(count, filename)`` progress tuples, one per batch of up to 100
    processed files, so callers can update a progress bar.
    """
    chatty = repo.ui.verbose
    removefile = util.unlinkpath
    topath = repo.wjoin
    checkpath = repo.wvfs.audit
    pending = 0
    for fname, fargs, fmsg in actions:
        repo.ui.debug(" %s: %s -> r\n" % (fname, fmsg))
        if chatty:
            repo.ui.note(_("removing %s\n") % fname)
        # refuse to step through symlinks or out of the working dir
        checkpath(fname)
        try:
            removefile(topath(fname), ignoremissing=True)
        except OSError as inst:
            # report but keep going; the dirstate is fixed up later
            repo.ui.warn(_("update failed to remove %s: %s!\n") %
                         (fname, inst.strerror))
        if pending == 100:
            yield pending, fname
            pending = 0
        pending += 1
    if pending > 0:
        yield pending, fname
672
672
def batchget(repo, mctx, actions):
    """Apply get actions to the working directory.

    ``mctx`` is the context file data is read from; ``actions`` is an
    iterable of ``(file, args, msg)`` tuples where ``args[0]`` carries the
    file flags.  Yields ``(count, filename)`` progress tuples, one per
    batch of up to 100 processed files.
    """
    chatty = repo.ui.verbose
    getfctx = mctx.filectx
    writeworking = repo.wwrite
    pending = 0
    for fname, fargs, fmsg in actions:
        repo.ui.debug(" %s: %s -> g\n" % (fname, fmsg))
        if chatty:
            repo.ui.note(_("getting %s\n") % fname)
        writeworking(fname, getfctx(fname).data(), fargs[0])
        if pending == 100:
            yield pending, fname
            pending = 0
        pending += 1
    if pending > 0:
        yield pending, fname
695
695
def applyupdates(repo, actions, wctx, mctx, overwrite, labels=None):
    """apply the merge action list to the working directory

    wctx is the working copy context
    mctx is the context to be merged into the working copy

    ``actions`` maps action-type codes ('r', 'g', 'm', ...) to lists of
    ``(file, args, msg)`` tuples.  ``overwrite`` is forwarded to subrepo
    merging; ``labels`` to the per-file merge tool.

    Return a tuple of counts (updated, merged, removed, unresolved) that
    describes how many files were affected by the update.
    """

    updated, merged, removed, unresolved = 0, 0, 0, 0
    ms = mergestate(repo)
    # start a fresh merge state for this working-copy parent / target pair
    ms.reset(wctx.p1().node(), mctx.node())
    moves = []
    # sort each per-type action list so processing order is deterministic
    for m, l in actions.items():
        l.sort()

    # prescan for merges
    for f, args, msg in actions['m']:
        f1, f2, fa, move, anc = args
        if f == '.hgsubstate': # merged internally
            continue
        repo.ui.debug(" preserving %s for resolve of %s\n" % (f1, f))
        fcl = wctx[f1]
        fco = mctx[f2]
        actx = repo[anc]
        if fa in actx:
            fca = actx[fa]
        else:
            # ancestor has no version of this file; use a null-revision
            # filectx as the merge base.
            fca = repo.filectx(f1, fileid=nullrev)
        # record local/other/ancestor versions so 'hg resolve' can re-run
        ms.add(fcl, fco, fca, f)
        if f1 != f and move:
            moves.append(f1)

    audit = repo.wvfs.audit
    _updating = _('updating')
    _files = _('files')
    progress = repo.ui.progress

    # remove renamed files after safely stored
    for f in moves:
        if os.path.lexists(repo.wjoin(f)):
            repo.ui.debug("removing %s\n" % f)
            audit(f)
            util.unlinkpath(repo.wjoin(f))

    # total for progress reporting; 'k' (keep) actions do no work
    numupdates = sum(len(l) for m, l in actions.items() if m != 'k')

    # merge .hgsubstate before plain file removes touch subrepo state
    if [a for a in actions['r'] if a[0] == '.hgsubstate']:
        subrepo.submerge(repo, wctx, mctx, wctx, overwrite)

    # remove in parallel (must come first)
    z = 0  # running progress position, advanced by every batch/file below
    prog = worker.worker(repo.ui, 0.001, batchremove, (repo,), actions['r'])
    for i, item in prog:
        z += i
        progress(_updating, z, item=item, total=numupdates, unit=_files)
    removed = len(actions['r'])

    # get in parallel
    prog = worker.worker(repo.ui, 0.001, batchget, (repo, mctx), actions['g'])
    for i, item in prog:
        z += i
        progress(_updating, z, item=item, total=numupdates, unit=_files)
    updated = len(actions['g'])

    if [a for a in actions['g'] if a[0] == '.hgsubstate']:
        subrepo.submerge(repo, wctx, mctx, wctx, overwrite)

    # forget (manifest only, just log it) (must come first)
    for f, args, msg in actions['f']:
        repo.ui.debug(" %s: %s -> f\n" % (f, msg))
        z += 1
        progress(_updating, z, item=f, total=numupdates, unit=_files)

    # re-add (manifest only, just log it)
    for f, args, msg in actions['a']:
        repo.ui.debug(" %s: %s -> a\n" % (f, msg))
        z += 1
        progress(_updating, z, item=f, total=numupdates, unit=_files)

    # keep (noop, just log it)
    for f, args, msg in actions['k']:
        repo.ui.debug(" %s: %s -> k\n" % (f, msg))
        # no progress

    # merge
    for f, args, msg in actions['m']:
        repo.ui.debug(" %s: %s -> m\n" % (f, msg))
        z += 1
        progress(_updating, z, item=f, total=numupdates, unit=_files)
        if f == '.hgsubstate': # subrepo states need updating
            subrepo.submerge(repo, wctx, mctx, wctx.ancestor(mctx),
                             overwrite)
            continue
        audit(f)
        # r: None -> file unchanged (counts as updated), 0 -> merged
        # cleanly, >0 -> conflicts left for the user
        r = ms.resolve(f, wctx, labels=labels)
        if r is not None and r > 0:
            unresolved += 1
        else:
            if r is None:
                updated += 1
            else:
                merged += 1

    # directory rename, move local
    for f, args, msg in actions['dm']:
        repo.ui.debug(" %s: %s -> dm\n" % (f, msg))
        z += 1
        progress(_updating, z, item=f, total=numupdates, unit=_files)
        f0, flags = args
        repo.ui.note(_("moving %s to %s\n") % (f0, f))
        audit(f)
        # write the new name first, then drop the old one
        repo.wwrite(f, wctx.filectx(f0).data(), flags)
        util.unlinkpath(repo.wjoin(f0))
        updated += 1

    # local directory rename, get
    for f, args, msg in actions['dg']:
        repo.ui.debug(" %s: %s -> dg\n" % (f, msg))
        z += 1
        progress(_updating, z, item=f, total=numupdates, unit=_files)
        f0, flags = args
        repo.ui.note(_("getting %s to %s\n") % (f0, f))
        repo.wwrite(f, mctx.filectx(f0).data(), flags)
        updated += 1

    # exec
    for f, args, msg in actions['e']:
        repo.ui.debug(" %s: %s -> e\n" % (f, msg))
        z += 1
        progress(_updating, z, item=f, total=numupdates, unit=_files)
        flags, = args
        audit(f)
        # flags string: 'l' = symlink, 'x' = executable
        util.setflags(repo.wjoin(f), 'l' in flags, 'x' in flags)
        updated += 1

    # persist merge state and close out the progress bar
    ms.commit()
    progress(_updating, None, total=numupdates, unit=_files)

    return updated, merged, removed, unresolved
837
837
def recordupdates(repo, actions, branchmerge):
    """Record the outcome of merge actions in the dirstate.

    ``actions`` maps action-type codes to lists of ``(file, args, msg)``
    tuples; ``branchmerge`` selects between merge-style (two-parent) and
    plain update-style bookkeeping.
    """
    ds = repo.dirstate

    # remove (must come first)
    for fname, fargs, fmsg in actions['r']:
        if branchmerge:
            ds.remove(fname)
        else:
            ds.drop(fname)

    # forget (must come first)
    for fname, fargs, fmsg in actions['f']:
        ds.drop(fname)

    # re-add: only recorded for plain updates
    for fname, fargs, fmsg in actions['a']:
        if not branchmerge:
            ds.add(fname)

    # exec flag change: content unchanged, re-stat lazily
    for fname, fargs, fmsg in actions['e']:
        ds.normallookup(fname)

    # keep: nothing to record
    for fname, fargs, fmsg in actions['k']:
        pass

    # get
    for fname, fargs, fmsg in actions['g']:
        if branchmerge:
            ds.otherparent(fname)
        else:
            ds.normal(fname)

    # merge
    for fname, fargs, fmsg in actions['m']:
        f1, f2, fa, move, anc = fargs
        if branchmerge:
            # We've done a branch merge, mark this file as merged
            # so that we properly record the merger later
            ds.merge(fname)
            if f1 != f2: # copy/rename
                if move:
                    ds.remove(f1)
                if f1 != fname:
                    ds.copy(f1, fname)
                else:
                    ds.copy(f2, fname)
        else:
            # We've update-merged a locally modified file, so
            # we set the dirstate to emulate a normal checkout
            # of that file some time in the past. Thus our
            # merge will appear as a normal local file
            # modification.
            if f2 == fname: # file not locally copied/moved
                ds.normallookup(fname)
            if move:
                ds.drop(f1)

    # directory rename, move local
    for fname, fargs, fmsg in actions['dm']:
        f0, flag = fargs
        if branchmerge:
            ds.add(fname)
            ds.remove(f0)
            ds.copy(f0, fname)
        else:
            ds.normal(fname)
            ds.drop(f0)

    # directory rename, get
    for fname, fargs, fmsg in actions['dg']:
        f0, flag = fargs
        if branchmerge:
            ds.add(fname)
            ds.copy(f0, fname)
        else:
            ds.normal(fname)
915
915
916 def update(repo, node, branchmerge, force, partial, ancestor=None,
916 def update(repo, node, branchmerge, force, partial, ancestor=None,
917 mergeancestor=False, labels=None):
917 mergeancestor=False, labels=None):
918 """
918 """
919 Perform a merge between the working directory and the given node
919 Perform a merge between the working directory and the given node
920
920
921 node = the node to update to, or None if unspecified
921 node = the node to update to, or None if unspecified
922 branchmerge = whether to merge between branches
922 branchmerge = whether to merge between branches
923 force = whether to force branch merging or file overwriting
923 force = whether to force branch merging or file overwriting
924 partial = a function to filter file lists (dirstate not updated)
924 partial = a function to filter file lists (dirstate not updated)
925 mergeancestor = whether it is merging with an ancestor. If true,
925 mergeancestor = whether it is merging with an ancestor. If true,
926 we should accept the incoming changes for any prompts that occur.
926 we should accept the incoming changes for any prompts that occur.
927 If false, merging with an ancestor (fast-forward) is only allowed
927 If false, merging with an ancestor (fast-forward) is only allowed
928 between different named branches. This flag is used by rebase extension
928 between different named branches. This flag is used by rebase extension
929 as a temporary fix and should be avoided in general.
929 as a temporary fix and should be avoided in general.
930
930
931 The table below shows all the behaviors of the update command
931 The table below shows all the behaviors of the update command
932 given the -c and -C or no options, whether the working directory
932 given the -c and -C or no options, whether the working directory
933 is dirty, whether a revision is specified, and the relationship of
933 is dirty, whether a revision is specified, and the relationship of
934 the parent rev to the target rev (linear, on the same named
934 the parent rev to the target rev (linear, on the same named
935 branch, or on another named branch).
935 branch, or on another named branch).
936
936
937 This logic is tested by test-update-branches.t.
937 This logic is tested by test-update-branches.t.
938
938
939 -c -C dirty rev | linear same cross
939 -c -C dirty rev | linear same cross
940 n n n n | ok (1) x
940 n n n n | ok (1) x
941 n n n y | ok ok ok
941 n n n y | ok ok ok
942 n n y n | merge (2) (2)
942 n n y n | merge (2) (2)
943 n n y y | merge (3) (3)
943 n n y y | merge (3) (3)
944 n y * * | --- discard ---
944 n y * * | --- discard ---
945 y n y * | --- (4) ---
945 y n y * | --- (4) ---
946 y n n * | --- ok ---
946 y n n * | --- ok ---
947 y y * * | --- (5) ---
947 y y * * | --- (5) ---
948
948
949 x = can't happen
949 x = can't happen
950 * = don't-care
950 * = don't-care
951 1 = abort: not a linear update (merge or update --check to force update)
951 1 = abort: not a linear update (merge or update --check to force update)
952 2 = abort: uncommitted changes (commit and merge, or update --clean to
952 2 = abort: uncommitted changes (commit and merge, or update --clean to
953 discard changes)
953 discard changes)
954 3 = abort: uncommitted changes (commit or update --clean to discard changes)
954 3 = abort: uncommitted changes (commit or update --clean to discard changes)
955 4 = abort: uncommitted changes (checked in commands.py)
955 4 = abort: uncommitted changes (checked in commands.py)
956 5 = incompatible options (checked in commands.py)
956 5 = incompatible options (checked in commands.py)
957
957
958 Return the same tuple as applyupdates().
958 Return the same tuple as applyupdates().
959 """
959 """
960
960
961 onode = node
961 onode = node
962 wlock = repo.wlock()
962 wlock = repo.wlock()
963 try:
963 try:
964 wc = repo[None]
964 wc = repo[None]
965 pl = wc.parents()
965 pl = wc.parents()
966 p1 = pl[0]
966 p1 = pl[0]
967 pas = [None]
967 pas = [None]
968 if ancestor is not None:
968 if ancestor is not None:
969 pas = [repo[ancestor]]
969 pas = [repo[ancestor]]
970
970
971 if node is None:
971 if node is None:
972 # Here is where we should consider bookmarks, divergent bookmarks,
972 # Here is where we should consider bookmarks, divergent bookmarks,
973 # foreground changesets (successors), and tip of current branch;
973 # foreground changesets (successors), and tip of current branch;
974 # but currently we are only checking the branch tips.
974 # but currently we are only checking the branch tips.
975 try:
975 try:
976 node = repo.branchtip(wc.branch())
976 node = repo.branchtip(wc.branch())
977 except errormod.RepoLookupError:
977 except errormod.RepoLookupError:
978 if wc.branch() == 'default': # no default branch!
978 if wc.branch() == 'default': # no default branch!
979 node = repo.lookup('tip') # update to tip
979 node = repo.lookup('tip') # update to tip
980 else:
980 else:
981 raise util.Abort(_("branch %s not found") % wc.branch())
981 raise util.Abort(_("branch %s not found") % wc.branch())
982
982
983 if p1.obsolete() and not p1.children():
983 if p1.obsolete() and not p1.children():
984 # allow updating to successors
984 # allow updating to successors
985 successors = obsolete.successorssets(repo, p1.node())
985 successors = obsolete.successorssets(repo, p1.node())
986
986
987 # behavior of certain cases is as follows,
987 # behavior of certain cases is as follows,
988 #
988 #
989 # divergent changesets: update to highest rev, similar to what
989 # divergent changesets: update to highest rev, similar to what
990 # is currently done when there are more than one head
990 # is currently done when there are more than one head
991 # (i.e. 'tip')
991 # (i.e. 'tip')
992 #
992 #
993 # replaced changesets: same as divergent except we know there
993 # replaced changesets: same as divergent except we know there
994 # is no conflict
994 # is no conflict
995 #
995 #
996 # pruned changeset: no update is done; though, we could
996 # pruned changeset: no update is done; though, we could
997 # consider updating to the first non-obsolete parent,
997 # consider updating to the first non-obsolete parent,
998 # similar to what is current done for 'hg prune'
998 # similar to what is current done for 'hg prune'
999
999
1000 if successors:
1000 if successors:
1001 # flatten the list here handles both divergent (len > 1)
1001 # flatten the list here handles both divergent (len > 1)
1002 # and the usual case (len = 1)
1002 # and the usual case (len = 1)
1003 successors = [n for sub in successors for n in sub]
1003 successors = [n for sub in successors for n in sub]
1004
1004
1005 # get the max revision for the given successors set,
1005 # get the max revision for the given successors set,
1006 # i.e. the 'tip' of a set
1006 # i.e. the 'tip' of a set
1007 node = repo.revs('max(%ln)', successors).first()
1007 node = repo.revs('max(%ln)', successors).first()
1008 pas = [p1]
1008 pas = [p1]
1009
1009
1010 overwrite = force and not branchmerge
1010 overwrite = force and not branchmerge
1011
1011
1012 p2 = repo[node]
1012 p2 = repo[node]
1013 if pas[0] is None:
1013 if pas[0] is None:
1014 if repo.ui.config('merge', 'preferancestor', '*') == '*':
1014 if repo.ui.configlist('merge', 'preferancestor', ['*']) == ['*']:
1015 cahs = repo.changelog.commonancestorsheads(p1.node(), p2.node())
1015 cahs = repo.changelog.commonancestorsheads(p1.node(), p2.node())
1016 pas = [repo[anc] for anc in (sorted(cahs) or [nullid])]
1016 pas = [repo[anc] for anc in (sorted(cahs) or [nullid])]
1017 else:
1017 else:
1018 pas = [p1.ancestor(p2, warn=branchmerge)]
1018 pas = [p1.ancestor(p2, warn=branchmerge)]
1019
1019
1020 fp1, fp2, xp1, xp2 = p1.node(), p2.node(), str(p1), str(p2)
1020 fp1, fp2, xp1, xp2 = p1.node(), p2.node(), str(p1), str(p2)
1021
1021
1022 ### check phase
1022 ### check phase
1023 if not overwrite and len(pl) > 1:
1023 if not overwrite and len(pl) > 1:
1024 raise util.Abort(_("outstanding uncommitted merge"))
1024 raise util.Abort(_("outstanding uncommitted merge"))
1025 if branchmerge:
1025 if branchmerge:
1026 if pas == [p2]:
1026 if pas == [p2]:
1027 raise util.Abort(_("merging with a working directory ancestor"
1027 raise util.Abort(_("merging with a working directory ancestor"
1028 " has no effect"))
1028 " has no effect"))
1029 elif pas == [p1]:
1029 elif pas == [p1]:
1030 if not mergeancestor and p1.branch() == p2.branch():
1030 if not mergeancestor and p1.branch() == p2.branch():
1031 raise util.Abort(_("nothing to merge"),
1031 raise util.Abort(_("nothing to merge"),
1032 hint=_("use 'hg update' "
1032 hint=_("use 'hg update' "
1033 "or check 'hg heads'"))
1033 "or check 'hg heads'"))
1034 if not force and (wc.files() or wc.deleted()):
1034 if not force and (wc.files() or wc.deleted()):
1035 raise util.Abort(_("uncommitted changes"),
1035 raise util.Abort(_("uncommitted changes"),
1036 hint=_("use 'hg status' to list changes"))
1036 hint=_("use 'hg status' to list changes"))
1037 for s in sorted(wc.substate):
1037 for s in sorted(wc.substate):
1038 wc.sub(s).bailifchanged()
1038 wc.sub(s).bailifchanged()
1039
1039
1040 elif not overwrite:
1040 elif not overwrite:
1041 if p1 == p2: # no-op update
1041 if p1 == p2: # no-op update
1042 # call the hooks and exit early
1042 # call the hooks and exit early
1043 repo.hook('preupdate', throw=True, parent1=xp2, parent2='')
1043 repo.hook('preupdate', throw=True, parent1=xp2, parent2='')
1044 repo.hook('update', parent1=xp2, parent2='', error=0)
1044 repo.hook('update', parent1=xp2, parent2='', error=0)
1045 return 0, 0, 0, 0
1045 return 0, 0, 0, 0
1046
1046
1047 if pas not in ([p1], [p2]): # nonlinear
1047 if pas not in ([p1], [p2]): # nonlinear
1048 dirty = wc.dirty(missing=True)
1048 dirty = wc.dirty(missing=True)
1049 if dirty or onode is None:
1049 if dirty or onode is None:
1050 # Branching is a bit strange to ensure we do the minimal
1050 # Branching is a bit strange to ensure we do the minimal
1051 # amount of call to obsolete.background.
1051 # amount of call to obsolete.background.
1052 foreground = obsolete.foreground(repo, [p1.node()])
1052 foreground = obsolete.foreground(repo, [p1.node()])
1053 # note: the <node> variable contains a random identifier
1053 # note: the <node> variable contains a random identifier
1054 if repo[node].node() in foreground:
1054 if repo[node].node() in foreground:
1055 pas = [p1] # allow updating to successors
1055 pas = [p1] # allow updating to successors
1056 elif dirty:
1056 elif dirty:
1057 msg = _("uncommitted changes")
1057 msg = _("uncommitted changes")
1058 if onode is None:
1058 if onode is None:
1059 hint = _("commit and merge, or update --clean to"
1059 hint = _("commit and merge, or update --clean to"
1060 " discard changes")
1060 " discard changes")
1061 else:
1061 else:
1062 hint = _("commit or update --clean to discard"
1062 hint = _("commit or update --clean to discard"
1063 " changes")
1063 " changes")
1064 raise util.Abort(msg, hint=hint)
1064 raise util.Abort(msg, hint=hint)
1065 else: # node is none
1065 else: # node is none
1066 msg = _("not a linear update")
1066 msg = _("not a linear update")
1067 hint = _("merge or update --check to force update")
1067 hint = _("merge or update --check to force update")
1068 raise util.Abort(msg, hint=hint)
1068 raise util.Abort(msg, hint=hint)
1069 else:
1069 else:
1070 # Allow jumping branches if clean and specific rev given
1070 # Allow jumping branches if clean and specific rev given
1071 pas = [p1]
1071 pas = [p1]
1072
1072
1073 # deprecated config: merge.followcopies
1073 # deprecated config: merge.followcopies
1074 followcopies = False
1074 followcopies = False
1075 if overwrite:
1075 if overwrite:
1076 pas = [wc]
1076 pas = [wc]
1077 elif pas == [p2]: # backwards
1077 elif pas == [p2]: # backwards
1078 pas = [wc.p1()]
1078 pas = [wc.p1()]
1079 elif not branchmerge and not wc.dirty(missing=True):
1079 elif not branchmerge and not wc.dirty(missing=True):
1080 pass
1080 pass
1081 elif pas[0] and repo.ui.configbool('merge', 'followcopies', True):
1081 elif pas[0] and repo.ui.configbool('merge', 'followcopies', True):
1082 followcopies = True
1082 followcopies = True
1083
1083
1084 ### calculate phase
1084 ### calculate phase
1085 actionbyfile, diverge, renamedelete = calculateupdates(
1085 actionbyfile, diverge, renamedelete = calculateupdates(
1086 repo, wc, p2, pas, branchmerge, force, partial, mergeancestor,
1086 repo, wc, p2, pas, branchmerge, force, partial, mergeancestor,
1087 followcopies)
1087 followcopies)
1088 # Convert to dictionary-of-lists format
1088 # Convert to dictionary-of-lists format
1089 actions = dict((m, []) for m in 'a f g cd dc r dm dg m e k'.split())
1089 actions = dict((m, []) for m in 'a f g cd dc r dm dg m e k'.split())
1090 for f, (m, args, msg) in actionbyfile.iteritems():
1090 for f, (m, args, msg) in actionbyfile.iteritems():
1091 if m not in actions:
1091 if m not in actions:
1092 actions[m] = []
1092 actions[m] = []
1093 actions[m].append((f, args, msg))
1093 actions[m].append((f, args, msg))
1094
1094
1095 if not util.checkcase(repo.path):
1095 if not util.checkcase(repo.path):
1096 # check collision between files only in p2 for clean update
1096 # check collision between files only in p2 for clean update
1097 if (not branchmerge and
1097 if (not branchmerge and
1098 (force or not wc.dirty(missing=True, branch=False))):
1098 (force or not wc.dirty(missing=True, branch=False))):
1099 _checkcollision(repo, p2.manifest(), None)
1099 _checkcollision(repo, p2.manifest(), None)
1100 else:
1100 else:
1101 _checkcollision(repo, wc.manifest(), actions)
1101 _checkcollision(repo, wc.manifest(), actions)
1102
1102
1103 # Prompt and create actions. TODO: Move this towards resolve phase.
1103 # Prompt and create actions. TODO: Move this towards resolve phase.
1104 for f, args, msg in sorted(actions['cd']):
1104 for f, args, msg in sorted(actions['cd']):
1105 if repo.ui.promptchoice(
1105 if repo.ui.promptchoice(
1106 _("local changed %s which remote deleted\n"
1106 _("local changed %s which remote deleted\n"
1107 "use (c)hanged version or (d)elete?"
1107 "use (c)hanged version or (d)elete?"
1108 "$$ &Changed $$ &Delete") % f, 0):
1108 "$$ &Changed $$ &Delete") % f, 0):
1109 actions['r'].append((f, None, "prompt delete"))
1109 actions['r'].append((f, None, "prompt delete"))
1110 else:
1110 else:
1111 actions['a'].append((f, None, "prompt keep"))
1111 actions['a'].append((f, None, "prompt keep"))
1112 del actions['cd'][:]
1112 del actions['cd'][:]
1113
1113
1114 for f, args, msg in sorted(actions['dc']):
1114 for f, args, msg in sorted(actions['dc']):
1115 flags, = args
1115 flags, = args
1116 if repo.ui.promptchoice(
1116 if repo.ui.promptchoice(
1117 _("remote changed %s which local deleted\n"
1117 _("remote changed %s which local deleted\n"
1118 "use (c)hanged version or leave (d)eleted?"
1118 "use (c)hanged version or leave (d)eleted?"
1119 "$$ &Changed $$ &Deleted") % f, 0) == 0:
1119 "$$ &Changed $$ &Deleted") % f, 0) == 0:
1120 actions['g'].append((f, (flags,), "prompt recreating"))
1120 actions['g'].append((f, (flags,), "prompt recreating"))
1121 del actions['dc'][:]
1121 del actions['dc'][:]
1122
1122
1123 ### apply phase
1123 ### apply phase
1124 if not branchmerge: # just jump to the new rev
1124 if not branchmerge: # just jump to the new rev
1125 fp1, fp2, xp1, xp2 = fp2, nullid, xp2, ''
1125 fp1, fp2, xp1, xp2 = fp2, nullid, xp2, ''
1126 if not partial:
1126 if not partial:
1127 repo.hook('preupdate', throw=True, parent1=xp1, parent2=xp2)
1127 repo.hook('preupdate', throw=True, parent1=xp1, parent2=xp2)
1128 # note that we're in the middle of an update
1128 # note that we're in the middle of an update
1129 repo.vfs.write('updatestate', p2.hex())
1129 repo.vfs.write('updatestate', p2.hex())
1130
1130
1131 stats = applyupdates(repo, actions, wc, p2, overwrite, labels=labels)
1131 stats = applyupdates(repo, actions, wc, p2, overwrite, labels=labels)
1132
1132
1133 # divergent renames
1133 # divergent renames
1134 for f, fl in sorted(diverge.iteritems()):
1134 for f, fl in sorted(diverge.iteritems()):
1135 repo.ui.warn(_("note: possible conflict - %s was renamed "
1135 repo.ui.warn(_("note: possible conflict - %s was renamed "
1136 "multiple times to:\n") % f)
1136 "multiple times to:\n") % f)
1137 for nf in fl:
1137 for nf in fl:
1138 repo.ui.warn(" %s\n" % nf)
1138 repo.ui.warn(" %s\n" % nf)
1139
1139
1140 # rename and delete
1140 # rename and delete
1141 for f, fl in sorted(renamedelete.iteritems()):
1141 for f, fl in sorted(renamedelete.iteritems()):
1142 repo.ui.warn(_("note: possible conflict - %s was deleted "
1142 repo.ui.warn(_("note: possible conflict - %s was deleted "
1143 "and renamed to:\n") % f)
1143 "and renamed to:\n") % f)
1144 for nf in fl:
1144 for nf in fl:
1145 repo.ui.warn(" %s\n" % nf)
1145 repo.ui.warn(" %s\n" % nf)
1146
1146
1147 if not partial:
1147 if not partial:
1148 repo.dirstate.beginparentchange()
1148 repo.dirstate.beginparentchange()
1149 repo.setparents(fp1, fp2)
1149 repo.setparents(fp1, fp2)
1150 recordupdates(repo, actions, branchmerge)
1150 recordupdates(repo, actions, branchmerge)
1151 # update completed, clear state
1151 # update completed, clear state
1152 util.unlink(repo.join('updatestate'))
1152 util.unlink(repo.join('updatestate'))
1153
1153
1154 if not branchmerge:
1154 if not branchmerge:
1155 repo.dirstate.setbranch(p2.branch())
1155 repo.dirstate.setbranch(p2.branch())
1156 repo.dirstate.endparentchange()
1156 repo.dirstate.endparentchange()
1157 finally:
1157 finally:
1158 wlock.release()
1158 wlock.release()
1159
1159
1160 if not partial:
1160 if not partial:
1161 def updatehook(parent1=xp1, parent2=xp2, error=stats[3]):
1161 def updatehook(parent1=xp1, parent2=xp2, error=stats[3]):
1162 repo.hook('update', parent1=parent1, parent2=parent2, error=error)
1162 repo.hook('update', parent1=parent1, parent2=parent2, error=error)
1163 repo._afterlock(updatehook)
1163 repo._afterlock(updatehook)
1164 return stats
1164 return stats
1165
1165
def graft(repo, ctx, pctx, labels):
    """Perform a graft-like merge of ``ctx`` onto the working directory.

    The merge base is forced to ``pctx`` (usually ``ctx.p1()``) so that the
    delta introduced by one or more changesets is replayed onto the current
    changeset. Afterwards the dirstate is collapsed back to a single parent
    and any renames/copies recorded in the grafted range are duplicated.

    ctx - changeset to rebase
    pctx - merge base, usually ctx.p1()
    labels - merge labels eg ['local', 'graft']

    """
    # When grafting a descendant onto one of its ancestors we pass
    # mergeancestor=True to update(). That has two effects: 1) the merge is
    # allowed even when the destination equals the parent of ctx (which lets
    # graft be used to copy commits), and 2) update() treats the incoming
    # changes as newer than the destination, so it skips the "remote changed
    # foo which local deleted" prompt.
    isdescendant = repo.changelog.isancestor(repo['.'].node(), ctx.node())

    stats = update(repo, ctx.node(), True, True, False, pctx.node(),
                   mergeancestor=isdescendant, labels=labels)

    # Collapse back to a single parent: drop the second merge parent.
    repo.dirstate.beginparentchange()
    repo.setparents(repo['.'].node(), nullid)
    repo.dirstate.write()
    # Replay copy/rename records so the dirstate reflects the graft.
    copies.duplicatecopies(repo, ctx.rev(), pctx.rev())
    repo.dirstate.endparentchange()
    return stats
1198 return stats
General Comments 0
You need to be logged in to leave comments. Login now