ancestor: silence multiple ancestor warning outside of merge (issue4234)...
Matt Mackall
r21203:9f12d866 stable
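This revision adds an optional `warn` argument to `changectx.ancestor()`. When two changesets have several common ancestor heads and no `merge.preferancestor` choice matches, the "note: using ... as ancestor of ..." hint is now printed only if the caller passes `warn=True`; callers that merely need an ancestor stay silent. Below is a minimal sketch of that guard pattern; `FakeUI` and `FakeCtx` are illustrative stand-ins, not Mercurial's real ui/context classes, and the actual opt-in call site (presumably in the merge code) is outside this hunk.

import sys

# Sketch of the warn-guard pattern from the change above (stand-in classes):
# the ancestor is always computed, but the "multiple ancestors" note is
# emitted only when the caller asks for it.

class FakeUI(object):
    def status(self, msg):
        sys.stdout.write(msg)

class FakeCtx(object):
    def __init__(self, ui, name, ancestors):
        self._ui = ui
        self._name = name
        # stand-in for changelog.commonancestorsheads()
        self._ancestors = set(ancestors)

    def ancestor(self, other, warn=False):
        cahs = sorted(self._ancestors & other._ancestors)
        anc = cahs[0] if cahs else None
        if len(cahs) > 1 and warn:
            self._ui.status("note: using %s as ancestor of %s and %s\n"
                            % (anc, self._name, other._name))
        return anc

ui = FakeUI()
c1 = FakeCtx(ui, "c1", ["x", "y"])
c2 = FakeCtx(ui, "c2", ["x", "y"])
c1.ancestor(c2)             # silent: ancestor computation outside a merge
c1.ancestor(c2, warn=True)  # merge-style call: prints the note

Keeping the computation unconditional and gating only the message leaves existing callers untouched while silencing the output that issue4234 complains about.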
@@ -1,1400 +1,1401 @@
1 # context.py - changeset and file context objects for mercurial
1 # context.py - changeset and file context objects for mercurial
2 #
2 #
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from node import nullid, nullrev, short, hex, bin
8 from node import nullid, nullrev, short, hex, bin
9 from i18n import _
9 from i18n import _
10 import mdiff, error, util, scmutil, subrepo, patch, encoding, phases
10 import mdiff, error, util, scmutil, subrepo, patch, encoding, phases
11 import match as matchmod
11 import match as matchmod
12 import os, errno, stat
12 import os, errno, stat
13 import obsolete as obsmod
13 import obsolete as obsmod
14 import repoview
14 import repoview
15 import fileset
15 import fileset
16
16
17 propertycache = util.propertycache
17 propertycache = util.propertycache
18
18
19 class basectx(object):
19 class basectx(object):
20 """A basectx object represents the common logic for its children:
20 """A basectx object represents the common logic for its children:
21 changectx: read-only context that is already present in the repo,
21 changectx: read-only context that is already present in the repo,
22 workingctx: a context that represents the working directory and can
22 workingctx: a context that represents the working directory and can
23 be committed,
23 be committed,
24 memctx: a context that represents changes in-memory and can also
24 memctx: a context that represents changes in-memory and can also
25 be committed."""
25 be committed."""
26 def __new__(cls, repo, changeid='', *args, **kwargs):
26 def __new__(cls, repo, changeid='', *args, **kwargs):
27 if isinstance(changeid, basectx):
27 if isinstance(changeid, basectx):
28 return changeid
28 return changeid
29
29
30 o = super(basectx, cls).__new__(cls)
30 o = super(basectx, cls).__new__(cls)
31
31
32 o._repo = repo
32 o._repo = repo
33 o._rev = nullrev
33 o._rev = nullrev
34 o._node = nullid
34 o._node = nullid
35
35
36 return o
36 return o
37
37
38 def __str__(self):
38 def __str__(self):
39 return short(self.node())
39 return short(self.node())
40
40
41 def __int__(self):
41 def __int__(self):
42 return self.rev()
42 return self.rev()
43
43
44 def __repr__(self):
44 def __repr__(self):
45 return "<%s %s>" % (type(self).__name__, str(self))
45 return "<%s %s>" % (type(self).__name__, str(self))
46
46
47 def __eq__(self, other):
47 def __eq__(self, other):
48 try:
48 try:
49 return type(self) == type(other) and self._rev == other._rev
49 return type(self) == type(other) and self._rev == other._rev
50 except AttributeError:
50 except AttributeError:
51 return False
51 return False
52
52
53 def __ne__(self, other):
53 def __ne__(self, other):
54 return not (self == other)
54 return not (self == other)
55
55
56 def __contains__(self, key):
56 def __contains__(self, key):
57 return key in self._manifest
57 return key in self._manifest
58
58
59 def __getitem__(self, key):
59 def __getitem__(self, key):
60 return self.filectx(key)
60 return self.filectx(key)
61
61
62 def __iter__(self):
62 def __iter__(self):
63 for f in sorted(self._manifest):
63 for f in sorted(self._manifest):
64 yield f
64 yield f
65
65
66 @propertycache
66 @propertycache
67 def substate(self):
67 def substate(self):
68 return subrepo.state(self, self._repo.ui)
68 return subrepo.state(self, self._repo.ui)
69
69
70 def rev(self):
70 def rev(self):
71 return self._rev
71 return self._rev
72 def node(self):
72 def node(self):
73 return self._node
73 return self._node
74 def hex(self):
74 def hex(self):
75 return hex(self.node())
75 return hex(self.node())
76 def manifest(self):
76 def manifest(self):
77 return self._manifest
77 return self._manifest
78 def phasestr(self):
78 def phasestr(self):
79 return phases.phasenames[self.phase()]
79 return phases.phasenames[self.phase()]
80 def mutable(self):
80 def mutable(self):
81 return self.phase() > phases.public
81 return self.phase() > phases.public
82
82
83 def getfileset(self, expr):
83 def getfileset(self, expr):
84 return fileset.getfileset(self, expr)
84 return fileset.getfileset(self, expr)
85
85
86 def obsolete(self):
86 def obsolete(self):
87 """True if the changeset is obsolete"""
87 """True if the changeset is obsolete"""
88 return self.rev() in obsmod.getrevs(self._repo, 'obsolete')
88 return self.rev() in obsmod.getrevs(self._repo, 'obsolete')
89
89
90 def extinct(self):
90 def extinct(self):
91 """True if the changeset is extinct"""
91 """True if the changeset is extinct"""
92 return self.rev() in obsmod.getrevs(self._repo, 'extinct')
92 return self.rev() in obsmod.getrevs(self._repo, 'extinct')
93
93
94 def unstable(self):
94 def unstable(self):
95 """True if the changeset is not obsolete but it's ancestor are"""
95 """True if the changeset is not obsolete but it's ancestor are"""
96 return self.rev() in obsmod.getrevs(self._repo, 'unstable')
96 return self.rev() in obsmod.getrevs(self._repo, 'unstable')
97
97
98 def bumped(self):
98 def bumped(self):
99 """True if the changeset try to be a successor of a public changeset
99 """True if the changeset try to be a successor of a public changeset
100
100
101 Only non-public and non-obsolete changesets may be bumped.
101 Only non-public and non-obsolete changesets may be bumped.
102 """
102 """
103 return self.rev() in obsmod.getrevs(self._repo, 'bumped')
103 return self.rev() in obsmod.getrevs(self._repo, 'bumped')
104
104
105 def divergent(self):
105 def divergent(self):
106 """Is a successors of a changeset with multiple possible successors set
106 """Is a successors of a changeset with multiple possible successors set
107
107
108 Only non-public and non-obsolete changesets may be divergent.
108 Only non-public and non-obsolete changesets may be divergent.
109 """
109 """
110 return self.rev() in obsmod.getrevs(self._repo, 'divergent')
110 return self.rev() in obsmod.getrevs(self._repo, 'divergent')
111
111
112 def troubled(self):
112 def troubled(self):
113 """True if the changeset is either unstable, bumped or divergent"""
113 """True if the changeset is either unstable, bumped or divergent"""
114 return self.unstable() or self.bumped() or self.divergent()
114 return self.unstable() or self.bumped() or self.divergent()
115
115
116 def troubles(self):
116 def troubles(self):
117 """return the list of troubles affecting this changesets.
117 """return the list of troubles affecting this changesets.
118
118
119 Troubles are returned as strings. possible values are:
119 Troubles are returned as strings. possible values are:
120 - unstable,
120 - unstable,
121 - bumped,
121 - bumped,
122 - divergent.
122 - divergent.
123 """
123 """
124 troubles = []
124 troubles = []
125 if self.unstable():
125 if self.unstable():
126 troubles.append('unstable')
126 troubles.append('unstable')
127 if self.bumped():
127 if self.bumped():
128 troubles.append('bumped')
128 troubles.append('bumped')
129 if self.divergent():
129 if self.divergent():
130 troubles.append('divergent')
130 troubles.append('divergent')
131 return troubles
131 return troubles
132
132
133 def parents(self):
133 def parents(self):
134 """return contexts for each parent changeset"""
134 """return contexts for each parent changeset"""
135 return self._parents
135 return self._parents
136
136
137 def p1(self):
137 def p1(self):
138 return self._parents[0]
138 return self._parents[0]
139
139
140 def p2(self):
140 def p2(self):
141 if len(self._parents) == 2:
141 if len(self._parents) == 2:
142 return self._parents[1]
142 return self._parents[1]
143 return changectx(self._repo, -1)
143 return changectx(self._repo, -1)
144
144
145 def _fileinfo(self, path):
145 def _fileinfo(self, path):
146 if '_manifest' in self.__dict__:
146 if '_manifest' in self.__dict__:
147 try:
147 try:
148 return self._manifest[path], self._manifest.flags(path)
148 return self._manifest[path], self._manifest.flags(path)
149 except KeyError:
149 except KeyError:
150 raise error.ManifestLookupError(self._node, path,
150 raise error.ManifestLookupError(self._node, path,
151 _('not found in manifest'))
151 _('not found in manifest'))
152 if '_manifestdelta' in self.__dict__ or path in self.files():
152 if '_manifestdelta' in self.__dict__ or path in self.files():
153 if path in self._manifestdelta:
153 if path in self._manifestdelta:
154 return (self._manifestdelta[path],
154 return (self._manifestdelta[path],
155 self._manifestdelta.flags(path))
155 self._manifestdelta.flags(path))
156 node, flag = self._repo.manifest.find(self._changeset[0], path)
156 node, flag = self._repo.manifest.find(self._changeset[0], path)
157 if not node:
157 if not node:
158 raise error.ManifestLookupError(self._node, path,
158 raise error.ManifestLookupError(self._node, path,
159 _('not found in manifest'))
159 _('not found in manifest'))
160
160
161 return node, flag
161 return node, flag
162
162
163 def filenode(self, path):
163 def filenode(self, path):
164 return self._fileinfo(path)[0]
164 return self._fileinfo(path)[0]
165
165
166 def flags(self, path):
166 def flags(self, path):
167 try:
167 try:
168 return self._fileinfo(path)[1]
168 return self._fileinfo(path)[1]
169 except error.LookupError:
169 except error.LookupError:
170 return ''
170 return ''
171
171
172 def sub(self, path):
172 def sub(self, path):
173 return subrepo.subrepo(self, path)
173 return subrepo.subrepo(self, path)
174
174
175 def match(self, pats=[], include=None, exclude=None, default='glob'):
175 def match(self, pats=[], include=None, exclude=None, default='glob'):
176 r = self._repo
176 r = self._repo
177 return matchmod.match(r.root, r.getcwd(), pats,
177 return matchmod.match(r.root, r.getcwd(), pats,
178 include, exclude, default,
178 include, exclude, default,
179 auditor=r.auditor, ctx=self)
179 auditor=r.auditor, ctx=self)
180
180
181 def diff(self, ctx2=None, match=None, **opts):
181 def diff(self, ctx2=None, match=None, **opts):
182 """Returns a diff generator for the given contexts and matcher"""
182 """Returns a diff generator for the given contexts and matcher"""
183 if ctx2 is None:
183 if ctx2 is None:
184 ctx2 = self.p1()
184 ctx2 = self.p1()
185 if ctx2 is not None:
185 if ctx2 is not None:
186 ctx2 = self._repo[ctx2]
186 ctx2 = self._repo[ctx2]
187 diffopts = patch.diffopts(self._repo.ui, opts)
187 diffopts = patch.diffopts(self._repo.ui, opts)
188 return patch.diff(self._repo, ctx2.node(), self.node(),
188 return patch.diff(self._repo, ctx2.node(), self.node(),
189 match=match, opts=diffopts)
189 match=match, opts=diffopts)
190
190
191 @propertycache
191 @propertycache
192 def _dirs(self):
192 def _dirs(self):
193 return scmutil.dirs(self._manifest)
193 return scmutil.dirs(self._manifest)
194
194
195 def dirs(self):
195 def dirs(self):
196 return self._dirs
196 return self._dirs
197
197
198 def dirty(self):
198 def dirty(self):
199 return False
199 return False
200
200
201 def makememctx(repo, parents, text, user, date, branch, files, store,
201 def makememctx(repo, parents, text, user, date, branch, files, store,
202 editor=None):
202 editor=None):
203 def getfilectx(repo, memctx, path):
203 def getfilectx(repo, memctx, path):
204 data, (islink, isexec), copied = store.getfile(path)
204 data, (islink, isexec), copied = store.getfile(path)
205 return memfilectx(path, data, islink=islink, isexec=isexec,
205 return memfilectx(path, data, islink=islink, isexec=isexec,
206 copied=copied)
206 copied=copied)
207 extra = {}
207 extra = {}
208 if branch:
208 if branch:
209 extra['branch'] = encoding.fromlocal(branch)
209 extra['branch'] = encoding.fromlocal(branch)
210 ctx = memctx(repo, parents, text, files, getfilectx, user,
210 ctx = memctx(repo, parents, text, files, getfilectx, user,
211 date, extra)
211 date, extra)
212 if editor:
212 if editor:
213 ctx._text = editor(repo, ctx, [])
213 ctx._text = editor(repo, ctx, [])
214 return ctx
214 return ctx
215
215
216 class changectx(basectx):
216 class changectx(basectx):
217 """A changecontext object makes access to data related to a particular
217 """A changecontext object makes access to data related to a particular
218 changeset convenient. It represents a read-only context already present in
218 changeset convenient. It represents a read-only context already present in
219 the repo."""
219 the repo."""
220 def __init__(self, repo, changeid=''):
220 def __init__(self, repo, changeid=''):
221 """changeid is a revision number, node, or tag"""
221 """changeid is a revision number, node, or tag"""
222
222
223 # since basectx.__new__ already took care of copying the object, we
223 # since basectx.__new__ already took care of copying the object, we
224 # don't need to do anything in __init__, so we just exit here
224 # don't need to do anything in __init__, so we just exit here
225 if isinstance(changeid, basectx):
225 if isinstance(changeid, basectx):
226 return
226 return
227
227
228 if changeid == '':
228 if changeid == '':
229 changeid = '.'
229 changeid = '.'
230 self._repo = repo
230 self._repo = repo
231
231
232 if isinstance(changeid, int):
232 if isinstance(changeid, int):
233 try:
233 try:
234 self._node = repo.changelog.node(changeid)
234 self._node = repo.changelog.node(changeid)
235 except IndexError:
235 except IndexError:
236 raise error.RepoLookupError(
236 raise error.RepoLookupError(
237 _("unknown revision '%s'") % changeid)
237 _("unknown revision '%s'") % changeid)
238 self._rev = changeid
238 self._rev = changeid
239 return
239 return
240 if isinstance(changeid, long):
240 if isinstance(changeid, long):
241 changeid = str(changeid)
241 changeid = str(changeid)
242 if changeid == '.':
242 if changeid == '.':
243 self._node = repo.dirstate.p1()
243 self._node = repo.dirstate.p1()
244 self._rev = repo.changelog.rev(self._node)
244 self._rev = repo.changelog.rev(self._node)
245 return
245 return
246 if changeid == 'null':
246 if changeid == 'null':
247 self._node = nullid
247 self._node = nullid
248 self._rev = nullrev
248 self._rev = nullrev
249 return
249 return
250 if changeid == 'tip':
250 if changeid == 'tip':
251 self._node = repo.changelog.tip()
251 self._node = repo.changelog.tip()
252 self._rev = repo.changelog.rev(self._node)
252 self._rev = repo.changelog.rev(self._node)
253 return
253 return
254 if len(changeid) == 20:
254 if len(changeid) == 20:
255 try:
255 try:
256 self._node = changeid
256 self._node = changeid
257 self._rev = repo.changelog.rev(changeid)
257 self._rev = repo.changelog.rev(changeid)
258 return
258 return
259 except LookupError:
259 except LookupError:
260 pass
260 pass
261
261
262 try:
262 try:
263 r = int(changeid)
263 r = int(changeid)
264 if str(r) != changeid:
264 if str(r) != changeid:
265 raise ValueError
265 raise ValueError
266 l = len(repo.changelog)
266 l = len(repo.changelog)
267 if r < 0:
267 if r < 0:
268 r += l
268 r += l
269 if r < 0 or r >= l:
269 if r < 0 or r >= l:
270 raise ValueError
270 raise ValueError
271 self._rev = r
271 self._rev = r
272 self._node = repo.changelog.node(r)
272 self._node = repo.changelog.node(r)
273 return
273 return
274 except (ValueError, OverflowError, IndexError):
274 except (ValueError, OverflowError, IndexError):
275 pass
275 pass
276
276
277 if len(changeid) == 40:
277 if len(changeid) == 40:
278 try:
278 try:
279 self._node = bin(changeid)
279 self._node = bin(changeid)
280 self._rev = repo.changelog.rev(self._node)
280 self._rev = repo.changelog.rev(self._node)
281 return
281 return
282 except (TypeError, LookupError):
282 except (TypeError, LookupError):
283 pass
283 pass
284
284
285 if changeid in repo._bookmarks:
285 if changeid in repo._bookmarks:
286 self._node = repo._bookmarks[changeid]
286 self._node = repo._bookmarks[changeid]
287 self._rev = repo.changelog.rev(self._node)
287 self._rev = repo.changelog.rev(self._node)
288 return
288 return
289 if changeid in repo._tagscache.tags:
289 if changeid in repo._tagscache.tags:
290 self._node = repo._tagscache.tags[changeid]
290 self._node = repo._tagscache.tags[changeid]
291 self._rev = repo.changelog.rev(self._node)
291 self._rev = repo.changelog.rev(self._node)
292 return
292 return
293 try:
293 try:
294 self._node = repo.branchtip(changeid)
294 self._node = repo.branchtip(changeid)
295 self._rev = repo.changelog.rev(self._node)
295 self._rev = repo.changelog.rev(self._node)
296 return
296 return
297 except error.RepoLookupError:
297 except error.RepoLookupError:
298 pass
298 pass
299
299
300 self._node = repo.changelog._partialmatch(changeid)
300 self._node = repo.changelog._partialmatch(changeid)
301 if self._node is not None:
301 if self._node is not None:
302 self._rev = repo.changelog.rev(self._node)
302 self._rev = repo.changelog.rev(self._node)
303 return
303 return
304
304
305 # lookup failed
305 # lookup failed
306 # check if it might have come from damaged dirstate
306 # check if it might have come from damaged dirstate
307 #
307 #
308 # XXX we could avoid the unfiltered if we had a recognizable exception
308 # XXX we could avoid the unfiltered if we had a recognizable exception
309 # for filtered changeset access
309 # for filtered changeset access
310 if changeid in repo.unfiltered().dirstate.parents():
310 if changeid in repo.unfiltered().dirstate.parents():
311 raise error.Abort(_("working directory has unknown parent '%s'!")
311 raise error.Abort(_("working directory has unknown parent '%s'!")
312 % short(changeid))
312 % short(changeid))
313 try:
313 try:
314 if len(changeid) == 20:
314 if len(changeid) == 20:
315 changeid = hex(changeid)
315 changeid = hex(changeid)
316 except TypeError:
316 except TypeError:
317 pass
317 pass
318 raise error.RepoLookupError(
318 raise error.RepoLookupError(
319 _("unknown revision '%s'") % changeid)
319 _("unknown revision '%s'") % changeid)
320
320
321 def __hash__(self):
321 def __hash__(self):
322 try:
322 try:
323 return hash(self._rev)
323 return hash(self._rev)
324 except AttributeError:
324 except AttributeError:
325 return id(self)
325 return id(self)
326
326
327 def __nonzero__(self):
327 def __nonzero__(self):
328 return self._rev != nullrev
328 return self._rev != nullrev
329
329
330 @propertycache
330 @propertycache
331 def _changeset(self):
331 def _changeset(self):
332 return self._repo.changelog.read(self.rev())
332 return self._repo.changelog.read(self.rev())
333
333
334 @propertycache
334 @propertycache
335 def _manifest(self):
335 def _manifest(self):
336 return self._repo.manifest.read(self._changeset[0])
336 return self._repo.manifest.read(self._changeset[0])
337
337
338 @propertycache
338 @propertycache
339 def _manifestdelta(self):
339 def _manifestdelta(self):
340 return self._repo.manifest.readdelta(self._changeset[0])
340 return self._repo.manifest.readdelta(self._changeset[0])
341
341
342 @propertycache
342 @propertycache
343 def _parents(self):
343 def _parents(self):
344 p = self._repo.changelog.parentrevs(self._rev)
344 p = self._repo.changelog.parentrevs(self._rev)
345 if p[1] == nullrev:
345 if p[1] == nullrev:
346 p = p[:-1]
346 p = p[:-1]
347 return [changectx(self._repo, x) for x in p]
347 return [changectx(self._repo, x) for x in p]
348
348
349 def changeset(self):
349 def changeset(self):
350 return self._changeset
350 return self._changeset
351 def manifestnode(self):
351 def manifestnode(self):
352 return self._changeset[0]
352 return self._changeset[0]
353
353
354 def user(self):
354 def user(self):
355 return self._changeset[1]
355 return self._changeset[1]
356 def date(self):
356 def date(self):
357 return self._changeset[2]
357 return self._changeset[2]
358 def files(self):
358 def files(self):
359 return self._changeset[3]
359 return self._changeset[3]
360 def description(self):
360 def description(self):
361 return self._changeset[4]
361 return self._changeset[4]
362 def branch(self):
362 def branch(self):
363 return encoding.tolocal(self._changeset[5].get("branch"))
363 return encoding.tolocal(self._changeset[5].get("branch"))
364 def closesbranch(self):
364 def closesbranch(self):
365 return 'close' in self._changeset[5]
365 return 'close' in self._changeset[5]
366 def extra(self):
366 def extra(self):
367 return self._changeset[5]
367 return self._changeset[5]
368 def tags(self):
368 def tags(self):
369 return self._repo.nodetags(self._node)
369 return self._repo.nodetags(self._node)
370 def bookmarks(self):
370 def bookmarks(self):
371 return self._repo.nodebookmarks(self._node)
371 return self._repo.nodebookmarks(self._node)
372 def phase(self):
372 def phase(self):
373 return self._repo._phasecache.phase(self._repo, self._rev)
373 return self._repo._phasecache.phase(self._repo, self._rev)
374 def hidden(self):
374 def hidden(self):
375 return self._rev in repoview.filterrevs(self._repo, 'visible')
375 return self._rev in repoview.filterrevs(self._repo, 'visible')
376
376
377 def children(self):
377 def children(self):
378 """return contexts for each child changeset"""
378 """return contexts for each child changeset"""
379 c = self._repo.changelog.children(self._node)
379 c = self._repo.changelog.children(self._node)
380 return [changectx(self._repo, x) for x in c]
380 return [changectx(self._repo, x) for x in c]
381
381
382 def ancestors(self):
382 def ancestors(self):
383 for a in self._repo.changelog.ancestors([self._rev]):
383 for a in self._repo.changelog.ancestors([self._rev]):
384 yield changectx(self._repo, a)
384 yield changectx(self._repo, a)
385
385
386 def descendants(self):
386 def descendants(self):
387 for d in self._repo.changelog.descendants([self._rev]):
387 for d in self._repo.changelog.descendants([self._rev]):
388 yield changectx(self._repo, d)
388 yield changectx(self._repo, d)
389
389
390 def filectx(self, path, fileid=None, filelog=None):
390 def filectx(self, path, fileid=None, filelog=None):
391 """get a file context from this changeset"""
391 """get a file context from this changeset"""
392 if fileid is None:
392 if fileid is None:
393 fileid = self.filenode(path)
393 fileid = self.filenode(path)
394 return filectx(self._repo, path, fileid=fileid,
394 return filectx(self._repo, path, fileid=fileid,
395 changectx=self, filelog=filelog)
395 changectx=self, filelog=filelog)
396
396
- 397 def ancestor(self, c2):
+ 397 def ancestor(self, c2, warn=False):
398 """
398 """
399 return the "best" ancestor context of self and c2
399 return the "best" ancestor context of self and c2
400 """
400 """
401 # deal with workingctxs
401 # deal with workingctxs
402 n2 = c2._node
402 n2 = c2._node
403 if n2 is None:
403 if n2 is None:
404 n2 = c2._parents[0]._node
404 n2 = c2._parents[0]._node
405 cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
405 cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
406 if not cahs:
406 if not cahs:
407 anc = nullid
407 anc = nullid
408 elif len(cahs) == 1:
408 elif len(cahs) == 1:
409 anc = cahs[0]
409 anc = cahs[0]
410 else:
410 else:
411 for r in self._repo.ui.configlist('merge', 'preferancestor'):
411 for r in self._repo.ui.configlist('merge', 'preferancestor'):
412 ctx = changectx(self._repo, r)
412 ctx = changectx(self._repo, r)
413 anc = ctx.node()
413 anc = ctx.node()
414 if anc in cahs:
414 if anc in cahs:
415 break
415 break
416 else:
416 else:
417 anc = self._repo.changelog.ancestor(self._node, n2)
417 anc = self._repo.changelog.ancestor(self._node, n2)
- 418 self._repo.ui.status(
- 419 (_("note: using %s as ancestor of %s and %s\n") %
- 420 (short(anc), short(self._node), short(n2))) +
- 421 ''.join(_(" alternatively, use --config "
- 422 "merge.preferancestor=%s\n") %
- 423 short(n) for n in sorted(cahs) if n != anc))
+ 418 if warn:
+ 419 self._repo.ui.status(
+ 420 (_("note: using %s as ancestor of %s and %s\n") %
+ 421 (short(anc), short(self._node), short(n2))) +
+ 422 ''.join(_(" alternatively, use --config "
+ 423 "merge.preferancestor=%s\n") %
+ 424 short(n) for n in sorted(cahs) if n != anc))
424 return changectx(self._repo, anc)
425 return changectx(self._repo, anc)
425
426
426 def descendant(self, other):
427 def descendant(self, other):
427 """True if other is descendant of this changeset"""
428 """True if other is descendant of this changeset"""
428 return self._repo.changelog.descendant(self._rev, other._rev)
429 return self._repo.changelog.descendant(self._rev, other._rev)
429
430
430 def walk(self, match):
431 def walk(self, match):
431 fset = set(match.files())
432 fset = set(match.files())
432 # for dirstate.walk, files=['.'] means "walk the whole tree".
433 # for dirstate.walk, files=['.'] means "walk the whole tree".
433 # follow that here, too
434 # follow that here, too
434 fset.discard('.')
435 fset.discard('.')
435
436
436 # avoid the entire walk if we're only looking for specific files
437 # avoid the entire walk if we're only looking for specific files
437 if fset and not match.anypats():
438 if fset and not match.anypats():
438 if util.all([fn in self for fn in fset]):
439 if util.all([fn in self for fn in fset]):
439 for fn in sorted(fset):
440 for fn in sorted(fset):
440 if match(fn):
441 if match(fn):
441 yield fn
442 yield fn
442 raise StopIteration
443 raise StopIteration
443
444
444 for fn in self:
445 for fn in self:
445 if fn in fset:
446 if fn in fset:
446 # specified pattern is the exact name
447 # specified pattern is the exact name
447 fset.remove(fn)
448 fset.remove(fn)
448 if match(fn):
449 if match(fn):
449 yield fn
450 yield fn
450 for fn in sorted(fset):
451 for fn in sorted(fset):
451 if fn in self._dirs:
452 if fn in self._dirs:
452 # specified pattern is a directory
453 # specified pattern is a directory
453 continue
454 continue
454 match.bad(fn, _('no such file in rev %s') % self)
455 match.bad(fn, _('no such file in rev %s') % self)
455
456
456 class basefilectx(object):
457 class basefilectx(object):
457 """A filecontext object represents the common logic for its children:
458 """A filecontext object represents the common logic for its children:
458 filectx: read-only access to a filerevision that is already present
459 filectx: read-only access to a filerevision that is already present
459 in the repo,
460 in the repo,
460 workingfilectx: a filecontext that represents files from the working
461 workingfilectx: a filecontext that represents files from the working
461 directory,
462 directory,
462 memfilectx: a filecontext that represents files in-memory."""
463 memfilectx: a filecontext that represents files in-memory."""
463 def __new__(cls, repo, path, *args, **kwargs):
464 def __new__(cls, repo, path, *args, **kwargs):
464 return super(basefilectx, cls).__new__(cls)
465 return super(basefilectx, cls).__new__(cls)
465
466
466 @propertycache
467 @propertycache
467 def _filelog(self):
468 def _filelog(self):
468 return self._repo.file(self._path)
469 return self._repo.file(self._path)
469
470
470 @propertycache
471 @propertycache
471 def _changeid(self):
472 def _changeid(self):
472 if '_changeid' in self.__dict__:
473 if '_changeid' in self.__dict__:
473 return self._changeid
474 return self._changeid
474 elif '_changectx' in self.__dict__:
475 elif '_changectx' in self.__dict__:
475 return self._changectx.rev()
476 return self._changectx.rev()
476 else:
477 else:
477 return self._filelog.linkrev(self._filerev)
478 return self._filelog.linkrev(self._filerev)
478
479
479 @propertycache
480 @propertycache
480 def _filenode(self):
481 def _filenode(self):
481 if '_fileid' in self.__dict__:
482 if '_fileid' in self.__dict__:
482 return self._filelog.lookup(self._fileid)
483 return self._filelog.lookup(self._fileid)
483 else:
484 else:
484 return self._changectx.filenode(self._path)
485 return self._changectx.filenode(self._path)
485
486
486 @propertycache
487 @propertycache
487 def _filerev(self):
488 def _filerev(self):
488 return self._filelog.rev(self._filenode)
489 return self._filelog.rev(self._filenode)
489
490
490 @propertycache
491 @propertycache
491 def _repopath(self):
492 def _repopath(self):
492 return self._path
493 return self._path
493
494
494 def __nonzero__(self):
495 def __nonzero__(self):
495 try:
496 try:
496 self._filenode
497 self._filenode
497 return True
498 return True
498 except error.LookupError:
499 except error.LookupError:
499 # file is missing
500 # file is missing
500 return False
501 return False
501
502
502 def __str__(self):
503 def __str__(self):
503 return "%s@%s" % (self.path(), self._changectx)
504 return "%s@%s" % (self.path(), self._changectx)
504
505
505 def __repr__(self):
506 def __repr__(self):
506 return "<%s %s>" % (type(self).__name__, str(self))
507 return "<%s %s>" % (type(self).__name__, str(self))
507
508
508 def __hash__(self):
509 def __hash__(self):
509 try:
510 try:
510 return hash((self._path, self._filenode))
511 return hash((self._path, self._filenode))
511 except AttributeError:
512 except AttributeError:
512 return id(self)
513 return id(self)
513
514
514 def __eq__(self, other):
515 def __eq__(self, other):
515 try:
516 try:
516 return (type(self) == type(other) and self._path == other._path
517 return (type(self) == type(other) and self._path == other._path
517 and self._filenode == other._filenode)
518 and self._filenode == other._filenode)
518 except AttributeError:
519 except AttributeError:
519 return False
520 return False
520
521
521 def __ne__(self, other):
522 def __ne__(self, other):
522 return not (self == other)
523 return not (self == other)
523
524
524 def filerev(self):
525 def filerev(self):
525 return self._filerev
526 return self._filerev
526 def filenode(self):
527 def filenode(self):
527 return self._filenode
528 return self._filenode
528 def flags(self):
529 def flags(self):
529 return self._changectx.flags(self._path)
530 return self._changectx.flags(self._path)
530 def filelog(self):
531 def filelog(self):
531 return self._filelog
532 return self._filelog
532 def rev(self):
533 def rev(self):
533 return self._changeid
534 return self._changeid
534 def linkrev(self):
535 def linkrev(self):
535 return self._filelog.linkrev(self._filerev)
536 return self._filelog.linkrev(self._filerev)
536 def node(self):
537 def node(self):
537 return self._changectx.node()
538 return self._changectx.node()
538 def hex(self):
539 def hex(self):
539 return self._changectx.hex()
540 return self._changectx.hex()
540 def user(self):
541 def user(self):
541 return self._changectx.user()
542 return self._changectx.user()
542 def date(self):
543 def date(self):
543 return self._changectx.date()
544 return self._changectx.date()
544 def files(self):
545 def files(self):
545 return self._changectx.files()
546 return self._changectx.files()
546 def description(self):
547 def description(self):
547 return self._changectx.description()
548 return self._changectx.description()
548 def branch(self):
549 def branch(self):
549 return self._changectx.branch()
550 return self._changectx.branch()
550 def extra(self):
551 def extra(self):
551 return self._changectx.extra()
552 return self._changectx.extra()
552 def phase(self):
553 def phase(self):
553 return self._changectx.phase()
554 return self._changectx.phase()
554 def phasestr(self):
555 def phasestr(self):
555 return self._changectx.phasestr()
556 return self._changectx.phasestr()
556 def manifest(self):
557 def manifest(self):
557 return self._changectx.manifest()
558 return self._changectx.manifest()
558 def changectx(self):
559 def changectx(self):
559 return self._changectx
560 return self._changectx
560
561
561 def path(self):
562 def path(self):
562 return self._path
563 return self._path
563
564
564 def isbinary(self):
565 def isbinary(self):
565 try:
566 try:
566 return util.binary(self.data())
567 return util.binary(self.data())
567 except IOError:
568 except IOError:
568 return False
569 return False
569
570
570 def cmp(self, fctx):
571 def cmp(self, fctx):
571 """compare with other file context
572 """compare with other file context
572
573
573 returns True if different than fctx.
574 returns True if different than fctx.
574 """
575 """
575 if (fctx._filerev is None
576 if (fctx._filerev is None
576 and (self._repo._encodefilterpats
577 and (self._repo._encodefilterpats
577 # if file data starts with '\1\n', empty metadata block is
578 # if file data starts with '\1\n', empty metadata block is
578 # prepended, which adds 4 bytes to filelog.size().
579 # prepended, which adds 4 bytes to filelog.size().
579 or self.size() - 4 == fctx.size())
580 or self.size() - 4 == fctx.size())
580 or self.size() == fctx.size()):
581 or self.size() == fctx.size()):
581 return self._filelog.cmp(self._filenode, fctx.data())
582 return self._filelog.cmp(self._filenode, fctx.data())
582
583
583 return True
584 return True
584
585
585 def parents(self):
586 def parents(self):
586 p = self._path
587 p = self._path
587 fl = self._filelog
588 fl = self._filelog
588 pl = [(p, n, fl) for n in self._filelog.parents(self._filenode)]
589 pl = [(p, n, fl) for n in self._filelog.parents(self._filenode)]
589
590
590 r = self._filelog.renamed(self._filenode)
591 r = self._filelog.renamed(self._filenode)
591 if r:
592 if r:
592 pl[0] = (r[0], r[1], None)
593 pl[0] = (r[0], r[1], None)
593
594
594 return [filectx(self._repo, p, fileid=n, filelog=l)
595 return [filectx(self._repo, p, fileid=n, filelog=l)
595 for p, n, l in pl if n != nullid]
596 for p, n, l in pl if n != nullid]
596
597
597 def p1(self):
598 def p1(self):
598 return self.parents()[0]
599 return self.parents()[0]
599
600
600 def p2(self):
601 def p2(self):
601 p = self.parents()
602 p = self.parents()
602 if len(p) == 2:
603 if len(p) == 2:
603 return p[1]
604 return p[1]
604 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
605 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
605
606
606 def annotate(self, follow=False, linenumber=None, diffopts=None):
607 def annotate(self, follow=False, linenumber=None, diffopts=None):
607 '''returns a list of tuples of (ctx, line) for each line
608 '''returns a list of tuples of (ctx, line) for each line
608 in the file, where ctx is the filectx of the node where
609 in the file, where ctx is the filectx of the node where
609 that line was last changed.
610 that line was last changed.
610 This returns tuples of ((ctx, linenumber), line) for each line,
611 This returns tuples of ((ctx, linenumber), line) for each line,
611 if "linenumber" parameter is NOT "None".
612 if "linenumber" parameter is NOT "None".
612 In such tuples, linenumber means one at the first appearance
613 In such tuples, linenumber means one at the first appearance
613 in the managed file.
614 in the managed file.
614 To reduce annotation cost,
615 To reduce annotation cost,
615 this returns fixed value(False is used) as linenumber,
616 this returns fixed value(False is used) as linenumber,
616 if "linenumber" parameter is "False".'''
617 if "linenumber" parameter is "False".'''
617
618
618 def decorate_compat(text, rev):
619 def decorate_compat(text, rev):
619 return ([rev] * len(text.splitlines()), text)
620 return ([rev] * len(text.splitlines()), text)
620
621
621 def without_linenumber(text, rev):
622 def without_linenumber(text, rev):
622 return ([(rev, False)] * len(text.splitlines()), text)
623 return ([(rev, False)] * len(text.splitlines()), text)
623
624
624 def with_linenumber(text, rev):
625 def with_linenumber(text, rev):
625 size = len(text.splitlines())
626 size = len(text.splitlines())
626 return ([(rev, i) for i in xrange(1, size + 1)], text)
627 return ([(rev, i) for i in xrange(1, size + 1)], text)
627
628
628 decorate = (((linenumber is None) and decorate_compat) or
629 decorate = (((linenumber is None) and decorate_compat) or
629 (linenumber and with_linenumber) or
630 (linenumber and with_linenumber) or
630 without_linenumber)
631 without_linenumber)
631
632
632 def pair(parent, child):
633 def pair(parent, child):
633 blocks = mdiff.allblocks(parent[1], child[1], opts=diffopts,
634 blocks = mdiff.allblocks(parent[1], child[1], opts=diffopts,
634 refine=True)
635 refine=True)
635 for (a1, a2, b1, b2), t in blocks:
636 for (a1, a2, b1, b2), t in blocks:
636 # Changed blocks ('!') or blocks made only of blank lines ('~')
637 # Changed blocks ('!') or blocks made only of blank lines ('~')
637 # belong to the child.
638 # belong to the child.
638 if t == '=':
639 if t == '=':
639 child[0][b1:b2] = parent[0][a1:a2]
640 child[0][b1:b2] = parent[0][a1:a2]
640 return child
641 return child
641
642
642 getlog = util.lrucachefunc(lambda x: self._repo.file(x))
643 getlog = util.lrucachefunc(lambda x: self._repo.file(x))
643
644
644 def parents(f):
645 def parents(f):
645 pl = f.parents()
646 pl = f.parents()
646
647
647 # Don't return renamed parents if we aren't following.
648 # Don't return renamed parents if we aren't following.
648 if not follow:
649 if not follow:
649 pl = [p for p in pl if p.path() == f.path()]
650 pl = [p for p in pl if p.path() == f.path()]
650
651
651 # renamed filectx won't have a filelog yet, so set it
652 # renamed filectx won't have a filelog yet, so set it
652 # from the cache to save time
653 # from the cache to save time
653 for p in pl:
654 for p in pl:
654 if not '_filelog' in p.__dict__:
655 if not '_filelog' in p.__dict__:
655 p._filelog = getlog(p.path())
656 p._filelog = getlog(p.path())
656
657
657 return pl
658 return pl
658
659
659 # use linkrev to find the first changeset where self appeared
660 # use linkrev to find the first changeset where self appeared
660 if self.rev() != self.linkrev():
661 if self.rev() != self.linkrev():
661 base = self.filectx(self.filenode())
662 base = self.filectx(self.filenode())
662 else:
663 else:
663 base = self
664 base = self
664
665
665 # This algorithm would prefer to be recursive, but Python is a
666 # This algorithm would prefer to be recursive, but Python is a
666 # bit recursion-hostile. Instead we do an iterative
667 # bit recursion-hostile. Instead we do an iterative
667 # depth-first search.
668 # depth-first search.
668
669
669 visit = [base]
670 visit = [base]
670 hist = {}
671 hist = {}
671 pcache = {}
672 pcache = {}
672 needed = {base: 1}
673 needed = {base: 1}
673 while visit:
674 while visit:
674 f = visit[-1]
675 f = visit[-1]
675 pcached = f in pcache
676 pcached = f in pcache
676 if not pcached:
677 if not pcached:
677 pcache[f] = parents(f)
678 pcache[f] = parents(f)
678
679
679 ready = True
680 ready = True
680 pl = pcache[f]
681 pl = pcache[f]
681 for p in pl:
682 for p in pl:
682 if p not in hist:
683 if p not in hist:
683 ready = False
684 ready = False
684 visit.append(p)
685 visit.append(p)
685 if not pcached:
686 if not pcached:
686 needed[p] = needed.get(p, 0) + 1
687 needed[p] = needed.get(p, 0) + 1
687 if ready:
688 if ready:
688 visit.pop()
689 visit.pop()
689 reusable = f in hist
690 reusable = f in hist
690 if reusable:
691 if reusable:
691 curr = hist[f]
692 curr = hist[f]
692 else:
693 else:
693 curr = decorate(f.data(), f)
694 curr = decorate(f.data(), f)
694 for p in pl:
695 for p in pl:
695 if not reusable:
696 if not reusable:
696 curr = pair(hist[p], curr)
697 curr = pair(hist[p], curr)
697 if needed[p] == 1:
698 if needed[p] == 1:
698 del hist[p]
699 del hist[p]
699 del needed[p]
700 del needed[p]
700 else:
701 else:
701 needed[p] -= 1
702 needed[p] -= 1
702
703
703 hist[f] = curr
704 hist[f] = curr
704 pcache[f] = []
705 pcache[f] = []
705
706
706 return zip(hist[base][0], hist[base][1].splitlines(True))
707 return zip(hist[base][0], hist[base][1].splitlines(True))
707
708
708 def ancestors(self, followfirst=False):
709 def ancestors(self, followfirst=False):
709 visit = {}
710 visit = {}
710 c = self
711 c = self
711 cut = followfirst and 1 or None
712 cut = followfirst and 1 or None
712 while True:
713 while True:
713 for parent in c.parents()[:cut]:
714 for parent in c.parents()[:cut]:
714 visit[(parent.rev(), parent.node())] = parent
715 visit[(parent.rev(), parent.node())] = parent
715 if not visit:
716 if not visit:
716 break
717 break
717 c = visit.pop(max(visit))
718 c = visit.pop(max(visit))
718 yield c
719 yield c
719
720
720 class filectx(basefilectx):
721 class filectx(basefilectx):
721 """A filecontext object makes access to data related to a particular
722 """A filecontext object makes access to data related to a particular
722 filerevision convenient."""
723 filerevision convenient."""
723 def __init__(self, repo, path, changeid=None, fileid=None,
724 def __init__(self, repo, path, changeid=None, fileid=None,
724 filelog=None, changectx=None):
725 filelog=None, changectx=None):
725 """changeid can be a changeset revision, node, or tag.
726 """changeid can be a changeset revision, node, or tag.
726 fileid can be a file revision or node."""
727 fileid can be a file revision or node."""
727 self._repo = repo
728 self._repo = repo
728 self._path = path
729 self._path = path
729
730
730 assert (changeid is not None
731 assert (changeid is not None
731 or fileid is not None
732 or fileid is not None
732 or changectx is not None), \
733 or changectx is not None), \
733 ("bad args: changeid=%r, fileid=%r, changectx=%r"
734 ("bad args: changeid=%r, fileid=%r, changectx=%r"
734 % (changeid, fileid, changectx))
735 % (changeid, fileid, changectx))
735
736
736 if filelog is not None:
737 if filelog is not None:
737 self._filelog = filelog
738 self._filelog = filelog
738
739
739 if changeid is not None:
740 if changeid is not None:
740 self._changeid = changeid
741 self._changeid = changeid
741 if changectx is not None:
742 if changectx is not None:
742 self._changectx = changectx
743 self._changectx = changectx
743 if fileid is not None:
744 if fileid is not None:
744 self._fileid = fileid
745 self._fileid = fileid
745
746
746 @propertycache
747 @propertycache
747 def _changectx(self):
748 def _changectx(self):
748 try:
749 try:
749 return changectx(self._repo, self._changeid)
750 return changectx(self._repo, self._changeid)
750 except error.RepoLookupError:
751 except error.RepoLookupError:
751 # Linkrev may point to any revision in the repository. When the
752 # Linkrev may point to any revision in the repository. When the
752 # repository is filtered this may lead to `filectx` trying to build
753 # repository is filtered this may lead to `filectx` trying to build
753 # `changectx` for filtered revision. In such case we fallback to
754 # `changectx` for filtered revision. In such case we fallback to
754 # creating `changectx` on the unfiltered version of the reposition.
755 # creating `changectx` on the unfiltered version of the reposition.
755 # This fallback should not be an issue because `changectx` from
756 # This fallback should not be an issue because `changectx` from
756 # `filectx` are not used in complex operations that care about
757 # `filectx` are not used in complex operations that care about
757 # filtering.
758 # filtering.
758 #
759 #
759 # This fallback is a cheap and dirty fix that prevent several
760 # This fallback is a cheap and dirty fix that prevent several
760 # crashes. It does not ensure the behavior is correct. However the
761 # crashes. It does not ensure the behavior is correct. However the
761 # behavior was not correct before filtering either and "incorrect
762 # behavior was not correct before filtering either and "incorrect
762 # behavior" is seen as better as "crash"
763 # behavior" is seen as better as "crash"
763 #
764 #
764 # Linkrevs have several serious troubles with filtering that are
765 # Linkrevs have several serious troubles with filtering that are
765 # complicated to solve. Proper handling of the issue here should be
766 # complicated to solve. Proper handling of the issue here should be
766 # considered when solving linkrev issue are on the table.
767 # considered when solving linkrev issue are on the table.
767 return changectx(self._repo.unfiltered(), self._changeid)
768 return changectx(self._repo.unfiltered(), self._changeid)
768
769
769 def filectx(self, fileid):
770 def filectx(self, fileid):
770 '''opens an arbitrary revision of the file without
771 '''opens an arbitrary revision of the file without
771 opening a new filelog'''
772 opening a new filelog'''
772 return filectx(self._repo, self._path, fileid=fileid,
773 return filectx(self._repo, self._path, fileid=fileid,
773 filelog=self._filelog)
774 filelog=self._filelog)
774
775
775 def data(self):
776 def data(self):
776 return self._filelog.read(self._filenode)
777 return self._filelog.read(self._filenode)
777 def size(self):
778 def size(self):
778 return self._filelog.size(self._filerev)
779 return self._filelog.size(self._filerev)
779
780
780 def renamed(self):
781 def renamed(self):
781 """check if file was actually renamed in this changeset revision
782 """check if file was actually renamed in this changeset revision
782
783
783 If rename logged in file revision, we report copy for changeset only
784 If rename logged in file revision, we report copy for changeset only
784 if file revisions linkrev points back to the changeset in question
785 if file revisions linkrev points back to the changeset in question
785 or both changeset parents contain different file revisions.
786 or both changeset parents contain different file revisions.
786 """
787 """
787
788
788 renamed = self._filelog.renamed(self._filenode)
789 renamed = self._filelog.renamed(self._filenode)
789 if not renamed:
790 if not renamed:
790 return renamed
791 return renamed
791
792
792 if self.rev() == self.linkrev():
793 if self.rev() == self.linkrev():
793 return renamed
794 return renamed
794
795
795 name = self.path()
796 name = self.path()
796 fnode = self._filenode
797 fnode = self._filenode
797 for p in self._changectx.parents():
798 for p in self._changectx.parents():
798 try:
799 try:
799 if fnode == p.filenode(name):
800 if fnode == p.filenode(name):
800 return None
801 return None
801 except error.LookupError:
802 except error.LookupError:
802 pass
803 pass
803 return renamed
804 return renamed
804
805
805 def children(self):
806 def children(self):
806 # hard for renames
807 # hard for renames
807 c = self._filelog.children(self._filenode)
808 c = self._filelog.children(self._filenode)
808 return [filectx(self._repo, self._path, fileid=x,
809 return [filectx(self._repo, self._path, fileid=x,
809 filelog=self._filelog) for x in c]
810 filelog=self._filelog) for x in c]
810
811
811 class committablectx(basectx):
812 class committablectx(basectx):
812 """A committablectx object provides common functionality for a context that
813 """A committablectx object provides common functionality for a context that
813 wants the ability to commit, e.g. workingctx or memctx."""
814 wants the ability to commit, e.g. workingctx or memctx."""
814 def __init__(self, repo, text="", user=None, date=None, extra=None,
815 def __init__(self, repo, text="", user=None, date=None, extra=None,
815 changes=None):
816 changes=None):
816 self._repo = repo
817 self._repo = repo
817 self._rev = None
818 self._rev = None
818 self._node = None
819 self._node = None
819 self._text = text
820 self._text = text
820 if date:
821 if date:
821 self._date = util.parsedate(date)
822 self._date = util.parsedate(date)
822 if user:
823 if user:
823 self._user = user
824 self._user = user
824 if changes:
825 if changes:
825 self._status = list(changes[:4])
826 self._status = list(changes[:4])
826 self._unknown = changes[4]
827 self._unknown = changes[4]
827 self._ignored = changes[5]
828 self._ignored = changes[5]
828 self._clean = changes[6]
829 self._clean = changes[6]
829 else:
830 else:
830 self._unknown = None
831 self._unknown = None
831 self._ignored = None
832 self._ignored = None
832 self._clean = None
833 self._clean = None
833
834
834 self._extra = {}
835 self._extra = {}
835 if extra:
836 if extra:
836 self._extra = extra.copy()
837 self._extra = extra.copy()
837 if 'branch' not in self._extra:
838 if 'branch' not in self._extra:
838 try:
839 try:
839 branch = encoding.fromlocal(self._repo.dirstate.branch())
840 branch = encoding.fromlocal(self._repo.dirstate.branch())
840 except UnicodeDecodeError:
841 except UnicodeDecodeError:
841 raise util.Abort(_('branch name not in UTF-8!'))
842 raise util.Abort(_('branch name not in UTF-8!'))
842 self._extra['branch'] = branch
843 self._extra['branch'] = branch
843 if self._extra['branch'] == '':
844 if self._extra['branch'] == '':
844 self._extra['branch'] = 'default'
845 self._extra['branch'] = 'default'
845
846
846 def __str__(self):
847 def __str__(self):
847 return str(self._parents[0]) + "+"
848 return str(self._parents[0]) + "+"
848
849
849 def __nonzero__(self):
850 def __nonzero__(self):
850 return True
851 return True
851
852
852 def __contains__(self, key):
853 def __contains__(self, key):
853 return self._repo.dirstate[key] not in "?r"
854 return self._repo.dirstate[key] not in "?r"
854
855
855 def _buildflagfunc(self):
856 def _buildflagfunc(self):
856 # Create a fallback function for getting file flags when the
857 # Create a fallback function for getting file flags when the
857 # filesystem doesn't support them
858 # filesystem doesn't support them
858
859
859 copiesget = self._repo.dirstate.copies().get
860 copiesget = self._repo.dirstate.copies().get
860
861
861 if len(self._parents) < 2:
862 if len(self._parents) < 2:
862 # when we have one parent, it's easy: copy from parent
863 # when we have one parent, it's easy: copy from parent
863 man = self._parents[0].manifest()
864 man = self._parents[0].manifest()
864 def func(f):
865 def func(f):
865 f = copiesget(f, f)
866 f = copiesget(f, f)
866 return man.flags(f)
867 return man.flags(f)
867 else:
868 else:
868 # merges are tricky: we try to reconstruct the unstored
869 # merges are tricky: we try to reconstruct the unstored
869 # result from the merge (issue1802)
870 # result from the merge (issue1802)
870 p1, p2 = self._parents
871 p1, p2 = self._parents
871 pa = p1.ancestor(p2)
872 pa = p1.ancestor(p2)
872 m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()
873 m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()
873
874
874 def func(f):
875 def func(f):
875 f = copiesget(f, f) # may be wrong for merges with copies
876 f = copiesget(f, f) # may be wrong for merges with copies
876 fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
877 fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
877 if fl1 == fl2:
878 if fl1 == fl2:
878 return fl1
879 return fl1
879 if fl1 == fla:
880 if fl1 == fla:
880 return fl2
881 return fl2
881 if fl2 == fla:
882 if fl2 == fla:
882 return fl1
883 return fl1
883 return '' # punt for conflicts
884 return '' # punt for conflicts
884
885
885 return func
886 return func
886
887
887 @propertycache
888 @propertycache
888 def _flagfunc(self):
889 def _flagfunc(self):
889 return self._repo.dirstate.flagfunc(self._buildflagfunc)
890 return self._repo.dirstate.flagfunc(self._buildflagfunc)
890
891
891 @propertycache
892 @propertycache
892 def _manifest(self):
893 def _manifest(self):
893 """generate a manifest corresponding to the working directory"""
894 """generate a manifest corresponding to the working directory"""
894
895
895 man = self._parents[0].manifest().copy()
896 man = self._parents[0].manifest().copy()
896 if len(self._parents) > 1:
897 if len(self._parents) > 1:
897 man2 = self.p2().manifest()
898 man2 = self.p2().manifest()
898 def getman(f):
899 def getman(f):
899 if f in man:
900 if f in man:
900 return man
901 return man
901 return man2
902 return man2
902 else:
903 else:
903 getman = lambda f: man
904 getman = lambda f: man
904
905
905 copied = self._repo.dirstate.copies()
906 copied = self._repo.dirstate.copies()
906 ff = self._flagfunc
907 ff = self._flagfunc
907 modified, added, removed, deleted = self._status
908 modified, added, removed, deleted = self._status
908 for i, l in (("a", added), ("m", modified)):
909 for i, l in (("a", added), ("m", modified)):
909 for f in l:
910 for f in l:
910 orig = copied.get(f, f)
911 orig = copied.get(f, f)
911 man[f] = getman(orig).get(orig, nullid) + i
912 man[f] = getman(orig).get(orig, nullid) + i
912 try:
913 try:
913 man.set(f, ff(f))
914 man.set(f, ff(f))
914 except OSError:
915 except OSError:
915 pass
916 pass
916
917
917 for f in deleted + removed:
918 for f in deleted + removed:
918 if f in man:
919 if f in man:
919 del man[f]
920 del man[f]
920
921
921 return man
922 return man
922
923
923 @propertycache
924 @propertycache
924 def _status(self):
925 def _status(self):
925 return self._repo.status()[:4]
926 return self._repo.status()[:4]
926
927
927 @propertycache
928 @propertycache
928 def _user(self):
929 def _user(self):
929 return self._repo.ui.username()
930 return self._repo.ui.username()
930
931
931 @propertycache
932 @propertycache
932 def _date(self):
933 def _date(self):
933 return util.makedate()
934 return util.makedate()
934
935
935 def status(self, ignored=False, clean=False, unknown=False):
936 def status(self, ignored=False, clean=False, unknown=False):
936 """Explicit status query
937 """Explicit status query
937 Unless this method is used to query the working copy status, the
938 Unless this method is used to query the working copy status, the
938 _status property will implicitly read the status using its default
939 _status property will implicitly read the status using its default
939 arguments."""
940 arguments."""
940 stat = self._repo.status(ignored=ignored, clean=clean, unknown=unknown)
941 stat = self._repo.status(ignored=ignored, clean=clean, unknown=unknown)
941 self._unknown = self._ignored = self._clean = None
942 self._unknown = self._ignored = self._clean = None
942 if unknown:
943 if unknown:
943 self._unknown = stat[4]
944 self._unknown = stat[4]
944 if ignored:
945 if ignored:
945 self._ignored = stat[5]
946 self._ignored = stat[5]
946 if clean:
947 if clean:
947 self._clean = stat[6]
948 self._clean = stat[6]
948 self._status = stat[:4]
949 self._status = stat[:4]
949 return stat
950 return stat
950
951
951 def user(self):
952 def user(self):
952 return self._user or self._repo.ui.username()
953 return self._user or self._repo.ui.username()
953 def date(self):
954 def date(self):
954 return self._date
955 return self._date
955 def description(self):
956 def description(self):
956 return self._text
957 return self._text
957 def files(self):
958 def files(self):
958 return sorted(self._status[0] + self._status[1] + self._status[2])
959 return sorted(self._status[0] + self._status[1] + self._status[2])
959
960
960 def modified(self):
961 def modified(self):
961 return self._status[0]
962 return self._status[0]
962 def added(self):
963 def added(self):
963 return self._status[1]
964 return self._status[1]
964 def removed(self):
965 def removed(self):
965 return self._status[2]
966 return self._status[2]
966 def deleted(self):
967 def deleted(self):
967 return self._status[3]
968 return self._status[3]
968 def unknown(self):
969 def unknown(self):
969 assert self._unknown is not None # must call status first
970 assert self._unknown is not None # must call status first
970 return self._unknown
971 return self._unknown
971 def ignored(self):
972 def ignored(self):
972 assert self._ignored is not None # must call status first
973 assert self._ignored is not None # must call status first
973 return self._ignored
974 return self._ignored
974 def clean(self):
975 def clean(self):
975 assert self._clean is not None # must call status first
976 assert self._clean is not None # must call status first
976 return self._clean
977 return self._clean
977 def branch(self):
978 def branch(self):
978 return encoding.tolocal(self._extra['branch'])
979 return encoding.tolocal(self._extra['branch'])
979 def closesbranch(self):
980 def closesbranch(self):
980 return 'close' in self._extra
981 return 'close' in self._extra
981 def extra(self):
982 def extra(self):
982 return self._extra
983 return self._extra
983
984
984 def tags(self):
985 def tags(self):
985 t = []
986 t = []
986 for p in self.parents():
987 for p in self.parents():
987 t.extend(p.tags())
988 t.extend(p.tags())
988 return t
989 return t
989
990
990 def bookmarks(self):
991 def bookmarks(self):
991 b = []
992 b = []
992 for p in self.parents():
993 for p in self.parents():
993 b.extend(p.bookmarks())
994 b.extend(p.bookmarks())
994 return b
995 return b
995
996
996 def phase(self):
997 def phase(self):
997 phase = phases.draft # default phase to draft
998 phase = phases.draft # default phase to draft
998 for p in self.parents():
999 for p in self.parents():
999 phase = max(phase, p.phase())
1000 phase = max(phase, p.phase())
1000 return phase
1001 return phase
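# Sketch of the ordering relied on above: in phases.py the phases are small
# integers, public (0) < draft (1) < secret (2), so max() picks the most
# restrictive phase among the parents and a pending commit is never more
# public than draft, e.g.:
#   max(phases.public, phases.secret) == phases.secret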
1001
1002
1002 def hidden(self):
1003 def hidden(self):
1003 return False
1004 return False
1004
1005
1005 def children(self):
1006 def children(self):
1006 return []
1007 return []
1007
1008
1008 def flags(self, path):
1009 def flags(self, path):
1009 if '_manifest' in self.__dict__:
1010 if '_manifest' in self.__dict__:
1010 try:
1011 try:
1011 return self._manifest.flags(path)
1012 return self._manifest.flags(path)
1012 except KeyError:
1013 except KeyError:
1013 return ''
1014 return ''
1014
1015
1015 try:
1016 try:
1016 return self._flagfunc(path)
1017 return self._flagfunc(path)
1017 except OSError:
1018 except OSError:
1018 return ''
1019 return ''
1019
1020
1020 def ancestor(self, c2):
1021 def ancestor(self, c2):
1021 """return the ancestor context of self and c2"""
1022 """return the ancestor context of self and c2"""
1022 return self._parents[0].ancestor(c2) # punt on two parents for now
1023 return self._parents[0].ancestor(c2) # punt on two parents for now
1023
1024
1024 def walk(self, match):
1025 def walk(self, match):
1025 return sorted(self._repo.dirstate.walk(match, sorted(self.substate),
1026 return sorted(self._repo.dirstate.walk(match, sorted(self.substate),
1026 True, False))
1027 True, False))
1027
1028
1028 def ancestors(self):
1029 def ancestors(self):
1029 for a in self._repo.changelog.ancestors(
1030 for a in self._repo.changelog.ancestors(
1030 [p.rev() for p in self._parents]):
1031 [p.rev() for p in self._parents]):
1031 yield changectx(self._repo, a)
1032 yield changectx(self._repo, a)
1032
1033
1033 def markcommitted(self, node):
1034 def markcommitted(self, node):
1034 """Perform post-commit cleanup necessary after committing this ctx
1035 """Perform post-commit cleanup necessary after committing this ctx
1035
1036
1036 Specifically, this updates backing stores this working context
1037 Specifically, this updates backing stores this working context
1037 wraps to reflect the fact that the changes reflected by this
1038 wraps to reflect the fact that the changes reflected by this
1038 workingctx have been committed. For example, it marks
1039 workingctx have been committed. For example, it marks
1039 modified and added files as normal in the dirstate.
1040 modified and added files as normal in the dirstate.
1040
1041
1041 """
1042 """
1042
1043
1043 for f in self.modified() + self.added():
1044 for f in self.modified() + self.added():
1044 self._repo.dirstate.normal(f)
1045 self._repo.dirstate.normal(f)
1045 for f in self.removed():
1046 for f in self.removed():
1046 self._repo.dirstate.drop(f)
1047 self._repo.dirstate.drop(f)
1047 self._repo.dirstate.setparents(node)
1048 self._repo.dirstate.setparents(node)
1048
1049
1049 def dirs(self):
1050 def dirs(self):
1050 return self._repo.dirstate.dirs()
1051 return self._repo.dirstate.dirs()
1051
1052
1052 class workingctx(committablectx):
1053 class workingctx(committablectx):
1053 """A workingctx object makes access to data related to
1054 """A workingctx object makes access to data related to
1054 the current working directory convenient.
1055 the current working directory convenient.
1055 date - any valid date string or (unixtime, offset), or None.
1056 date - any valid date string or (unixtime, offset), or None.
1056 user - username string, or None.
1057 user - username string, or None.
1057 extra - a dictionary of extra values, or None.
1058 extra - a dictionary of extra values, or None.
1058 changes - a list of file lists as returned by localrepo.status()
1059 changes - a list of file lists as returned by localrepo.status()
1059 or None to use the repository status.
1060 or None to use the repository status.
1060 """
1061 """
1061 def __init__(self, repo, text="", user=None, date=None, extra=None,
1062 def __init__(self, repo, text="", user=None, date=None, extra=None,
1062 changes=None):
1063 changes=None):
1063 super(workingctx, self).__init__(repo, text, user, date, extra, changes)
1064 super(workingctx, self).__init__(repo, text, user, date, extra, changes)
1064
1065
1065 def __iter__(self):
1066 def __iter__(self):
1066 d = self._repo.dirstate
1067 d = self._repo.dirstate
1067 for f in d:
1068 for f in d:
1068 if d[f] != 'r':
1069 if d[f] != 'r':
1069 yield f
1070 yield f
1070
1071
1071 @propertycache
1072 @propertycache
1072 def _parents(self):
1073 def _parents(self):
1073 p = self._repo.dirstate.parents()
1074 p = self._repo.dirstate.parents()
1074 if p[1] == nullid:
1075 if p[1] == nullid:
1075 p = p[:-1]
1076 p = p[:-1]
1076 return [changectx(self._repo, x) for x in p]
1077 return [changectx(self._repo, x) for x in p]
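# Descriptive note: dirstate.parents() always returns a pair of nodes; a
# nullid second entry simply means "no merge in progress", so it is trimmed
# here and most working copies end up with a single-element parent list.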
1077
1078
1078 def filectx(self, path, filelog=None):
1079 def filectx(self, path, filelog=None):
1079 """get a file context from the working directory"""
1080 """get a file context from the working directory"""
1080 return workingfilectx(self._repo, path, workingctx=self,
1081 return workingfilectx(self._repo, path, workingctx=self,
1081 filelog=filelog)
1082 filelog=filelog)
1082
1083
1083 def dirty(self, missing=False, merge=True, branch=True):
1084 def dirty(self, missing=False, merge=True, branch=True):
1084 "check whether a working directory is modified"
1085 "check whether a working directory is modified"
1085 # check subrepos first
1086 # check subrepos first
1086 for s in sorted(self.substate):
1087 for s in sorted(self.substate):
1087 if self.sub(s).dirty():
1088 if self.sub(s).dirty():
1088 return True
1089 return True
1089 # check current working dir
1090 # check current working dir
1090 return ((merge and self.p2()) or
1091 return ((merge and self.p2()) or
1091 (branch and self.branch() != self.p1().branch()) or
1092 (branch and self.branch() != self.p1().branch()) or
1092 self.modified() or self.added() or self.removed() or
1093 self.modified() or self.added() or self.removed() or
1093 (missing and self.deleted()))
1094 (missing and self.deleted()))
1094
1095
1095 def add(self, list, prefix=""):
1096 def add(self, list, prefix=""):
1096 join = lambda f: os.path.join(prefix, f)
1097 join = lambda f: os.path.join(prefix, f)
1097 wlock = self._repo.wlock()
1098 wlock = self._repo.wlock()
1098 ui, ds = self._repo.ui, self._repo.dirstate
1099 ui, ds = self._repo.ui, self._repo.dirstate
1099 try:
1100 try:
1100 rejected = []
1101 rejected = []
1101 lstat = self._repo.wvfs.lstat
1102 lstat = self._repo.wvfs.lstat
1102 for f in list:
1103 for f in list:
1103 scmutil.checkportable(ui, join(f))
1104 scmutil.checkportable(ui, join(f))
1104 try:
1105 try:
1105 st = lstat(f)
1106 st = lstat(f)
1106 except OSError:
1107 except OSError:
1107 ui.warn(_("%s does not exist!\n") % join(f))
1108 ui.warn(_("%s does not exist!\n") % join(f))
1108 rejected.append(f)
1109 rejected.append(f)
1109 continue
1110 continue
1110 if st.st_size > 10000000:
1111 if st.st_size > 10000000:
1111 ui.warn(_("%s: up to %d MB of RAM may be required "
1112 ui.warn(_("%s: up to %d MB of RAM may be required "
1112 "to manage this file\n"
1113 "to manage this file\n"
1113 "(use 'hg revert %s' to cancel the "
1114 "(use 'hg revert %s' to cancel the "
1114 "pending addition)\n")
1115 "pending addition)\n")
1115 % (f, 3 * st.st_size // 1000000, join(f)))
1116 % (f, 3 * st.st_size // 1000000, join(f)))
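# Rough arithmetic behind the warning above: the threshold is 10,000,000
# bytes and the estimate is 3 * size // 1000000, so e.g. a 50,000,000-byte
# file triggers a warning of "up to 150 MB of RAM".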
1116 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1117 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1117 ui.warn(_("%s not added: only files and symlinks "
1118 ui.warn(_("%s not added: only files and symlinks "
1118 "supported currently\n") % join(f))
1119 "supported currently\n") % join(f))
1119 rejected.append(f)
1120 rejected.append(f)
1120 elif ds[f] in 'amn':
1121 elif ds[f] in 'amn':
1121 ui.warn(_("%s already tracked!\n") % join(f))
1122 ui.warn(_("%s already tracked!\n") % join(f))
1122 elif ds[f] == 'r':
1123 elif ds[f] == 'r':
1123 ds.normallookup(f)
1124 ds.normallookup(f)
1124 else:
1125 else:
1125 ds.add(f)
1126 ds.add(f)
1126 return rejected
1127 return rejected
1127 finally:
1128 finally:
1128 wlock.release()
1129 wlock.release()
1129
1130
1130 def forget(self, files, prefix=""):
1131 def forget(self, files, prefix=""):
1131 join = lambda f: os.path.join(prefix, f)
1132 join = lambda f: os.path.join(prefix, f)
1132 wlock = self._repo.wlock()
1133 wlock = self._repo.wlock()
1133 try:
1134 try:
1134 rejected = []
1135 rejected = []
1135 for f in files:
1136 for f in files:
1136 if f not in self._repo.dirstate:
1137 if f not in self._repo.dirstate:
1137 self._repo.ui.warn(_("%s not tracked!\n") % join(f))
1138 self._repo.ui.warn(_("%s not tracked!\n") % join(f))
1138 rejected.append(f)
1139 rejected.append(f)
1139 elif self._repo.dirstate[f] != 'a':
1140 elif self._repo.dirstate[f] != 'a':
1140 self._repo.dirstate.remove(f)
1141 self._repo.dirstate.remove(f)
1141 else:
1142 else:
1142 self._repo.dirstate.drop(f)
1143 self._repo.dirstate.drop(f)
1143 return rejected
1144 return rejected
1144 finally:
1145 finally:
1145 wlock.release()
1146 wlock.release()
1146
1147
1147 def undelete(self, list):
1148 def undelete(self, list):
1148 pctxs = self.parents()
1149 pctxs = self.parents()
1149 wlock = self._repo.wlock()
1150 wlock = self._repo.wlock()
1150 try:
1151 try:
1151 for f in list:
1152 for f in list:
1152 if self._repo.dirstate[f] != 'r':
1153 if self._repo.dirstate[f] != 'r':
1153 self._repo.ui.warn(_("%s not removed!\n") % f)
1154 self._repo.ui.warn(_("%s not removed!\n") % f)
1154 else:
1155 else:
1155 fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f]
1156 fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f]
1156 t = fctx.data()
1157 t = fctx.data()
1157 self._repo.wwrite(f, t, fctx.flags())
1158 self._repo.wwrite(f, t, fctx.flags())
1158 self._repo.dirstate.normal(f)
1159 self._repo.dirstate.normal(f)
1159 finally:
1160 finally:
1160 wlock.release()
1161 wlock.release()
1161
1162
1162 def copy(self, source, dest):
1163 def copy(self, source, dest):
1163 try:
1164 try:
1164 st = self._repo.wvfs.lstat(dest)
1165 st = self._repo.wvfs.lstat(dest)
1165 except OSError, err:
1166 except OSError, err:
1166 if err.errno != errno.ENOENT:
1167 if err.errno != errno.ENOENT:
1167 raise
1168 raise
1168 self._repo.ui.warn(_("%s does not exist!\n") % dest)
1169 self._repo.ui.warn(_("%s does not exist!\n") % dest)
1169 return
1170 return
1170 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1171 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1171 self._repo.ui.warn(_("copy failed: %s is not a file or a "
1172 self._repo.ui.warn(_("copy failed: %s is not a file or a "
1172 "symbolic link\n") % dest)
1173 "symbolic link\n") % dest)
1173 else:
1174 else:
1174 wlock = self._repo.wlock()
1175 wlock = self._repo.wlock()
1175 try:
1176 try:
1176 if self._repo.dirstate[dest] in '?r':
1177 if self._repo.dirstate[dest] in '?r':
1177 self._repo.dirstate.add(dest)
1178 self._repo.dirstate.add(dest)
1178 self._repo.dirstate.copy(source, dest)
1179 self._repo.dirstate.copy(source, dest)
1179 finally:
1180 finally:
1180 wlock.release()
1181 wlock.release()
1181
1182
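# A minimal usage sketch for the workingctx methods defined above, assuming
# an already-open localrepository `repo`; the helper name and file names are
# invented for illustration.
def _example_workingctx_usage(repo):
    wctx = repo[None]                        # workingctx for the working dir
    rejected = wctx.add(['newfile.txt'])     # file must already exist on disk
    wctx.copy('newfile.txt', 'newcopy.txt')  # dest must also exist on disk
    wctx.forget(['obsolete.txt'])            # stop tracking, keep the file
    return rejected, wctx.dirty(missing=True)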
1182 class committablefilectx(basefilectx):
1183 class committablefilectx(basefilectx):
1183 """A committablefilectx provides common functionality for a file context
1184 """A committablefilectx provides common functionality for a file context
1184 that wants the ability to commit, e.g. workingfilectx or memfilectx."""
1185 that wants the ability to commit, e.g. workingfilectx or memfilectx."""
1185 def __init__(self, repo, path, filelog=None, ctx=None):
1186 def __init__(self, repo, path, filelog=None, ctx=None):
1186 self._repo = repo
1187 self._repo = repo
1187 self._path = path
1188 self._path = path
1188 self._changeid = None
1189 self._changeid = None
1189 self._filerev = self._filenode = None
1190 self._filerev = self._filenode = None
1190
1191
1191 if filelog is not None:
1192 if filelog is not None:
1192 self._filelog = filelog
1193 self._filelog = filelog
1193 if ctx:
1194 if ctx:
1194 self._changectx = ctx
1195 self._changectx = ctx
1195
1196
1196 def __nonzero__(self):
1197 def __nonzero__(self):
1197 return True
1198 return True
1198
1199
1199 def parents(self):
1200 def parents(self):
1200 '''return parent filectxs, following copies if necessary'''
1201 '''return parent filectxs, following copies if necessary'''
1201 def filenode(ctx, path):
1202 def filenode(ctx, path):
1202 return ctx._manifest.get(path, nullid)
1203 return ctx._manifest.get(path, nullid)
1203
1204
1204 path = self._path
1205 path = self._path
1205 fl = self._filelog
1206 fl = self._filelog
1206 pcl = self._changectx._parents
1207 pcl = self._changectx._parents
1207 renamed = self.renamed()
1208 renamed = self.renamed()
1208
1209
1209 if renamed:
1210 if renamed:
1210 pl = [renamed + (None,)]
1211 pl = [renamed + (None,)]
1211 else:
1212 else:
1212 pl = [(path, filenode(pcl[0], path), fl)]
1213 pl = [(path, filenode(pcl[0], path), fl)]
1213
1214
1214 for pc in pcl[1:]:
1215 for pc in pcl[1:]:
1215 pl.append((path, filenode(pc, path), fl))
1216 pl.append((path, filenode(pc, path), fl))
1216
1217
1217 return [filectx(self._repo, p, fileid=n, filelog=l)
1218 return [filectx(self._repo, p, fileid=n, filelog=l)
1218 for p, n, l in pl if n != nullid]
1219 for p, n, l in pl if n != nullid]
1219
1220
1220 def children(self):
1221 def children(self):
1221 return []
1222 return []
1222
1223
1223 class workingfilectx(committablefilectx):
1224 class workingfilectx(committablefilectx):
1224 """A workingfilectx object makes access to data related to a particular
1225 """A workingfilectx object makes access to data related to a particular
1225 file in the working directory convenient."""
1226 file in the working directory convenient."""
1226 def __init__(self, repo, path, filelog=None, workingctx=None):
1227 def __init__(self, repo, path, filelog=None, workingctx=None):
1227 super(workingfilectx, self).__init__(repo, path, filelog, workingctx)
1228 super(workingfilectx, self).__init__(repo, path, filelog, workingctx)
1228
1229
1229 @propertycache
1230 @propertycache
1230 def _changectx(self):
1231 def _changectx(self):
1231 return workingctx(self._repo)
1232 return workingctx(self._repo)
1232
1233
1233 def data(self):
1234 def data(self):
1234 return self._repo.wread(self._path)
1235 return self._repo.wread(self._path)
1235 def renamed(self):
1236 def renamed(self):
1236 rp = self._repo.dirstate.copied(self._path)
1237 rp = self._repo.dirstate.copied(self._path)
1237 if not rp:
1238 if not rp:
1238 return None
1239 return None
1239 return rp, self._changectx._parents[0]._manifest.get(rp, nullid)
1240 return rp, self._changectx._parents[0]._manifest.get(rp, nullid)
1240
1241
1241 def size(self):
1242 def size(self):
1242 return self._repo.wvfs.lstat(self._path).st_size
1243 return self._repo.wvfs.lstat(self._path).st_size
1243 def date(self):
1244 def date(self):
1244 t, tz = self._changectx.date()
1245 t, tz = self._changectx.date()
1245 try:
1246 try:
1246 return (int(self._repo.wvfs.lstat(self._path).st_mtime), tz)
1247 return (int(self._repo.wvfs.lstat(self._path).st_mtime), tz)
1247 except OSError, err:
1248 except OSError, err:
1248 if err.errno != errno.ENOENT:
1249 if err.errno != errno.ENOENT:
1249 raise
1250 raise
1250 return (t, tz)
1251 return (t, tz)
1251
1252
1252 def cmp(self, fctx):
1253 def cmp(self, fctx):
1253 """compare with other file context
1254 """compare with other file context
1254
1255
1255 returns True if different than fctx.
1256 returns True if different than fctx.
1256 """
1257 """
1257 # fctx should be a filectx (not a workingfilectx)
1258 # fctx should be a filectx (not a workingfilectx)
1258 # invert comparison to reuse the same code path
1259 # invert comparison to reuse the same code path
1259 return fctx.cmp(self)
1260 return fctx.cmp(self)
1260
1261
1261 class memctx(object):
1262 class memctx(object):
1262 """Use memctx to perform in-memory commits via localrepo.commitctx().
1263 """Use memctx to perform in-memory commits via localrepo.commitctx().
1263
1264
1264 Revision information is supplied at initialization time while
1265 Revision information is supplied at initialization time while
1265 related file data is made available through a callback
1266 related file data is made available through a callback
1266 mechanism. 'repo' is the current localrepo, 'parents' is a
1267 mechanism. 'repo' is the current localrepo, 'parents' is a
1267 sequence of two parent revisions identifiers (pass None for every
1268 sequence of two parent revisions identifiers (pass None for every
1268 missing parent), 'text' is the commit message and 'files' lists
1269 missing parent), 'text' is the commit message and 'files' lists
1269 names of files touched by the revision (normalized and relative to
1270 names of files touched by the revision (normalized and relative to
1270 repository root).
1271 repository root).
1271
1272
1272 filectxfn(repo, memctx, path) is a callable receiving the
1273 filectxfn(repo, memctx, path) is a callable receiving the
1273 repository, the current memctx object and the normalized path of
1274 repository, the current memctx object and the normalized path of
1274 requested file, relative to repository root. It is fired by the
1275 requested file, relative to repository root. It is fired by the
1275 commit function for every file in 'files', but the call order is
1276 commit function for every file in 'files', but the call order is
1276 undefined. If the file is available in the revision being
1277 undefined. If the file is available in the revision being
1277 committed (updated or added), filectxfn returns a memfilectx
1278 committed (updated or added), filectxfn returns a memfilectx
1278 object. If the file was removed, filectxfn raises an
1279 object. If the file was removed, filectxfn raises an
1279 IOError. Moved files are represented by marking the source file
1280 IOError. Moved files are represented by marking the source file
1280 removed and the new file added with copy information (see
1281 removed and the new file added with copy information (see
1281 memfilectx).
1282 memfilectx).
1282
1283
1283 user receives the committer name and defaults to current
1284 user receives the committer name and defaults to current
1284 repository username, date is the commit date in any format
1285 repository username, date is the commit date in any format
1285 supported by util.parsedate() and defaults to current date, extra
1286 supported by util.parsedate() and defaults to current date, extra
1286 is a dictionary of metadata or is left empty.
1287 is a dictionary of metadata or is left empty.
1287 """
1288 """
1288 def __init__(self, repo, parents, text, files, filectxfn, user=None,
1289 def __init__(self, repo, parents, text, files, filectxfn, user=None,
1289 date=None, extra=None):
1290 date=None, extra=None):
1290 self._repo = repo
1291 self._repo = repo
1291 self._rev = None
1292 self._rev = None
1292 self._node = None
1293 self._node = None
1293 self._text = text
1294 self._text = text
1294 self._date = date and util.parsedate(date) or util.makedate()
1295 self._date = date and util.parsedate(date) or util.makedate()
1295 self._user = user
1296 self._user = user
1296 parents = [(p or nullid) for p in parents]
1297 parents = [(p or nullid) for p in parents]
1297 p1, p2 = parents
1298 p1, p2 = parents
1298 self._parents = [changectx(self._repo, p) for p in (p1, p2)]
1299 self._parents = [changectx(self._repo, p) for p in (p1, p2)]
1299 files = sorted(set(files))
1300 files = sorted(set(files))
1300 self._status = [files, [], [], [], []]
1301 self._status = [files, [], [], [], []]
1301 self._filectxfn = filectxfn
1302 self._filectxfn = filectxfn
1302
1303
1303 self._extra = extra and extra.copy() or {}
1304 self._extra = extra and extra.copy() or {}
1304 if self._extra.get('branch', '') == '':
1305 if self._extra.get('branch', '') == '':
1305 self._extra['branch'] = 'default'
1306 self._extra['branch'] = 'default'
1306
1307
1307 def __str__(self):
1308 def __str__(self):
1308 return str(self._parents[0]) + "+"
1309 return str(self._parents[0]) + "+"
1309
1310
1310 def __int__(self):
1311 def __int__(self):
1311 return self._rev
1312 return self._rev
1312
1313
1313 def __nonzero__(self):
1314 def __nonzero__(self):
1314 return True
1315 return True
1315
1316
1316 def __getitem__(self, key):
1317 def __getitem__(self, key):
1317 return self.filectx(key)
1318 return self.filectx(key)
1318
1319
1319 def p1(self):
1320 def p1(self):
1320 return self._parents[0]
1321 return self._parents[0]
1321 def p2(self):
1322 def p2(self):
1322 return self._parents[1]
1323 return self._parents[1]
1323
1324
1324 def user(self):
1325 def user(self):
1325 return self._user or self._repo.ui.username()
1326 return self._user or self._repo.ui.username()
1326 def date(self):
1327 def date(self):
1327 return self._date
1328 return self._date
1328 def description(self):
1329 def description(self):
1329 return self._text
1330 return self._text
1330 def files(self):
1331 def files(self):
1331 return self.modified()
1332 return self.modified()
1332 def modified(self):
1333 def modified(self):
1333 return self._status[0]
1334 return self._status[0]
1334 def added(self):
1335 def added(self):
1335 return self._status[1]
1336 return self._status[1]
1336 def removed(self):
1337 def removed(self):
1337 return self._status[2]
1338 return self._status[2]
1338 def deleted(self):
1339 def deleted(self):
1339 return self._status[3]
1340 return self._status[3]
1340 def unknown(self):
1341 def unknown(self):
1341 return self._status[4]
1342 return self._status[4]
1342 def ignored(self):
1343 def ignored(self):
1343 return self._status[5]
1344 return self._status[5]
1344 def clean(self):
1345 def clean(self):
1345 return self._status[6]
1346 return self._status[6]
1346 def branch(self):
1347 def branch(self):
1347 return encoding.tolocal(self._extra['branch'])
1348 return encoding.tolocal(self._extra['branch'])
1348 def extra(self):
1349 def extra(self):
1349 return self._extra
1350 return self._extra
1350 def flags(self, f):
1351 def flags(self, f):
1351 return self[f].flags()
1352 return self[f].flags()
1352
1353
1353 def parents(self):
1354 def parents(self):
1354 """return contexts for each parent changeset"""
1355 """return contexts for each parent changeset"""
1355 return self._parents
1356 return self._parents
1356
1357
1357 def filectx(self, path, filelog=None):
1358 def filectx(self, path, filelog=None):
1358 """get a file context from the working directory"""
1359 """get a file context from the working directory"""
1359 return self._filectxfn(self._repo, self, path)
1360 return self._filectxfn(self._repo, self, path)
1360
1361
1361 def commit(self):
1362 def commit(self):
1362 """commit context to the repo"""
1363 """commit context to the repo"""
1363 return self._repo.commitctx(self)
1364 return self._repo.commitctx(self)
1364
1365
1365 class memfilectx(object):
1366 class memfilectx(object):
1366 """memfilectx represents an in-memory file to commit.
1367 """memfilectx represents an in-memory file to commit.
1367
1368
1368 See memctx for more details.
1369 See memctx for more details.
1369 """
1370 """
1370 def __init__(self, path, data, islink=False, isexec=False, copied=None):
1371 def __init__(self, path, data, islink=False, isexec=False, copied=None):
1371 """
1372 """
1372 path is the normalized file path relative to repository root.
1373 path is the normalized file path relative to repository root.
1373 data is the file content as a string.
1374 data is the file content as a string.
1374 islink is True if the file is a symbolic link.
1375 islink is True if the file is a symbolic link.
1375 isexec is True if the file is executable.
1376 isexec is True if the file is executable.
1376 copied is the source file path if current file was copied in the
1377 copied is the source file path if current file was copied in the
1377 revision being committed, or None."""
1378 revision being committed, or None."""
1378 self._path = path
1379 self._path = path
1379 self._data = data
1380 self._data = data
1380 self._flags = (islink and 'l' or '') + (isexec and 'x' or '')
1381 self._flags = (islink and 'l' or '') + (isexec and 'x' or '')
1381 self._copied = None
1382 self._copied = None
1382 if copied:
1383 if copied:
1383 self._copied = (copied, nullid)
1384 self._copied = (copied, nullid)
1384
1385
1385 def __nonzero__(self):
1386 def __nonzero__(self):
1386 return True
1387 return True
1387 def __str__(self):
1388 def __str__(self):
1388 return "%s@%s" % (self.path(), self._changectx)
1389 return "%s@%s" % (self.path(), self._changectx)
1389 def path(self):
1390 def path(self):
1390 return self._path
1391 return self._path
1391 def data(self):
1392 def data(self):
1392 return self._data
1393 return self._data
1393 def flags(self):
1394 def flags(self):
1394 return self._flags
1395 return self._flags
1395 def isexec(self):
1396 def isexec(self):
1396 return 'x' in self._flags
1397 return 'x' in self._flags
1397 def islink(self):
1398 def islink(self):
1398 return 'l' in self._flags
1399 return 'l' in self._flags
1399 def renamed(self):
1400 def renamed(self):
1400 return self._copied
1401 return self._copied
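# A minimal sketch of the memctx/memfilectx API described above, assuming an
# already-open localrepository `repo`; the path, content and commit message
# are invented.
def _example_inmemory_commit(repo):
    def filectxfn(repo, mctx, path):
        # called once per entry in `files`; return the in-memory file
        return memfilectx(path, data='hello, in-memory commit\n')

    parents = (repo['.'].node(), None)     # None stands in for a missing parent
    ctx = memctx(repo, parents, 'example: commit built in memory',
                 ['hello.txt'], filectxfn, user='example user <user@example.com>')
    return ctx.commit()                    # hands the context to commitctx()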
@@ -1,1089 +1,1089 b''
1 # merge.py - directory-level update/merge handling for Mercurial
1 # merge.py - directory-level update/merge handling for Mercurial
2 #
2 #
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 import struct
8 import struct
9
9
10 from node import nullid, nullrev, hex, bin
10 from node import nullid, nullrev, hex, bin
11 from i18n import _
11 from i18n import _
12 from mercurial import obsolete
12 from mercurial import obsolete
13 import error, util, filemerge, copies, subrepo, worker, dicthelpers
13 import error, util, filemerge, copies, subrepo, worker, dicthelpers
14 import errno, os, shutil
14 import errno, os, shutil
15
15
16 _pack = struct.pack
16 _pack = struct.pack
17 _unpack = struct.unpack
17 _unpack = struct.unpack
18
18
19 def _droponode(data):
19 def _droponode(data):
20 # used for compatibility for v1
20 # used for compatibility for v1
21 bits = data.split("\0")
21 bits = data.split("\0")
22 bits = bits[:-2] + bits[-1:]
22 bits = bits[:-2] + bits[-1:]
23 return "\0".join(bits)
23 return "\0".join(bits)
24
24
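# A worked example of the record layout handled above (field values are
# invented): a v2 "F" record NUL-joins the destination path plus the list
# built by mergestate.add() below, and _droponode() strips the second-to-last
# field (the "other" file node), which the v1 format cannot represent.
_EXAMPLE_V2_F_RECORD = "\0".join(
    ['dest/file', 'u', 'ab12cd34', 'local/file',
     'ancestor/file', 'deadbeef', 'other/file', 'cafebabe', 'x'])
# _droponode(_EXAMPLE_V2_F_RECORD) drops 'cafebabe' and keeps the other
# eight fields.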
25 class mergestate(object):
25 class mergestate(object):
26 '''track 3-way merge state of individual files
26 '''track 3-way merge state of individual files
27
27
28 it is stored on disk when needed. Two files are used, one with an old
28 it is stored on disk when needed. Two files are used, one with an old
29 format, one with a new format. Both contain similar data, but the new
29 format, one with a new format. Both contain similar data, but the new
30 format can store new kinds of fields.
30 format can store new kinds of fields.
31
31
32 The current new format is a list of arbitrary records of the form:
32 The current new format is a list of arbitrary records of the form:
33
33
34 [type][length][content]
34 [type][length][content]
35
35
36 Type is a single character, length is a 4-byte integer, and content is an
36 Type is a single character, length is a 4-byte integer, and content is an
37 arbitrary suite of bytes of length `length`.
37 arbitrary suite of bytes of length `length`.
38
38
39 Type should be a letter. Capital-letter records are mandatory; Mercurial
39 Type should be a letter. Capital-letter records are mandatory; Mercurial
40 should abort if they are unknown. Lower-case records can be safely ignored.
40 should abort if they are unknown. Lower-case records can be safely ignored.
41
41
42 Currently known records:
42 Currently known records:
43
43
44 L: the node of the "local" part of the merge (hexified version)
44 L: the node of the "local" part of the merge (hexified version)
45 O: the node of the "other" part of the merge (hexified version)
45 O: the node of the "other" part of the merge (hexified version)
46 F: a file to be merged entry
46 F: a file to be merged entry
47 '''
47 '''
48 statepathv1 = "merge/state"
48 statepathv1 = "merge/state"
49 statepathv2 = "merge/state2"
49 statepathv2 = "merge/state2"
50
50
51 def __init__(self, repo):
51 def __init__(self, repo):
52 self._repo = repo
52 self._repo = repo
53 self._dirty = False
53 self._dirty = False
54 self._read()
54 self._read()
55
55
56 def reset(self, node=None, other=None):
56 def reset(self, node=None, other=None):
57 self._state = {}
57 self._state = {}
58 if node:
58 if node:
59 self._local = node
59 self._local = node
60 self._other = other
60 self._other = other
61 shutil.rmtree(self._repo.join("merge"), True)
61 shutil.rmtree(self._repo.join("merge"), True)
62 self._dirty = False
62 self._dirty = False
63
63
64 def _read(self):
64 def _read(self):
65 """Analyse each record content to restore a serialized state from disk
65 """Analyse each record content to restore a serialized state from disk
66
66
67 This function process "record" entry produced by the de-serialization
67 This function process "record" entry produced by the de-serialization
68 of on disk file.
68 of on disk file.
69 """
69 """
70 self._state = {}
70 self._state = {}
71 records = self._readrecords()
71 records = self._readrecords()
72 for rtype, record in records:
72 for rtype, record in records:
73 if rtype == 'L':
73 if rtype == 'L':
74 self._local = bin(record)
74 self._local = bin(record)
75 elif rtype == 'O':
75 elif rtype == 'O':
76 self._other = bin(record)
76 self._other = bin(record)
77 elif rtype == "F":
77 elif rtype == "F":
78 bits = record.split("\0")
78 bits = record.split("\0")
79 self._state[bits[0]] = bits[1:]
79 self._state[bits[0]] = bits[1:]
80 elif not rtype.islower():
80 elif not rtype.islower():
81 raise util.Abort(_('unsupported merge state record: %s')
81 raise util.Abort(_('unsupported merge state record: %s')
82 % rtype)
82 % rtype)
83 self._dirty = False
83 self._dirty = False
84
84
85 def _readrecords(self):
85 def _readrecords(self):
86 """Read merge state from disk and return a list of record (TYPE, data)
86 """Read merge state from disk and return a list of record (TYPE, data)
87
87
88 We read data from both v1 and v2 files and decide which one to use.
88 We read data from both v1 and v2 files and decide which one to use.
89
89
90 V1 has been used by versions prior to 2.9.1 and contains less data than
90 V1 has been used by versions prior to 2.9.1 and contains less data than
91 v2. We read both versions and check whether any data in v2 contradicts
91 v2. We read both versions and check whether any data in v2 contradicts
92 v1. If there is no contradiction, we can safely assume that both v1
92 v1. If there is no contradiction, we can safely assume that both v1
93 and v2 were written at the same time and use the extra data in v2. If
93 and v2 were written at the same time and use the extra data in v2. If
94 there is a contradiction, we ignore the v2 content, as we assume an old
94 there is a contradiction, we ignore the v2 content, as we assume an old
95 version of Mercurial overwrote the mergestate file and left an old v2
95 version of Mercurial overwrote the mergestate file and left an old v2
96 file around.
96 file around.
97
97
98 returns list of record [(TYPE, data), ...]"""
98 returns list of record [(TYPE, data), ...]"""
99 v1records = self._readrecordsv1()
99 v1records = self._readrecordsv1()
100 v2records = self._readrecordsv2()
100 v2records = self._readrecordsv2()
101 oldv2 = set() # old format version of v2 record
101 oldv2 = set() # old format version of v2 record
102 for rec in v2records:
102 for rec in v2records:
103 if rec[0] == 'L':
103 if rec[0] == 'L':
104 oldv2.add(rec)
104 oldv2.add(rec)
105 elif rec[0] == 'F':
105 elif rec[0] == 'F':
106 # drop the onode data (not contained in v1)
106 # drop the onode data (not contained in v1)
107 oldv2.add(('F', _droponode(rec[1])))
107 oldv2.add(('F', _droponode(rec[1])))
108 for rec in v1records:
108 for rec in v1records:
109 if rec not in oldv2:
109 if rec not in oldv2:
110 # v1 file is newer than v2 file, use it
110 # v1 file is newer than v2 file, use it
111 # we have to infer the "other" changeset of the merge
111 # we have to infer the "other" changeset of the merge
112 # we cannot do better than that with v1 of the format
112 # we cannot do better than that with v1 of the format
113 mctx = self._repo[None].parents()[-1]
113 mctx = self._repo[None].parents()[-1]
114 v1records.append(('O', mctx.hex()))
114 v1records.append(('O', mctx.hex()))
115 # add place holder "other" file node information
115 # add place holder "other" file node information
116 # nobody is using it yet so we do no need to fetch the data
116 # nobody is using it yet so we do no need to fetch the data
117 # if mctx was wrong `mctx[bits[-2]]` may fails.
117 # if mctx was wrong `mctx[bits[-2]]` may fails.
118 for idx, r in enumerate(v1records):
118 for idx, r in enumerate(v1records):
119 if r[0] == 'F':
119 if r[0] == 'F':
120 bits = r[1].split("\0")
120 bits = r[1].split("\0")
121 bits.insert(-2, '')
121 bits.insert(-2, '')
122 v1records[idx] = (r[0], "\0".join(bits))
122 v1records[idx] = (r[0], "\0".join(bits))
123 return v1records
123 return v1records
124 else:
124 else:
125 return v2records
125 return v2records
126
126
127 def _readrecordsv1(self):
127 def _readrecordsv1(self):
128 """read on disk merge state for version 1 file
128 """read on disk merge state for version 1 file
129
129
130 returns list of record [(TYPE, data), ...]
130 returns list of record [(TYPE, data), ...]
131
131
132 Note: the "F" data from this file are one entry short
132 Note: the "F" data from this file are one entry short
133 (no "other file node" entry)
133 (no "other file node" entry)
134 """
134 """
135 records = []
135 records = []
136 try:
136 try:
137 f = self._repo.opener(self.statepathv1)
137 f = self._repo.opener(self.statepathv1)
138 for i, l in enumerate(f):
138 for i, l in enumerate(f):
139 if i == 0:
139 if i == 0:
140 records.append(('L', l[:-1]))
140 records.append(('L', l[:-1]))
141 else:
141 else:
142 records.append(('F', l[:-1]))
142 records.append(('F', l[:-1]))
143 f.close()
143 f.close()
144 except IOError, err:
144 except IOError, err:
145 if err.errno != errno.ENOENT:
145 if err.errno != errno.ENOENT:
146 raise
146 raise
147 return records
147 return records
148
148
149 def _readrecordsv2(self):
149 def _readrecordsv2(self):
150 """read on disk merge state for version 2 file
150 """read on disk merge state for version 2 file
151
151
152 returns list of record [(TYPE, data), ...]
152 returns list of record [(TYPE, data), ...]
153 """
153 """
154 records = []
154 records = []
155 try:
155 try:
156 f = self._repo.opener(self.statepathv2)
156 f = self._repo.opener(self.statepathv2)
157 data = f.read()
157 data = f.read()
158 off = 0
158 off = 0
159 end = len(data)
159 end = len(data)
160 while off < end:
160 while off < end:
161 rtype = data[off]
161 rtype = data[off]
162 off += 1
162 off += 1
163 length = _unpack('>I', data[off:(off + 4)])[0]
163 length = _unpack('>I', data[off:(off + 4)])[0]
164 off += 4
164 off += 4
165 record = data[off:(off + length)]
165 record = data[off:(off + length)]
166 off += length
166 off += length
167 records.append((rtype, record))
167 records.append((rtype, record))
168 f.close()
168 f.close()
169 except IOError, err:
169 except IOError, err:
170 if err.errno != errno.ENOENT:
170 if err.errno != errno.ENOENT:
171 raise
171 raise
172 return records
172 return records
173
173
174 def commit(self):
174 def commit(self):
175 """Write current state on disk (if necessary)"""
175 """Write current state on disk (if necessary)"""
176 if self._dirty:
176 if self._dirty:
177 records = []
177 records = []
178 records.append(("L", hex(self._local)))
178 records.append(("L", hex(self._local)))
179 records.append(("O", hex(self._other)))
179 records.append(("O", hex(self._other)))
180 for d, v in self._state.iteritems():
180 for d, v in self._state.iteritems():
181 records.append(("F", "\0".join([d] + v)))
181 records.append(("F", "\0".join([d] + v)))
182 self._writerecords(records)
182 self._writerecords(records)
183 self._dirty = False
183 self._dirty = False
184
184
185 def _writerecords(self, records):
185 def _writerecords(self, records):
186 """Write current state on disk (both v1 and v2)"""
186 """Write current state on disk (both v1 and v2)"""
187 self._writerecordsv1(records)
187 self._writerecordsv1(records)
188 self._writerecordsv2(records)
188 self._writerecordsv2(records)
189
189
190 def _writerecordsv1(self, records):
190 def _writerecordsv1(self, records):
191 """Write current state on disk in a version 1 file"""
191 """Write current state on disk in a version 1 file"""
192 f = self._repo.opener(self.statepathv1, "w")
192 f = self._repo.opener(self.statepathv1, "w")
193 irecords = iter(records)
193 irecords = iter(records)
194 lrecords = irecords.next()
194 lrecords = irecords.next()
195 assert lrecords[0] == 'L'
195 assert lrecords[0] == 'L'
196 f.write(hex(self._local) + "\n")
196 f.write(hex(self._local) + "\n")
197 for rtype, data in irecords:
197 for rtype, data in irecords:
198 if rtype == "F":
198 if rtype == "F":
199 f.write("%s\n" % _droponode(data))
199 f.write("%s\n" % _droponode(data))
200 f.close()
200 f.close()
201
201
202 def _writerecordsv2(self, records):
202 def _writerecordsv2(self, records):
203 """Write current state on disk in a version 2 file"""
203 """Write current state on disk in a version 2 file"""
204 f = self._repo.opener(self.statepathv2, "w")
204 f = self._repo.opener(self.statepathv2, "w")
205 for key, data in records:
205 for key, data in records:
206 assert len(key) == 1
206 assert len(key) == 1
207 format = ">sI%is" % len(data)
207 format = ">sI%is" % len(data)
208 f.write(_pack(format, key, len(data), data))
208 f.write(_pack(format, key, len(data), data))
209 f.close()
209 f.close()
210
210
211 def add(self, fcl, fco, fca, fd):
211 def add(self, fcl, fco, fca, fd):
212 """add a new (potentially?) conflicting file the merge state
212 """add a new (potentially?) conflicting file the merge state
213 fcl: file context for local,
213 fcl: file context for local,
214 fco: file context for remote,
214 fco: file context for remote,
215 fca: file context for ancestors,
215 fca: file context for ancestors,
216 fd: file path of the resulting merge.
216 fd: file path of the resulting merge.
217
217
218 note: also write the local version to the `.hg/merge` directory.
218 note: also write the local version to the `.hg/merge` directory.
219 """
219 """
220 hash = util.sha1(fcl.path()).hexdigest()
220 hash = util.sha1(fcl.path()).hexdigest()
221 self._repo.opener.write("merge/" + hash, fcl.data())
221 self._repo.opener.write("merge/" + hash, fcl.data())
222 self._state[fd] = ['u', hash, fcl.path(),
222 self._state[fd] = ['u', hash, fcl.path(),
223 fca.path(), hex(fca.filenode()),
223 fca.path(), hex(fca.filenode()),
224 fco.path(), hex(fco.filenode()),
224 fco.path(), hex(fco.filenode()),
225 fcl.flags()]
225 fcl.flags()]
226 self._dirty = True
226 self._dirty = True
227
227
228 def __contains__(self, dfile):
228 def __contains__(self, dfile):
229 return dfile in self._state
229 return dfile in self._state
230
230
231 def __getitem__(self, dfile):
231 def __getitem__(self, dfile):
232 return self._state[dfile][0]
232 return self._state[dfile][0]
233
233
234 def __iter__(self):
234 def __iter__(self):
235 l = self._state.keys()
235 l = self._state.keys()
236 l.sort()
236 l.sort()
237 for f in l:
237 for f in l:
238 yield f
238 yield f
239
239
240 def files(self):
240 def files(self):
241 return self._state.keys()
241 return self._state.keys()
242
242
243 def mark(self, dfile, state):
243 def mark(self, dfile, state):
244 self._state[dfile][0] = state
244 self._state[dfile][0] = state
245 self._dirty = True
245 self._dirty = True
246
246
247 def resolve(self, dfile, wctx):
247 def resolve(self, dfile, wctx):
248 """rerun merge process for file path `dfile`"""
248 """rerun merge process for file path `dfile`"""
249 if self[dfile] == 'r':
249 if self[dfile] == 'r':
250 return 0
250 return 0
251 stateentry = self._state[dfile]
251 stateentry = self._state[dfile]
252 state, hash, lfile, afile, anode, ofile, onode, flags = stateentry
252 state, hash, lfile, afile, anode, ofile, onode, flags = stateentry
253 octx = self._repo[self._other]
253 octx = self._repo[self._other]
254 fcd = wctx[dfile]
254 fcd = wctx[dfile]
255 fco = octx[ofile]
255 fco = octx[ofile]
256 fca = self._repo.filectx(afile, fileid=anode)
256 fca = self._repo.filectx(afile, fileid=anode)
257 # "premerge" x flags
257 # "premerge" x flags
258 flo = fco.flags()
258 flo = fco.flags()
259 fla = fca.flags()
259 fla = fca.flags()
260 if 'x' in flags + flo + fla and 'l' not in flags + flo + fla:
260 if 'x' in flags + flo + fla and 'l' not in flags + flo + fla:
261 if fca.node() == nullid:
261 if fca.node() == nullid:
262 self._repo.ui.warn(_('warning: cannot merge flags for %s\n') %
262 self._repo.ui.warn(_('warning: cannot merge flags for %s\n') %
263 afile)
263 afile)
264 elif flags == fla:
264 elif flags == fla:
265 flags = flo
265 flags = flo
266 # restore local
266 # restore local
267 f = self._repo.opener("merge/" + hash)
267 f = self._repo.opener("merge/" + hash)
268 self._repo.wwrite(dfile, f.read(), flags)
268 self._repo.wwrite(dfile, f.read(), flags)
269 f.close()
269 f.close()
270 r = filemerge.filemerge(self._repo, self._local, lfile, fcd, fco, fca)
270 r = filemerge.filemerge(self._repo, self._local, lfile, fcd, fco, fca)
271 if r is None:
271 if r is None:
272 # no real conflict
272 # no real conflict
273 del self._state[dfile]
273 del self._state[dfile]
274 self._dirty = True
274 self._dirty = True
275 elif not r:
275 elif not r:
276 self.mark(dfile, 'r')
276 self.mark(dfile, 'r')
277 return r
277 return r
278
278
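# A small sketch of the [type][length][content] framing used by
# _writerecordsv2/_readrecordsv2 above, reusing the module's _pack/_unpack
# (the helper names are invented):
def _example_packrecord(rtype, data):
    # 1-byte type, big-endian 4-byte unsigned length, then the payload
    return _pack('>sI%is' % len(data), rtype, len(data), data)

def _example_unpackrecord(buf, off=0):
    rtype = buf[off]
    length = _unpack('>I', buf[off + 1:off + 5])[0]
    return rtype, buf[off + 5:off + 5 + length], off + 5 + length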
279 def _checkunknownfile(repo, wctx, mctx, f):
279 def _checkunknownfile(repo, wctx, mctx, f):
280 return (not repo.dirstate._ignore(f)
280 return (not repo.dirstate._ignore(f)
281 and os.path.isfile(repo.wjoin(f))
281 and os.path.isfile(repo.wjoin(f))
282 and repo.wopener.audit.check(f)
282 and repo.wopener.audit.check(f)
283 and repo.dirstate.normalize(f) not in repo.dirstate
283 and repo.dirstate.normalize(f) not in repo.dirstate
284 and mctx[f].cmp(wctx[f]))
284 and mctx[f].cmp(wctx[f]))
285
285
286 def _checkunknown(repo, wctx, mctx):
286 def _checkunknown(repo, wctx, mctx):
287 "check for collisions between unknown files and files in mctx"
287 "check for collisions between unknown files and files in mctx"
288
288
289 error = False
289 error = False
290 for f in mctx:
290 for f in mctx:
291 if f not in wctx and _checkunknownfile(repo, wctx, mctx, f):
291 if f not in wctx and _checkunknownfile(repo, wctx, mctx, f):
292 error = True
292 error = True
293 wctx._repo.ui.warn(_("%s: untracked file differs\n") % f)
293 wctx._repo.ui.warn(_("%s: untracked file differs\n") % f)
294 if error:
294 if error:
295 raise util.Abort(_("untracked files in working directory differ "
295 raise util.Abort(_("untracked files in working directory differ "
296 "from files in requested revision"))
296 "from files in requested revision"))
297
297
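# Descriptive note: the helpers above abort an update when an untracked file
# in the working directory would be overwritten by a differing file from the
# target revision, mirroring the "untracked files ... differ" abort collected
# later in manifestmerge().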
298 def _forgetremoved(wctx, mctx, branchmerge):
298 def _forgetremoved(wctx, mctx, branchmerge):
299 """
299 """
300 Forget removed files
300 Forget removed files
301
301
302 If we're jumping between revisions (as opposed to merging), and if
302 If we're jumping between revisions (as opposed to merging), and if
303 neither the working directory nor the target rev has the file,
303 neither the working directory nor the target rev has the file,
304 then we need to remove it from the dirstate, to prevent the
304 then we need to remove it from the dirstate, to prevent the
305 dirstate from listing the file when it is no longer in the
305 dirstate from listing the file when it is no longer in the
306 manifest.
306 manifest.
307
307
308 If we're merging, and the other revision has removed a file
308 If we're merging, and the other revision has removed a file
309 that is not present in the working directory, we need to mark it
309 that is not present in the working directory, we need to mark it
310 as removed.
310 as removed.
311 """
311 """
312
312
313 actions = []
313 actions = []
314 state = branchmerge and 'r' or 'f'
314 state = branchmerge and 'r' or 'f'
315 for f in wctx.deleted():
315 for f in wctx.deleted():
316 if f not in mctx:
316 if f not in mctx:
317 actions.append((f, state, None, "forget deleted"))
317 actions.append((f, state, None, "forget deleted"))
318
318
319 if not branchmerge:
319 if not branchmerge:
320 for f in wctx.removed():
320 for f in wctx.removed():
321 if f not in mctx:
321 if f not in mctx:
322 actions.append((f, "f", None, "forget removed"))
322 actions.append((f, "f", None, "forget removed"))
323
323
324 return actions
324 return actions
325
325
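# The action lists built in this module are (file, action-code, args, note)
# tuples; a hypothetical example (values invented):
_EXAMPLE_ACTIONS = [
    ('a.txt', 'g', ('',), 'remote created'),    # get file from the other side
    ('b.txt', 'r', None, 'other deleted'),      # remove from the working dir
    ('c.txt', 'f', None, 'forget removed'),     # drop from the dirstate only
]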
326 def _checkcollision(repo, wmf, actions):
326 def _checkcollision(repo, wmf, actions):
327 # build provisional merged manifest up
327 # build provisional merged manifest up
328 pmmf = set(wmf)
328 pmmf = set(wmf)
329
329
330 def addop(f, args):
330 def addop(f, args):
331 pmmf.add(f)
331 pmmf.add(f)
332 def removeop(f, args):
332 def removeop(f, args):
333 pmmf.discard(f)
333 pmmf.discard(f)
334 def nop(f, args):
334 def nop(f, args):
335 pass
335 pass
336
336
337 def renamemoveop(f, args):
337 def renamemoveop(f, args):
338 f2, flags = args
338 f2, flags = args
339 pmmf.discard(f2)
339 pmmf.discard(f2)
340 pmmf.add(f)
340 pmmf.add(f)
341 def renamegetop(f, args):
341 def renamegetop(f, args):
342 f2, flags = args
342 f2, flags = args
343 pmmf.add(f)
343 pmmf.add(f)
344 def mergeop(f, args):
344 def mergeop(f, args):
345 f1, f2, fa, move, anc = args
345 f1, f2, fa, move, anc = args
346 if move:
346 if move:
347 pmmf.discard(f1)
347 pmmf.discard(f1)
348 pmmf.add(f)
348 pmmf.add(f)
349
349
350 opmap = {
350 opmap = {
351 "a": addop,
351 "a": addop,
352 "dm": renamemoveop,
352 "dm": renamemoveop,
353 "dg": renamegetop,
353 "dg": renamegetop,
354 "dr": nop,
354 "dr": nop,
355 "e": nop,
355 "e": nop,
356 "k": nop,
356 "k": nop,
357 "f": addop, # untracked file should be kept in working directory
357 "f": addop, # untracked file should be kept in working directory
358 "g": addop,
358 "g": addop,
359 "m": mergeop,
359 "m": mergeop,
360 "r": removeop,
360 "r": removeop,
361 "rd": nop,
361 "rd": nop,
362 "cd": addop,
362 "cd": addop,
363 "dc": addop,
363 "dc": addop,
364 }
364 }
365 for f, m, args, msg in actions:
365 for f, m, args, msg in actions:
366 op = opmap.get(m)
366 op = opmap.get(m)
367 assert op, m
367 assert op, m
368 op(f, args)
368 op(f, args)
369
369
370 # check case-folding collision in provisional merged manifest
370 # check case-folding collision in provisional merged manifest
371 foldmap = {}
371 foldmap = {}
372 for f in sorted(pmmf):
372 for f in sorted(pmmf):
373 fold = util.normcase(f)
373 fold = util.normcase(f)
374 if fold in foldmap:
374 if fold in foldmap:
375 raise util.Abort(_("case-folding collision between %s and %s")
375 raise util.Abort(_("case-folding collision between %s and %s")
376 % (f, foldmap[fold]))
376 % (f, foldmap[fold]))
377 foldmap[fold] = f
377 foldmap[fold] = f
378
378
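# Illustration of the collision test above: on this code path (taken only on
# case-insensitive filesystems) util.normcase() folds case, so two distinct
# manifest entries such as 'README' and 'readme' map to the same key and
# trigger the abort, e.g.:
#   util.normcase('README') == util.normcase('readme')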
379 def manifestmerge(repo, wctx, p2, pa, branchmerge, force, partial,
379 def manifestmerge(repo, wctx, p2, pa, branchmerge, force, partial,
380 acceptremote, followcopies):
380 acceptremote, followcopies):
381 """
381 """
382 Merge p1 and p2 with ancestor pa and generate merge action list
382 Merge p1 and p2 with ancestor pa and generate merge action list
383
383
384 branchmerge and force are as passed in to update
384 branchmerge and force are as passed in to update
385 partial = function to filter file lists
385 partial = function to filter file lists
386 acceptremote = accept the incoming changes without prompting
386 acceptremote = accept the incoming changes without prompting
387 """
387 """
388
388
389 actions, copy, movewithdir = [], {}, {}
389 actions, copy, movewithdir = [], {}, {}
390
390
391 # manifests fetched in order are going to be faster, so prime the caches
391 # manifests fetched in order are going to be faster, so prime the caches
392 [x.manifest() for x in
392 [x.manifest() for x in
393 sorted(wctx.parents() + [p2, pa], key=lambda x: x.rev())]
393 sorted(wctx.parents() + [p2, pa], key=lambda x: x.rev())]
394
394
395 if followcopies:
395 if followcopies:
396 ret = copies.mergecopies(repo, wctx, p2, pa)
396 ret = copies.mergecopies(repo, wctx, p2, pa)
397 copy, movewithdir, diverge, renamedelete = ret
397 copy, movewithdir, diverge, renamedelete = ret
398 for of, fl in diverge.iteritems():
398 for of, fl in diverge.iteritems():
399 actions.append((of, "dr", (fl,), "divergent renames"))
399 actions.append((of, "dr", (fl,), "divergent renames"))
400 for of, fl in renamedelete.iteritems():
400 for of, fl in renamedelete.iteritems():
401 actions.append((of, "rd", (fl,), "rename and delete"))
401 actions.append((of, "rd", (fl,), "rename and delete"))
402
402
403 repo.ui.note(_("resolving manifests\n"))
403 repo.ui.note(_("resolving manifests\n"))
404 repo.ui.debug(" branchmerge: %s, force: %s, partial: %s\n"
404 repo.ui.debug(" branchmerge: %s, force: %s, partial: %s\n"
405 % (bool(branchmerge), bool(force), bool(partial)))
405 % (bool(branchmerge), bool(force), bool(partial)))
406 repo.ui.debug(" ancestor: %s, local: %s, remote: %s\n" % (pa, wctx, p2))
406 repo.ui.debug(" ancestor: %s, local: %s, remote: %s\n" % (pa, wctx, p2))
407
407
408 m1, m2, ma = wctx.manifest(), p2.manifest(), pa.manifest()
408 m1, m2, ma = wctx.manifest(), p2.manifest(), pa.manifest()
409 copied = set(copy.values())
409 copied = set(copy.values())
410 copied.update(movewithdir.values())
410 copied.update(movewithdir.values())
411
411
412 if '.hgsubstate' in m1:
412 if '.hgsubstate' in m1:
413 # check whether sub state is modified
413 # check whether sub state is modified
414 for s in sorted(wctx.substate):
414 for s in sorted(wctx.substate):
415 if wctx.sub(s).dirty():
415 if wctx.sub(s).dirty():
416 m1['.hgsubstate'] += "+"
416 m1['.hgsubstate'] += "+"
417 break
417 break
418
418
419 aborts = []
419 aborts = []
420 # Compare manifests
420 # Compare manifests
421 fdiff = dicthelpers.diff(m1, m2)
421 fdiff = dicthelpers.diff(m1, m2)
422 flagsdiff = m1.flagsdiff(m2)
422 flagsdiff = m1.flagsdiff(m2)
423 diff12 = dicthelpers.join(fdiff, flagsdiff)
423 diff12 = dicthelpers.join(fdiff, flagsdiff)
424
424
425 for f, (n12, fl12) in diff12.iteritems():
425 for f, (n12, fl12) in diff12.iteritems():
426 if n12:
426 if n12:
427 n1, n2 = n12
427 n1, n2 = n12
428 else: # file contents didn't change, but flags did
428 else: # file contents didn't change, but flags did
429 n1 = n2 = m1.get(f, None)
429 n1 = n2 = m1.get(f, None)
430 if n1 is None:
430 if n1 is None:
431 # Since n1 == n2, the file isn't present in m2 either. This
431 # Since n1 == n2, the file isn't present in m2 either. This
432 # means that the file was removed or deleted locally and
432 # means that the file was removed or deleted locally and
433 # removed remotely, but that residual entries remain in flags.
433 # removed remotely, but that residual entries remain in flags.
434 # This can happen in manifests generated by workingctx.
434 # This can happen in manifests generated by workingctx.
435 continue
435 continue
436 if fl12:
436 if fl12:
437 fl1, fl2 = fl12
437 fl1, fl2 = fl12
438 else: # flags didn't change, file contents did
438 else: # flags didn't change, file contents did
439 fl1 = fl2 = m1.flags(f)
439 fl1 = fl2 = m1.flags(f)
440
440
441 if partial and not partial(f):
441 if partial and not partial(f):
442 continue
442 continue
443 if n1 and n2:
443 if n1 and n2:
444 fa = f
444 fa = f
445 a = ma.get(f, nullid)
445 a = ma.get(f, nullid)
446 if a == nullid:
446 if a == nullid:
447 fa = copy.get(f, f)
447 fa = copy.get(f, f)
448 # Note: f as default is wrong - we can't really make a 3-way
448 # Note: f as default is wrong - we can't really make a 3-way
449 # merge without an ancestor file.
449 # merge without an ancestor file.
450 fla = ma.flags(fa)
450 fla = ma.flags(fa)
451 nol = 'l' not in fl1 + fl2 + fla
451 nol = 'l' not in fl1 + fl2 + fla
452 if n2 == a and fl2 == fla:
452 if n2 == a and fl2 == fla:
453 actions.append((f, "k", (), "keep")) # remote unchanged
453 actions.append((f, "k", (), "keep")) # remote unchanged
454 elif n1 == a and fl1 == fla: # local unchanged - use remote
454 elif n1 == a and fl1 == fla: # local unchanged - use remote
455 if n1 == n2: # optimization: keep local content
455 if n1 == n2: # optimization: keep local content
456 actions.append((f, "e", (fl2,), "update permissions"))
456 actions.append((f, "e", (fl2,), "update permissions"))
457 else:
457 else:
458 actions.append((f, "g", (fl2,), "remote is newer"))
458 actions.append((f, "g", (fl2,), "remote is newer"))
459 elif nol and n2 == a: # remote only changed 'x'
459 elif nol and n2 == a: # remote only changed 'x'
460 actions.append((f, "e", (fl2,), "update permissions"))
460 actions.append((f, "e", (fl2,), "update permissions"))
461 elif nol and n1 == a: # local only changed 'x'
461 elif nol and n1 == a: # local only changed 'x'
462 actions.append((f, "g", (fl1,), "remote is newer"))
462 actions.append((f, "g", (fl1,), "remote is newer"))
463 else: # both changed something
463 else: # both changed something
464 actions.append((f, "m", (f, f, fa, False, pa.node()),
464 actions.append((f, "m", (f, f, fa, False, pa.node()),
465 "versions differ"))
465 "versions differ"))
466 elif f in copied: # files we'll deal with on m2 side
466 elif f in copied: # files we'll deal with on m2 side
467 pass
467 pass
468 elif n1 and f in movewithdir: # directory rename, move local
468 elif n1 and f in movewithdir: # directory rename, move local
469 f2 = movewithdir[f]
469 f2 = movewithdir[f]
470 actions.append((f2, "dm", (f, fl1),
470 actions.append((f2, "dm", (f, fl1),
471 "remote directory rename - move from " + f))
471 "remote directory rename - move from " + f))
472 elif n1 and f in copy:
472 elif n1 and f in copy:
473 f2 = copy[f]
473 f2 = copy[f]
474 actions.append((f, "m", (f, f2, f2, False, pa.node()),
474 actions.append((f, "m", (f, f2, f2, False, pa.node()),
475 "local copied/moved from " + f2))
475 "local copied/moved from " + f2))
476 elif n1 and f in ma: # clean, a different, no remote
476 elif n1 and f in ma: # clean, a different, no remote
477 if n1 != ma[f]:
477 if n1 != ma[f]:
478 if acceptremote:
478 if acceptremote:
479 actions.append((f, "r", None, "remote delete"))
479 actions.append((f, "r", None, "remote delete"))
480 else:
480 else:
481 actions.append((f, "cd", None, "prompt changed/deleted"))
481 actions.append((f, "cd", None, "prompt changed/deleted"))
482 elif n1[20:] == "a": # added, no remote
482 elif n1[20:] == "a": # added, no remote
483 actions.append((f, "f", None, "remote deleted"))
483 actions.append((f, "f", None, "remote deleted"))
484 else:
484 else:
485 actions.append((f, "r", None, "other deleted"))
485 actions.append((f, "r", None, "other deleted"))
486 elif n2 and f in movewithdir:
486 elif n2 and f in movewithdir:
487 f2 = movewithdir[f]
487 f2 = movewithdir[f]
488 actions.append((f2, "dg", (f, fl2),
488 actions.append((f2, "dg", (f, fl2),
489 "local directory rename - get from " + f))
489 "local directory rename - get from " + f))
490 elif n2 and f in copy:
490 elif n2 and f in copy:
491 f2 = copy[f]
491 f2 = copy[f]
492 if f2 in m2:
492 if f2 in m2:
493 actions.append((f, "m", (f2, f, f2, False, pa.node()),
493 actions.append((f, "m", (f2, f, f2, False, pa.node()),
494 "remote copied from " + f2))
494 "remote copied from " + f2))
495 else:
495 else:
496 actions.append((f, "m", (f2, f, f2, True, pa.node()),
496 actions.append((f, "m", (f2, f, f2, True, pa.node()),
497 "remote moved from " + f2))
497 "remote moved from " + f2))
498 elif n2 and f not in ma:
498 elif n2 and f not in ma:
499 # local unknown, remote created: the logic is described by the
499 # local unknown, remote created: the logic is described by the
500 # following table:
500 # following table:
501 #
501 #
502 # force branchmerge different | action
502 # force branchmerge different | action
503 # n * n | get
503 # n * n | get
504 # n * y | abort
504 # n * y | abort
505 # y n * | get
505 # y n * | get
506 # y y n | get
506 # y y n | get
507 # y y y | merge
507 # y y y | merge
508 #
508 #
509 # Checking whether the files are different is expensive, so we
509 # Checking whether the files are different is expensive, so we
510 # don't do that when we can avoid it.
510 # don't do that when we can avoid it.
            if force and not branchmerge:
                actions.append((f, "g", (fl2,), "remote created"))
            else:
                different = _checkunknownfile(repo, wctx, p2, f)
                if force and branchmerge and different:
                    # FIXME: This is wrong - f is not in ma ...
                    actions.append((f, "m", (f, f, f, False, pa.node()),
                                    "remote differs from untracked local"))
                elif not force and different:
                    aborts.append((f, "ud"))
                else:
                    actions.append((f, "g", (fl2,), "remote created"))
        elif n2 and n2 != ma[f]:
            different = _checkunknownfile(repo, wctx, p2, f)
            if not force and different:
                aborts.append((f, "ud"))
            else:
                # if different: old untracked f may be overwritten and lost
                if acceptremote:
                    actions.append((f, "g", (m2.flags(f),),
                                    "remote recreating"))
                else:
                    actions.append((f, "dc", (m2.flags(f),),
                                    "prompt deleted/changed"))

    for f, m in sorted(aborts):
        if m == "ud":
            repo.ui.warn(_("%s: untracked file differs\n") % f)
        else: assert False, m
    if aborts:
        raise util.Abort(_("untracked files in working directory differ "
                           "from files in requested revision"))

    if not util.checkcase(repo.path):
        # check collision between files only in p2 for clean update
        if (not branchmerge and
            (force or not wctx.dirty(missing=True, branch=False))):
            _checkcollision(repo, m2, [])
        else:
            _checkcollision(repo, m1, actions)

    return actions
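
# Editorial sketch, not part of the original file: the tuples collected in
# 'actions' above have the shape (filename, action code, arguments, message).
# The values below are invented, purely to illustrate that shape.
_example_actions = [
    ("a.txt", "g", ("",), "remote created"),    # fetch a.txt from the remote
    ("b.txt", "r", None, "other deleted"),      # remove b.txt locally
    ("c.txt", "m", ("c.txt", "c.txt", "c.txt", False, "<ancestor node>"),
     "versions differ"),                        # schedule a three-way merge
]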

def actionkey(a):
    return a[1] in "rf" and -1 or 0, a
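
# Editorial note, not part of the original file: actionkey() sorts removals
# ("r") and forgets ("f") ahead of every other action, so files are deleted
# before new content is written.  A hypothetical doctest-style illustration:
#
#   >>> acts = [("x", "g", ("",), ""), ("y", "r", None, "")]
#   >>> [a[1] for a in sorted(acts, key=actionkey)]
#   ['r', 'g']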

def getremove(repo, mctx, overwrite, args):
    """apply usually-non-interactive updates to the working directory

    mctx is the context to be merged into the working copy

    yields tuples for progress updates
    """
    verbose = repo.ui.verbose
    unlink = util.unlinkpath
    wjoin = repo.wjoin
    fctx = mctx.filectx
    wwrite = repo.wwrite
    audit = repo.wopener.audit
    i = 0
    for arg in args:
        f = arg[0]
        if arg[1] == 'r':
            if verbose:
                repo.ui.note(_("removing %s\n") % f)
            audit(f)
            try:
                unlink(wjoin(f), ignoremissing=True)
            except OSError, inst:
                repo.ui.warn(_("update failed to remove %s: %s!\n") %
                             (f, inst.strerror))
        else:
            if verbose:
                repo.ui.note(_("getting %s\n") % f)
            wwrite(f, fctx(f).data(), arg[2][0])
        if i == 100:
            yield i, f
            i = 0
        i += 1
    if i > 0:
        yield i, f
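
# Editorial note, not part of the original file: getremove() reports progress
# in batches -- it yields roughly once per 100 processed files and once more
# for the remainder, so for 250 files a consumer sees counts summing to 250
# (e.g. 100, 100, 50) and adds them up, as applyupdates() does below when it
# drives this generator through worker.worker().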

def applyupdates(repo, actions, wctx, mctx, overwrite):
    """apply the merge action list to the working directory

    wctx is the working copy context
    mctx is the context to be merged into the working copy

    Return a tuple of counts (updated, merged, removed, unresolved) that
    describes how many files were affected by the update.
    """

    updated, merged, removed, unresolved = 0, 0, 0, 0
    ms = mergestate(repo)
    ms.reset(wctx.p1().node(), mctx.node())
    moves = []
    actions.sort(key=actionkey)

    # prescan for merges
    for a in actions:
        f, m, args, msg = a
        repo.ui.debug(" %s: %s -> %s\n" % (f, msg, m))
        if m == "m": # merge
            f1, f2, fa, move, anc = args
            if f == '.hgsubstate': # merged internally
                continue
            repo.ui.debug(" preserving %s for resolve of %s\n" % (f1, f))
            fcl = wctx[f1]
            fco = mctx[f2]
            actx = repo[anc]
            if fa in actx:
                fca = actx[fa]
            else:
                fca = repo.filectx(f1, fileid=nullrev)
            ms.add(fcl, fco, fca, f)
            if f1 != f and move:
                moves.append(f1)

    audit = repo.wopener.audit

    # remove renamed files after safely stored
    for f in moves:
        if os.path.lexists(repo.wjoin(f)):
            repo.ui.debug("removing %s\n" % f)
            audit(f)
            util.unlinkpath(repo.wjoin(f))

    numupdates = len([a for a in actions if a[1] != 'k'])
    workeractions = [a for a in actions if a[1] in 'gr']
    updateactions = [a for a in workeractions if a[1] == 'g']
    updated = len(updateactions)
    removeactions = [a for a in workeractions if a[1] == 'r']
    removed = len(removeactions)
    actions = [a for a in actions if a[1] not in 'grk']

    hgsub = [a[1] for a in workeractions if a[0] == '.hgsubstate']
    if hgsub and hgsub[0] == 'r':
        subrepo.submerge(repo, wctx, mctx, wctx, overwrite)

    z = 0
    prog = worker.worker(repo.ui, 0.001, getremove, (repo, mctx, overwrite),
                         removeactions)
    for i, item in prog:
        z += i
        repo.ui.progress(_('updating'), z, item=item, total=numupdates,
                         unit=_('files'))
    prog = worker.worker(repo.ui, 0.001, getremove, (repo, mctx, overwrite),
                         updateactions)
    for i, item in prog:
        z += i
        repo.ui.progress(_('updating'), z, item=item, total=numupdates,
                         unit=_('files'))

    if hgsub and hgsub[0] == 'g':
        subrepo.submerge(repo, wctx, mctx, wctx, overwrite)

    _updating = _('updating')
    _files = _('files')
    progress = repo.ui.progress

    for i, a in enumerate(actions):
        f, m, args, msg = a
        progress(_updating, z + i + 1, item=f, total=numupdates, unit=_files)
        if m == "m": # merge
            f1, f2, fa, move, anc = args
            if f == '.hgsubstate': # subrepo states need updating
                subrepo.submerge(repo, wctx, mctx, wctx.ancestor(mctx),
                                 overwrite)
                continue
            audit(f)
            r = ms.resolve(f, wctx)
            if r is not None and r > 0:
                unresolved += 1
            else:
                if r is None:
                    updated += 1
                else:
                    merged += 1
        elif m == "dm": # directory rename, move local
            f0, flags = args
            repo.ui.note(_("moving %s to %s\n") % (f0, f))
            audit(f)
            repo.wwrite(f, wctx.filectx(f0).data(), flags)
            util.unlinkpath(repo.wjoin(f0))
            updated += 1
        elif m == "dg": # local directory rename, get
            f0, flags = args
            repo.ui.note(_("getting %s to %s\n") % (f0, f))
            repo.wwrite(f, mctx.filectx(f0).data(), flags)
            updated += 1
        elif m == "dr": # divergent renames
            fl, = args
            repo.ui.warn(_("note: possible conflict - %s was renamed "
                           "multiple times to:\n") % f)
            for nf in fl:
                repo.ui.warn(" %s\n" % nf)
        elif m == "rd": # rename and delete
            fl, = args
            repo.ui.warn(_("note: possible conflict - %s was deleted "
                           "and renamed to:\n") % f)
            for nf in fl:
                repo.ui.warn(" %s\n" % nf)
        elif m == "e": # exec
            flags, = args
            audit(f)
            util.setflags(repo.wjoin(f), 'l' in flags, 'x' in flags)
            updated += 1
    ms.commit()
    progress(_updating, None, total=numupdates, unit=_files)

    return updated, merged, removed, unresolved
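
# Editorial note, not part of the original file: the (updated, merged,
# removed, unresolved) tuple returned above is what feeds the familiar
# command-line summary, e.g.
#
#   2 files updated, 1 files merged, 0 files removed, 1 files unresolved
#
# and a non-zero 'unresolved' count is the signal that 'hg resolve' work
# remains before the merge can be committed.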

def calculateupdates(repo, wctx, mctx, ancestors, branchmerge, force, partial,
                     acceptremote, followcopies):
    "Calculate the actions needed to merge mctx into wctx using ancestors"

    if len(ancestors) == 1: # default
        actions = manifestmerge(repo, wctx, mctx, ancestors[0],
                                branchmerge, force,
                                partial, acceptremote, followcopies)

    else: # only when merge.preferancestor=* - experimentalish code
        repo.ui.status(
            _("note: merging %s and %s using bids from ancestors %s\n") %
            (wctx, mctx, _(' and ').join(str(anc) for anc in ancestors)))

        # Call for bids
        fbids = {} # mapping filename to list af action bids
        for ancestor in ancestors:
            repo.ui.note(_('\ncalculating bids for ancestor %s\n') % ancestor)
            actions = manifestmerge(repo, wctx, mctx, ancestor,
                                    branchmerge, force,
                                    partial, acceptremote, followcopies)
            for a in sorted(actions):
                repo.ui.debug(' %s: %s\n' % (a[0], a[1]))
                f = a[0]
                if f in fbids:
                    fbids[f].append(a)
                else:
                    fbids[f] = [a]

        # Pick the best bid for each file
        repo.ui.note(_('\nauction for merging merge bids\n'))
        actions = []
        for f, bidsl in sorted(fbids.items()):
            # Consensus?
            a0 = bidsl[0]
            if util.all(a == a0 for a in bidsl[1:]): # len(bidsl) is > 1
                repo.ui.note(" %s: consensus for %s\n" % (f, a0[1]))
                actions.append(a0)
                continue
            # Group bids by kind of action
            bids = {}
            for a in bidsl:
                m = a[1]
                if m in bids:
                    bids[m].append(a)
                else:
                    bids[m] = [a]
            # If keep is an option, just do it.
            if "k" in bids:
                repo.ui.note(" %s: picking 'keep' action\n" % f)
                actions.append(bids["k"][0])
                continue
            # If all gets agree [how could they not?], just do it.
            if "g" in bids:
                ga0 = bids["g"][0]
                if util.all(a == ga0 for a in bids["g"][1:]):
                    repo.ui.note(" %s: picking 'get' action\n" % f)
                    actions.append(ga0)
                    continue
            # TODO: Consider other simple actions such as mode changes
            # Handle inefficient democrazy.
            repo.ui.note(_(' %s: multiple bids for merge action:\n') % f)
            for _f, m, args, msg in bidsl:
                repo.ui.note(' %s -> %s\n' % (msg, m))
            # Pick random action. TODO: Instead, prompt user when resolving
            a0 = bidsl[0]
            repo.ui.warn(_(' %s: ambiguous merge - picked %s action\n') %
                         (f, a0[1]))
            actions.append(a0)
            continue
        repo.ui.note(_('end of auction\n\n'))

    # Filter out prompts.
    newactions, prompts = [], []
    for a in actions:
        if a[1] in ("cd", "dc"):
            prompts.append(a)
        else:
            newactions.append(a)
    # Prompt and create actions. TODO: Move this towards resolve phase.
    for f, m, args, msg in sorted(prompts):
        if m == "cd":
            if repo.ui.promptchoice(
                _("local changed %s which remote deleted\n"
                  "use (c)hanged version or (d)elete?"
                  "$$ &Changed $$ &Delete") % f, 0):
                newactions.append((f, "r", None, "prompt delete"))
            else:
                newactions.append((f, "a", None, "prompt keep"))
        elif m == "dc":
            flags, = args
            if repo.ui.promptchoice(
                _("remote changed %s which local deleted\n"
                  "use (c)hanged version or leave (d)eleted?"
                  "$$ &Changed $$ &Deleted") % f, 0) == 0:
                newactions.append((f, "g", (flags,), "prompt recreating"))
        else: assert False, m

    if wctx.rev() is None:
        newactions += _forgetremoved(wctx, mctx, branchmerge)

    return newactions
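
# Editorial note, not part of the original file: the bid-merge branch above
# (len(ancestors) > 1) is only reached when update() finds several common
# ancestor heads and the experimental setting
#
#   [merge]
#   preferancestor = *
#
# is enabled; with the default configuration a single ancestor is picked and
# the auction is skipped entirely.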

def recordupdates(repo, actions, branchmerge):
    "record merge actions to the dirstate"

    for a in actions:
        f, m, args, msg = a
        if m == "r": # remove
            if branchmerge:
                repo.dirstate.remove(f)
            else:
                repo.dirstate.drop(f)
        elif m == "a": # re-add
            if not branchmerge:
                repo.dirstate.add(f)
        elif m == "f": # forget
            repo.dirstate.drop(f)
        elif m == "e": # exec change
            repo.dirstate.normallookup(f)
        elif m == "k": # keep
            pass
        elif m == "g": # get
            if branchmerge:
                repo.dirstate.otherparent(f)
            else:
                repo.dirstate.normal(f)
        elif m == "m": # merge
            f1, f2, fa, move, anc = args
            if branchmerge:
                # We've done a branch merge, mark this file as merged
                # so that we properly record the merger later
                repo.dirstate.merge(f)
                if f1 != f2: # copy/rename
                    if move:
                        repo.dirstate.remove(f1)
                    if f1 != f:
                        repo.dirstate.copy(f1, f)
                    else:
                        repo.dirstate.copy(f2, f)
            else:
                # We've update-merged a locally modified file, so
                # we set the dirstate to emulate a normal checkout
                # of that file some time in the past. Thus our
                # merge will appear as a normal local file
                # modification.
                if f2 == f: # file not locally copied/moved
                    repo.dirstate.normallookup(f)
                if move:
                    repo.dirstate.drop(f1)
        elif m == "dm": # directory rename, move local
            f0, flag = args
            if f0 not in repo.dirstate:
                # untracked file moved
                continue
            if branchmerge:
                repo.dirstate.add(f)
                repo.dirstate.remove(f0)
                repo.dirstate.copy(f0, f)
            else:
                repo.dirstate.normal(f)
                repo.dirstate.drop(f0)
        elif m == "dg": # directory rename, get
            f0, flag = args
            if branchmerge:
                repo.dirstate.add(f)
                repo.dirstate.copy(f0, f)
            else:
                repo.dirstate.normal(f)
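
# Editorial sketch, not part of the original file: for a plain working-copy
# update (branchmerge=False) the calls above reduce to roughly
#
#   "r"/"f" -> drop(f)      "g" -> normal(f)       "e" -> normallookup(f)
#   "a"     -> add(f)       "m" -> normallookup(f) (plus drop(f1) on a move)
#
# while a branch merge instead records "g" via otherparent(f) and "m" via
# dirstate.merge(f), so the second parent is remembered at commit time.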

def update(repo, node, branchmerge, force, partial, ancestor=None,
           mergeancestor=False):
    """
    Perform a merge between the working directory and the given node

    node = the node to update to, or None if unspecified
    branchmerge = whether to merge between branches
    force = whether to force branch merging or file overwriting
    partial = a function to filter file lists (dirstate not updated)
    mergeancestor = whether it is merging with an ancestor. If true,
      we should accept the incoming changes for any prompts that occur.
      If false, merging with an ancestor (fast-forward) is only allowed
      between different named branches. This flag is used by rebase extension
      as a temporary fix and should be avoided in general.

    The table below shows all the behaviors of the update command
    given the -c and -C or no options, whether the working directory
    is dirty, whether a revision is specified, and the relationship of
    the parent rev to the target rev (linear, on the same named
    branch, or on another named branch).

    This logic is tested by test-update-branches.t.

    -c -C dirty rev  |  linear   same    cross
     n  n    n   n   |    ok      (1)      x
     n  n    n   y   |    ok      ok       ok
     n  n    y   n   |   merge    (2)      (2)
     n  n    y   y   |   merge    (3)      (3)
     n  y    *   *   |    ---   discard    ---
     y  n    y   *   |    ---     (4)      ---
     y  n    n   *   |    ---     ok       ---
     y  y    *   *   |    ---     (5)      ---

    x = can't happen
    * = don't-care
    1 = abort: not a linear update (merge or update --check to force update)
    2 = abort: uncommitted changes (commit and merge, or update --clean to
        discard changes)
    3 = abort: uncommitted changes (commit or update --clean to discard changes)
    4 = abort: uncommitted changes (checked in commands.py)
    5 = incompatible options (checked in commands.py)

    Return the same tuple as applyupdates().
    """
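
    # Editorial note, not part of the original docstring: roughly speaking,
    # 'hg update REV' reaches this function with branchmerge=False and
    # force=False, 'hg update --clean REV' with force=True, and 'hg merge REV'
    # with branchmerge=True (via the update/clean/merge helpers in hg.py);
    # the exact argument plumbing lives in commands.py and hg.py.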

    onode = node
    wlock = repo.wlock()
    try:
        wc = repo[None]
        pl = wc.parents()
        p1 = pl[0]
        pas = [None]
        if ancestor:
            pas = [repo[ancestor]]

        if node is None:
            # Here is where we should consider bookmarks, divergent bookmarks,
            # foreground changesets (successors), and tip of current branch;
            # but currently we are only checking the branch tips.
            try:
                node = repo.branchtip(wc.branch())
            except error.RepoLookupError:
                if wc.branch() == "default": # no default branch!
                    node = repo.lookup("tip") # update to tip
                else:
                    raise util.Abort(_("branch %s not found") % wc.branch())

            if p1.obsolete() and not p1.children():
                # allow updating to successors
                successors = obsolete.successorssets(repo, p1.node())

                # behavior of certain cases is as follows,
                #
                # divergent changesets: update to highest rev, similar to what
                #     is currently done when there are more than one head
                #     (i.e. 'tip')
                #
                # replaced changesets: same as divergent except we know there
                # is no conflict
                #
                # pruned changeset: no update is done; though, we could
                #     consider updating to the first non-obsolete parent,
                #     similar to what is current done for 'hg prune'

                if successors:
                    # flatten the list here handles both divergent (len > 1)
                    # and the usual case (len = 1)
                    successors = [n for sub in successors for n in sub]

                    # get the max revision for the given successors set,
                    # i.e. the 'tip' of a set
                    node = repo.revs("max(%ln)", successors)[0]
                    pas = [p1]

        overwrite = force and not branchmerge

        p2 = repo[node]
        if pas[0] is None:
            if repo.ui.config("merge", "preferancestor") == '*':
                cahs = repo.changelog.commonancestorsheads(p1.node(), p2.node())
                pas = [repo[anc] for anc in (sorted(cahs) or [nullid])]
            else:
                pas = [p1.ancestor(p2)]
                pas = [p1.ancestor(p2, warn=True)]

        fp1, fp2, xp1, xp2 = p1.node(), p2.node(), str(p1), str(p2)

        ### check phase
        if not overwrite and len(pl) > 1:
            raise util.Abort(_("outstanding uncommitted merges"))
        if branchmerge:
            if pas == [p2]:
                raise util.Abort(_("merging with a working directory ancestor"
                                   " has no effect"))
            elif pas == [p1]:
                if not mergeancestor and p1.branch() == p2.branch():
                    raise util.Abort(_("nothing to merge"),
                                     hint=_("use 'hg update' "
                                            "or check 'hg heads'"))
            if not force and (wc.files() or wc.deleted()):
                raise util.Abort(_("uncommitted changes"),
                                 hint=_("use 'hg status' to list changes"))
            for s in sorted(wc.substate):
                if wc.sub(s).dirty():
                    raise util.Abort(_("uncommitted changes in "
                                       "subrepository '%s'") % s)

        elif not overwrite:
            if p1 == p2: # no-op update
                # call the hooks and exit early
                repo.hook('preupdate', throw=True, parent1=xp2, parent2='')
                repo.hook('update', parent1=xp2, parent2='', error=0)
                return 0, 0, 0, 0

            if pas not in ([p1], [p2]): # nonlinear
                dirty = wc.dirty(missing=True)
                if dirty or onode is None:
                    # Branching is a bit strange to ensure we do the minimal
                    # amount of call to obsolete.background.
                    foreground = obsolete.foreground(repo, [p1.node()])
                    # note: the <node> variable contains a random identifier
                    if repo[node].node() in foreground:
                        pas = [p1] # allow updating to successors
                    elif dirty:
                        msg = _("uncommitted changes")
                        if onode is None:
                            hint = _("commit and merge, or update --clean to"
                                     " discard changes")
                        else:
                            hint = _("commit or update --clean to discard"
                                     " changes")
                        raise util.Abort(msg, hint=hint)
                    else: # node is none
                        msg = _("not a linear update")
                        hint = _("merge or update --check to force update")
                        raise util.Abort(msg, hint=hint)
                else:
                    # Allow jumping branches if clean and specific rev given
                    pas = [p1]

        followcopies = False
        if overwrite:
            pas = [wc]
        elif pas == [p2]: # backwards
            pas = [wc.p1()]
        elif not branchmerge and not wc.dirty(missing=True):
            pass
        elif pas[0] and repo.ui.configbool("merge", "followcopies", True):
            followcopies = True

        ### calculate phase
        actions = calculateupdates(repo, wc, p2, pas, branchmerge, force,
                                   partial, mergeancestor, followcopies)

        ### apply phase
        if not branchmerge: # just jump to the new rev
            fp1, fp2, xp1, xp2 = fp2, nullid, xp2, ''
        if not partial:
            repo.hook('preupdate', throw=True, parent1=xp1, parent2=xp2)
            # note that we're in the middle of an update
            repo.vfs.write('updatestate', p2.hex())

        stats = applyupdates(repo, actions, wc, p2, overwrite)

        if not partial:
            repo.setparents(fp1, fp2)
            recordupdates(repo, actions, branchmerge)
            # update completed, clear state
            util.unlink(repo.join('updatestate'))

            if not branchmerge:
                repo.dirstate.setbranch(p2.branch())
    finally:
        wlock.release()

    if not partial:
        repo.hook('update', parent1=xp1, parent2=xp2, error=stats[3])
    return stats