merge: replace magic strings with NAMED_CONSTANTS (API)...
Augie Fackler
r43240:1ad3ebb3 default
@@ -1,1240 +1,1245 @@
# hg.py - repository classes for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
# Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno
import hashlib
import os
import shutil
import stat

from .i18n import _
from .node import (
    nullid,
)

from . import (
    bookmarks,
    bundlerepo,
    cacheutil,
    cmdutil,
    destutil,
    discovery,
    error,
    exchange,
    extensions,
    httppeer,
    localrepo,
    lock,
    logcmdutil,
    logexchange,
    merge as mergemod,
    narrowspec,
    node,
    phases,
    pycompat,
    scmutil,
    sshpeer,
    statichttprepo,
    ui as uimod,
    unionrepo,
    url,
    util,
    verify as verifymod,
    vfs as vfsmod,
)

from .interfaces import (
    repository as repositorymod,
)

release = lock.release

# shared features
sharedbookmarks = 'bookmarks'

def _local(path):
    path = util.expandpath(util.urllocalpath(path))

    try:
        isfile = os.path.isfile(path)
    # Python 2 raises TypeError, Python 3 ValueError.
    except (TypeError, ValueError) as e:
        raise error.Abort(_('invalid path %s: %s') % (
            path, pycompat.bytestr(e)))

    return isfile and bundlerepo or localrepo

def addbranchrevs(lrepo, other, branches, revs):
    peer = other.peer() # a courtesy to callers using a localrepo for other
    hashbranch, branches = branches
    if not hashbranch and not branches:
        x = revs or None
        if revs:
            y = revs[0]
        else:
            y = None
        return x, y
    if revs:
        revs = list(revs)
    else:
        revs = []

    if not peer.capable('branchmap'):
        if branches:
            raise error.Abort(_("remote branch lookup not supported"))
        revs.append(hashbranch)
        return revs, revs[0]

    with peer.commandexecutor() as e:
        branchmap = e.callcommand('branchmap', {}).result()

    def primary(branch):
        if branch == '.':
            if not lrepo:
                raise error.Abort(_("dirstate branch not accessible"))
            branch = lrepo.dirstate.branch()
        if branch in branchmap:
            revs.extend(node.hex(r) for r in reversed(branchmap[branch]))
            return True
        else:
            return False

    for branch in branches:
        if not primary(branch):
            raise error.RepoLookupError(_("unknown branch '%s'") % branch)
    if hashbranch:
        if not primary(hashbranch):
            revs.append(hashbranch)
    return revs, revs[0]

def parseurl(path, branches=None):
    '''parse url#branch, returning (url, (branch, branches))'''

    u = util.url(path)
    branch = None
    if u.fragment:
        branch = u.fragment
        u.fragment = None
    return bytes(u), (branch, branches or [])
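
For orientation, a small sketch of what parseurl() hands back; the URL is invented, and the output is shown in the same bytes-as-str doctest style this file already uses for defaultdest():

    >>> parseurl(b'https://example.org/repo#stable', [b'default'])
    ('https://example.org/repo', ('stable', ['default']))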
schemes = {
    'bundle': bundlerepo,
    'union': unionrepo,
    'file': _local,
    'http': httppeer,
    'https': httppeer,
    'ssh': sshpeer,
    'static-http': statichttprepo,
}

def _peerlookup(path):
    u = util.url(path)
    scheme = u.scheme or 'file'
    thing = schemes.get(scheme) or schemes['file']
    try:
        return thing(path)
    except TypeError:
        # we can't test callable(thing) because 'thing' can be an unloaded
        # module that implements __call__
        if not util.safehasattr(thing, 'instance'):
            raise
        return thing
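
Roughly how the dispatch above behaves (a sketch, not part of the change; calling a module raises TypeError, which is why the except branch can hand the module itself back once it is known to have an instance() factory):

    # b'https://example.org/repo' -> schemes['https'] -> the httppeer module
    # b'/some/local/path'         -> no scheme -> schemes['file'] -> _local(),
    #                                which picks bundlerepo for a bundle file
    #                                and localrepo for a directory
    mod = _peerlookup(b'https://example.org/repo')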
def islocal(repo):
    '''return true if repo (or path pointing to repo) is local'''
    if isinstance(repo, bytes):
        try:
            return _peerlookup(repo).islocal(repo)
        except AttributeError:
            return False
    return repo.local()

def openpath(ui, path, sendaccept=True):
    '''open path with open if local, url.open if remote'''
    pathurl = util.url(path, parsequery=False, parsefragment=False)
    if pathurl.islocal():
        return util.posixfile(pathurl.localpath(), 'rb')
    else:
        return url.open(ui, path, sendaccept=sendaccept)

# a list of (ui, repo) functions called for wire peer initialization
wirepeersetupfuncs = []

def _peerorrepo(ui, path, create=False, presetupfuncs=None,
                intents=None, createopts=None):
    """return a repository object for the specified path"""
    obj = _peerlookup(path).instance(ui, path, create, intents=intents,
                                     createopts=createopts)
    ui = getattr(obj, "ui", ui)
    for f in presetupfuncs or []:
        f(ui, obj)
    ui.log(b'extension', b'- executing reposetup hooks\n')
    with util.timedcm('all reposetup') as allreposetupstats:
        for name, module in extensions.extensions(ui):
            ui.log(b'extension', b'  - running reposetup for %s\n', name)
            hook = getattr(module, 'reposetup', None)
            if hook:
                with util.timedcm('reposetup %r', name) as stats:
                    hook(ui, obj)
                ui.log(b'extension', b'  > reposetup for %s took %s\n',
                       name, stats)
    ui.log(b'extension', b'> all reposetup took %s\n', allreposetupstats)
    if not obj.local():
        for f in wirepeersetupfuncs:
            f(ui, obj)
    return obj

def repository(ui, path='', create=False, presetupfuncs=None, intents=None,
               createopts=None):
    """return a repository object for the specified path"""
    peer = _peerorrepo(ui, path, create, presetupfuncs=presetupfuncs,
                       intents=intents, createopts=createopts)
    repo = peer.local()
    if not repo:
        raise error.Abort(_("repository '%s' is not local") %
                          (path or peer.url()))
    return repo.filtered('visible')
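
A minimal sketch of opening a repository through this entry point (the path is hypothetical; uimod.ui.load() is the usual way to construct a default ui):

    from mercurial import hg, ui as uimod

    myui = uimod.ui.load()
    repo = hg.repository(myui, b'/path/to/local/repo')
    # repository() refuses remote paths and returns the repo with the
    # 'visible' filter already applied, so hidden changesets are excluded.
    tip = repo[b'tip']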
def peer(uiorrepo, opts, path, create=False, intents=None, createopts=None):
    '''return a repository peer for the specified path'''
    rui = remoteui(uiorrepo, opts)
    return _peerorrepo(rui, path, create, intents=intents,
                       createopts=createopts).peer()

def defaultdest(source):
    '''return default destination of clone if none is given

    >>> defaultdest(b'foo')
    'foo'
    >>> defaultdest(b'/foo/bar')
    'bar'
    >>> defaultdest(b'/')
    ''
    >>> defaultdest(b'')
    ''
    >>> defaultdest(b'http://example.org/')
    ''
    >>> defaultdest(b'http://example.org/foo/')
    'foo'
    '''
    path = util.url(source).path
    if not path:
        return ''
    return os.path.basename(os.path.normpath(path))

def sharedreposource(repo):
    """Returns repository object for source repository of a shared repo.

    If repo is not a shared repository, returns None.
    """
    if repo.sharedpath == repo.path:
        return None

    if util.safehasattr(repo, 'srcrepo') and repo.srcrepo:
        return repo.srcrepo

    # the sharedpath always ends in the .hg; we want the path to the repo
    source = repo.vfs.split(repo.sharedpath)[0]
    srcurl, branches = parseurl(source)
    srcrepo = repository(repo.ui, srcurl)
    repo.srcrepo = srcrepo
    return srcrepo

def share(ui, source, dest=None, update=True, bookmarks=True, defaultpath=None,
          relative=False):
    '''create a shared repository'''

    if not islocal(source):
        raise error.Abort(_('can only share local repositories'))

    if not dest:
        dest = defaultdest(source)
    else:
        dest = ui.expandpath(dest)

    if isinstance(source, bytes):
        origsource = ui.expandpath(source)
        source, branches = parseurl(origsource)
        srcrepo = repository(ui, source)
        rev, checkout = addbranchrevs(srcrepo, srcrepo, branches, None)
    else:
        srcrepo = source.local()
        checkout = None

    shareditems = set()
    if bookmarks:
        shareditems.add(sharedbookmarks)

    r = repository(ui, dest, create=True, createopts={
        'sharedrepo': srcrepo,
        'sharedrelative': relative,
        'shareditems': shareditems,
    })

    postshare(srcrepo, r, defaultpath=defaultpath)
    r = repository(ui, dest)
    _postshareupdate(r, update, checkout=checkout)
    return r
786 destrepo.vfs.write('hgrc', util.tonativeeol(template % defaulturl))
786 destrepo.vfs.write('hgrc', util.tonativeeol(template % defaulturl))
787 destrepo.ui.setconfig('paths', 'default', defaulturl, 'clone')
787 destrepo.ui.setconfig('paths', 'default', defaulturl, 'clone')
788
788
789 if ui.configbool('experimental', 'remotenames'):
789 if ui.configbool('experimental', 'remotenames'):
790 logexchange.pullremotenames(destrepo, srcpeer)
790 logexchange.pullremotenames(destrepo, srcpeer)
791
791
792 if update:
792 if update:
793 if update is not True:
793 if update is not True:
794 with srcpeer.commandexecutor() as e:
794 with srcpeer.commandexecutor() as e:
795 checkout = e.callcommand('lookup', {
795 checkout = e.callcommand('lookup', {
796 'key': update,
796 'key': update,
797 }).result()
797 }).result()
798
798
799 uprev = None
799 uprev = None
800 status = None
800 status = None
801 if checkout is not None:
801 if checkout is not None:
802 # Some extensions (at least hg-git and hg-subversion) have
802 # Some extensions (at least hg-git and hg-subversion) have
803 # a peer.lookup() implementation that returns a name instead
803 # a peer.lookup() implementation that returns a name instead
804 # of a nodeid. We work around it here until we've figured
804 # of a nodeid. We work around it here until we've figured
805 # out a better solution.
805 # out a better solution.
806 if len(checkout) == 20 and checkout in destrepo:
806 if len(checkout) == 20 and checkout in destrepo:
807 uprev = checkout
807 uprev = checkout
808 elif scmutil.isrevsymbol(destrepo, checkout):
808 elif scmutil.isrevsymbol(destrepo, checkout):
809 uprev = scmutil.revsymbol(destrepo, checkout).node()
809 uprev = scmutil.revsymbol(destrepo, checkout).node()
810 else:
810 else:
811 if update is not True:
811 if update is not True:
812 try:
812 try:
813 uprev = destrepo.lookup(update)
813 uprev = destrepo.lookup(update)
814 except error.RepoLookupError:
814 except error.RepoLookupError:
815 pass
815 pass
816 if uprev is None:
816 if uprev is None:
817 try:
817 try:
818 uprev = destrepo._bookmarks['@']
818 uprev = destrepo._bookmarks['@']
819 update = '@'
819 update = '@'
820 bn = destrepo[uprev].branch()
820 bn = destrepo[uprev].branch()
821 if bn == 'default':
821 if bn == 'default':
822 status = _("updating to bookmark @\n")
822 status = _("updating to bookmark @\n")
823 else:
823 else:
824 status = (_("updating to bookmark @ on branch %s\n")
824 status = (_("updating to bookmark @ on branch %s\n")
825 % bn)
825 % bn)
826 except KeyError:
826 except KeyError:
827 try:
827 try:
828 uprev = destrepo.branchtip('default')
828 uprev = destrepo.branchtip('default')
829 except error.RepoLookupError:
829 except error.RepoLookupError:
830 uprev = destrepo.lookup('tip')
830 uprev = destrepo.lookup('tip')
831 if not status:
831 if not status:
832 bn = destrepo[uprev].branch()
832 bn = destrepo[uprev].branch()
833 status = _("updating to branch %s\n") % bn
833 status = _("updating to branch %s\n") % bn
834 destrepo.ui.status(status)
834 destrepo.ui.status(status)
835 _update(destrepo, uprev)
835 _update(destrepo, uprev)
836 if update in destrepo._bookmarks:
836 if update in destrepo._bookmarks:
837 bookmarks.activate(destrepo, update)
837 bookmarks.activate(destrepo, update)
838 finally:
838 finally:
839 release(srclock, destlock)
839 release(srclock, destlock)
840 if cleandir is not None:
840 if cleandir is not None:
841 shutil.rmtree(cleandir, True)
841 shutil.rmtree(cleandir, True)
842 if srcpeer is not None:
842 if srcpeer is not None:
843 srcpeer.close()
843 srcpeer.close()
844 return srcpeer, destpeer
844 return srcpeer, destpeer
845
845
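
A sketch of a programmatic clone (URL and destination invented; the empty dict stands in for peeropts, which normally carries remote-related options):

    from mercurial import hg, ui as uimod

    myui = uimod.ui.load()
    srcpeer, destpeer = hg.clone(myui, {}, b'https://example.org/repo',
                                 dest=b'repo-copy', pull=True)
    # Note the finally block above: clone() closes srcpeer on the way
    # out, so the returned source peer is no longer usable for commands.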
def _showstats(repo, stats, quietempty=False):
    if quietempty and stats.isempty():
        return
    repo.ui.status(_("%d files updated, %d files merged, "
                     "%d files removed, %d files unresolved\n") % (
                   stats.updatedcount, stats.mergedcount,
                   stats.removedcount, stats.unresolvedcount))

def updaterepo(repo, node, overwrite, updatecheck=None):
    """Update the working directory to node.

    When overwrite is set, changes are clobbered, merged else

    returns stats (see pydoc mercurial.merge.applyupdates)"""
    return mergemod.update(repo, node, branchmerge=False, force=overwrite,
                           labels=['working copy', 'destination'],
                           updatecheck=updatecheck)

def update(repo, node, quietempty=False, updatecheck=None):
    """update the working directory to node"""
    stats = updaterepo(repo, node, False, updatecheck=updatecheck)
    _showstats(repo, stats, quietempty)
    if stats.unresolvedcount:
        repo.ui.status(_("use 'hg resolve' to retry unresolved file merges\n"))
    return stats.unresolvedcount > 0

# naming conflict in clone()
_update = update

def clean(repo, node, show_stats=True, quietempty=False):
    """forcibly switch the working directory to node, clobbering changes"""
    stats = updaterepo(repo, node, True)
    repo.vfs.unlinkpath('graftstate', ignoremissing=True)
    if show_stats:
        _showstats(repo, stats, quietempty)
    return stats.unresolvedcount > 0

# naming conflict in updatetotally()
_clean = clean

def updatetotally(ui, repo, checkout, brev, clean=False, updatecheck=None):
    """Update the working directory with extra care for non-file components

    This takes care of non-file components below:

    :bookmark: might be advanced or (in)activated

    This takes arguments below:

    :checkout: to which revision the working directory is updated
    :brev: a name, which might be a bookmark to be activated after updating
    :clean: whether changes in the working directory can be discarded
    :updatecheck: how to deal with a dirty working directory

-    Valid values for updatecheck are (None => linear):
+    Valid values for updatecheck are the UPDATECHECK_* constants
+    defined in the merge module. Passing `None` will result in using the
+    configured default.

-    * abort: abort if the working directory is dirty
-    * none: don't check (merge working directory changes into destination)
-    * linear: check that update is linear before merging working directory
-      changes into destination
-    * noconflict: check that the update does not result in file merges
+    * ABORT: abort if the working directory is dirty
+    * NONE: don't check (merge working directory changes into destination)
+    * LINEAR: check that update is linear before merging working directory
+      changes into destination
+    * NO_CONFLICT: check that the update does not result in file merges

    This returns whether conflict is detected at updating or not.
    """
    if updatecheck is None:
        updatecheck = ui.config('commands', 'update.check')
-    if updatecheck not in ('abort', 'none', 'linear', 'noconflict'):
+    if updatecheck not in (mergemod.UPDATECHECK_ABORT,
+                           mergemod.UPDATECHECK_NONE,
+                           mergemod.UPDATECHECK_LINEAR,
+                           mergemod.UPDATECHECK_NO_CONFLICT):
        # If not configured, or invalid value configured
-        updatecheck = 'linear'
+        updatecheck = mergemod.UPDATECHECK_LINEAR
    with repo.wlock():
        movemarkfrom = None
        warndest = False
        if checkout is None:
            updata = destutil.destupdate(repo, clean=clean)
            checkout, movemarkfrom, brev = updata
            warndest = True

        if clean:
            ret = _clean(repo, checkout)
        else:
-            if updatecheck == 'abort':
+            if updatecheck == mergemod.UPDATECHECK_ABORT:
                cmdutil.bailifchanged(repo, merge=False)
-                updatecheck = 'none'
+                updatecheck = mergemod.UPDATECHECK_NONE
            ret = _update(repo, checkout, updatecheck=updatecheck)

        if not ret and movemarkfrom:
            if movemarkfrom == repo['.'].node():
                pass # no-op update
            elif bookmarks.update(repo, [movemarkfrom], repo['.'].node()):
                b = ui.label(repo._activebookmark, 'bookmarks.active')
                ui.status(_("updating bookmark %s\n") % b)
            else:
                # this can happen with a non-linear update
943 # this can happen with a non-linear update
939 b = ui.label(repo._activebookmark, 'bookmarks')
944 b = ui.label(repo._activebookmark, 'bookmarks')
940 ui.status(_("(leaving bookmark %s)\n") % b)
945 ui.status(_("(leaving bookmark %s)\n") % b)
941 bookmarks.deactivate(repo)
946 bookmarks.deactivate(repo)
942 elif brev in repo._bookmarks:
947 elif brev in repo._bookmarks:
943 if brev != repo._activebookmark:
948 if brev != repo._activebookmark:
944 b = ui.label(brev, 'bookmarks.active')
949 b = ui.label(brev, 'bookmarks.active')
945 ui.status(_("(activating bookmark %s)\n") % b)
950 ui.status(_("(activating bookmark %s)\n") % b)
946 bookmarks.activate(repo, brev)
951 bookmarks.activate(repo, brev)
947 elif brev:
952 elif brev:
948 if repo._activebookmark:
953 if repo._activebookmark:
949 b = ui.label(repo._activebookmark, 'bookmarks')
954 b = ui.label(repo._activebookmark, 'bookmarks')
950 ui.status(_("(leaving bookmark %s)\n") % b)
955 ui.status(_("(leaving bookmark %s)\n") % b)
951 bookmarks.deactivate(repo)
956 bookmarks.deactivate(repo)
952
957
953 if warndest:
958 if warndest:
954 destutil.statusotherdests(ui, repo)
959 destutil.statusotherdests(ui, repo)
955
960
956 return ret
961 return ret
957
962
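# Illustrative sketch (not part of the patch itself): with the magic strings
# gone, callers are expected to pass the UPDATECHECK_* constants from the
# merge module, e.g.
#
#     from mercurial import merge as mergemod
#     updatetotally(ui, repo, checkout, brev,
#                   updatecheck=mergemod.UPDATECHECK_NO_CONFLICT)
#
# Passing updatecheck=None still falls back to the 'commands.update.check'
# configuration, as documented in the docstring above.
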
def merge(repo, node, force=None, remind=True, mergeforce=False, labels=None,
          abort=False):
    """Branch merge with node, resolving changes. Return true if any
    unresolved conflicts."""
    if abort:
        return abortmerge(repo.ui, repo)

    stats = mergemod.update(repo, node, branchmerge=True, force=force,
                            mergeforce=mergeforce, labels=labels)
    _showstats(repo, stats)
    if stats.unresolvedcount:
        repo.ui.status(_("use 'hg resolve' to retry unresolved file merges "
                         "or 'hg merge --abort' to abandon\n"))
    elif remind:
        repo.ui.status(_("(branch merge, don't forget to commit)\n"))
    return stats.unresolvedcount > 0

def abortmerge(ui, repo):
    ms = mergemod.mergestate.read(repo)
    if ms.active():
        # there were conflicts
        node = ms.localctx.hex()
    else:
        # there were no conflicts, mergestate was not stored
        node = repo['.'].hex()

    repo.ui.status(_("aborting the merge, updating back to"
                     " %s\n") % node[:12])
    stats = mergemod.update(repo, node, branchmerge=False, force=True)
    _showstats(repo, stats)
    return stats.unresolvedcount > 0

def _incoming(displaychlist, subreporecurse, ui, repo, source,
              opts, buffered=False):
    """
    Helper for incoming / gincoming.
    displaychlist gets called with
    (remoterepo, incomingchangesetlist, displayer) parameters,
    and is supposed to contain only code that can't be unified.
    """
    source, branches = parseurl(ui.expandpath(source), opts.get('branch'))
    other = peer(repo, opts, source)
    ui.status(_('comparing with %s\n') % util.hidepassword(source))
    revs, checkout = addbranchrevs(repo, other, branches, opts.get('rev'))

    if revs:
        revs = [other.lookup(rev) for rev in revs]
    other, chlist, cleanupfn = bundlerepo.getremotechanges(ui, repo, other,
                                revs, opts["bundle"], opts["force"])
    try:
        if not chlist:
            ui.status(_("no changes found\n"))
            return subreporecurse()
        ui.pager('incoming')
        displayer = logcmdutil.changesetdisplayer(ui, other, opts,
                                                  buffered=buffered)
        displaychlist(other, chlist, displayer)
        displayer.close()
    finally:
        cleanupfn()
    subreporecurse()
    return 0 # exit code is zero since we found incoming changes

def incoming(ui, repo, source, opts):
    def subreporecurse():
        ret = 1
        if opts.get('subrepos'):
            ctx = repo[None]
            for subpath in sorted(ctx.substate):
                sub = ctx.sub(subpath)
                ret = min(ret, sub.incoming(ui, source, opts))
        return ret

    def display(other, chlist, displayer):
        limit = logcmdutil.getlimit(opts)
        if opts.get('newest_first'):
            chlist.reverse()
        count = 0
        for n in chlist:
            if limit is not None and count >= limit:
                break
            parents = [p for p in other.changelog.parents(n) if p != nullid]
            if opts.get('no_merges') and len(parents) == 2:
                continue
            count += 1
            displayer.show(other[n])
    return _incoming(display, subreporecurse, ui, repo, source, opts)

def _outgoing(ui, repo, dest, opts):
    path = ui.paths.getpath(dest, default=('default-push', 'default'))
    if not path:
        raise error.Abort(_('default repository not configured!'),
                          hint=_("see 'hg help config.paths'"))
    dest = path.pushloc or path.loc
    branches = path.branch, opts.get('branch') or []

    ui.status(_('comparing with %s\n') % util.hidepassword(dest))
    revs, checkout = addbranchrevs(repo, repo, branches, opts.get('rev'))
    if revs:
        revs = [repo[rev].node() for rev in scmutil.revrange(repo, revs)]

    other = peer(repo, opts, dest)
    outgoing = discovery.findcommonoutgoing(repo, other, revs,
                                            force=opts.get('force'))
    o = outgoing.missing
    if not o:
        scmutil.nochangesfound(repo.ui, repo, outgoing.excluded)
    return o, other

def outgoing(ui, repo, dest, opts):
    def recurse():
        ret = 1
        if opts.get('subrepos'):
            ctx = repo[None]
            for subpath in sorted(ctx.substate):
                sub = ctx.sub(subpath)
                ret = min(ret, sub.outgoing(ui, dest, opts))
        return ret

    limit = logcmdutil.getlimit(opts)
    o, other = _outgoing(ui, repo, dest, opts)
    if not o:
        cmdutil.outgoinghooks(ui, repo, other, opts, o)
        return recurse()

    if opts.get('newest_first'):
        o.reverse()
    ui.pager('outgoing')
    displayer = logcmdutil.changesetdisplayer(ui, repo, opts)
    count = 0
    for n in o:
        if limit is not None and count >= limit:
            break
        parents = [p for p in repo.changelog.parents(n) if p != nullid]
        if opts.get('no_merges') and len(parents) == 2:
            continue
        count += 1
        displayer.show(repo[n])
    displayer.close()
    cmdutil.outgoinghooks(ui, repo, other, opts, o)
    recurse()
    return 0 # exit code is zero since we found outgoing changes

def verify(repo, level=None):
    """verify the consistency of a repository"""
    ret = verifymod.verify(repo, level=level)

    # Broken subrepo references in hidden csets don't seem worth worrying
    # about, since they can't be pushed/pulled, and --hidden can be used if
    # they are a concern.

    # pathto() is needed for -R case
    revs = repo.revs("filelog(%s)",
                     util.pathto(repo.root, repo.getcwd(), '.hgsubstate'))

    if revs:
        repo.ui.status(_('checking subrepo links\n'))
        for rev in revs:
            ctx = repo[rev]
            try:
                for subpath in ctx.substate:
                    try:
                        ret = (ctx.sub(subpath, allowcreate=False).verify()
                               or ret)
                    except error.RepoError as e:
                        repo.ui.warn(('%d: %s\n') % (rev, e))
            except Exception:
                repo.ui.warn(_('.hgsubstate is corrupt in revision %s\n') %
                             node.short(ctx.node()))

    return ret

def remoteui(src, opts):
    'build a remote ui from ui or repo and opts'
    if util.safehasattr(src, 'baseui'): # looks like a repository
        dst = src.baseui.copy() # drop repo-specific config
        src = src.ui # copy target options from repo
    else: # assume it's a global ui object
        dst = src.copy() # keep all global options

    # copy ssh-specific options
    for o in 'ssh', 'remotecmd':
        v = opts.get(o) or src.config('ui', o)
        if v:
            dst.setconfig("ui", o, v, 'copied')

    # copy bundle-specific options
    r = src.config('bundle', 'mainreporoot')
    if r:
        dst.setconfig('bundle', 'mainreporoot', r, 'copied')

    # copy selected local settings to the remote ui
    for sect in ('auth', 'hostfingerprints', 'hostsecurity', 'http_proxy'):
        for key, val in src.configitems(sect):
            dst.setconfig(sect, key, val, 'copied')
    v = src.config('web', 'cacerts')
    if v:
        dst.setconfig('web', 'cacerts', util.expandpath(v), 'copied')

    return dst

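# Illustrative sketch (hypothetical option values): remoteui() is typically
# fed the command-line options so that e.g. --ssh / --remotecmd override the
# local configuration when contacting a peer:
#
#     remote = remoteui(repo, {'ssh': 'ssh -C', 'remotecmd': 'hg'})
#
# The returned ui carries the ssh/remotecmd overrides plus the auth, host
# fingerprint and proxy sections copied above, but no repo-specific config.
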
# Files of interest
# Used to check if the repository has changed looking at mtime and size of
# these files.
foi = [('spath', '00changelog.i'),
       ('spath', 'phaseroots'), # ! phase can change content at the same size
       ('spath', 'obsstore'),
       ('path', 'bookmarks'), # ! bookmark can change content at the same size
       ]

class cachedlocalrepo(object):
    """Holds a localrepository that can be cached and reused."""

    def __init__(self, repo):
        """Create a new cached repo from an existing repo.

        We assume the passed in repo was recently created. If the
        repo has changed between when it was created and when it was
        turned into a cache, it may not refresh properly.
        """
        assert isinstance(repo, localrepo.localrepository)
        self._repo = repo
        self._state, self.mtime = self._repostate()
        self._filtername = repo.filtername

    def fetch(self):
        """Refresh (if necessary) and return a repository.

        If the cached instance is out of date, it will be recreated
        automatically and returned.

        Returns a tuple of the repo and a boolean indicating whether a new
        repo instance was created.
        """
        # We compare the mtimes and sizes of some well-known files to
        # determine if the repo changed. This is not precise, as mtimes
        # are susceptible to clock skew and imprecise filesystems and
        # file content can change while maintaining the same size.

        state, mtime = self._repostate()
        if state == self._state:
            return self._repo, False

        repo = repository(self._repo.baseui, self._repo.url())
        if self._filtername:
            self._repo = repo.filtered(self._filtername)
        else:
            self._repo = repo.unfiltered()
        self._state = state
        self.mtime = mtime

        return self._repo, True

    def _repostate(self):
        state = []
        maxmtime = -1
        for attr, fname in foi:
            prefix = getattr(self._repo, attr)
            p = os.path.join(prefix, fname)
            try:
                st = os.stat(p)
            except OSError:
                st = os.stat(prefix)
            state.append((st[stat.ST_MTIME], st.st_size))
            maxmtime = max(maxmtime, st[stat.ST_MTIME])

        return tuple(state), maxmtime

    def copy(self):
        """Obtain a copy of this class instance.

        A new localrepository instance is obtained. The new instance should be
        completely independent of the original.
        """
        repo = repository(self._repo.baseui, self._repo.origroot)
        if self._filtername:
            repo = repo.filtered(self._filtername)
        else:
            repo = repo.unfiltered()
        c = cachedlocalrepo(repo)
        c._state = self._state
        c.mtime = self.mtime
        return c
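
# Illustrative sketch (assuming a long-lived process such as a web server):
# a cachedlocalrepo is created once and cheaply revalidated before each use:
#
#     cached = cachedlocalrepo(repo)
#     ...
#     repo, fresh = cached.fetch()  # fresh is True if a new instance was made
#
# The staleness check is only as strong as the mtime/size heuristic over the
# files-of-interest list `foi` above.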
@@ -1,2335 +1,2343 b''
# merge.py - directory-level update/merge handling for Mercurial
#
# Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno
import hashlib
import shutil
import stat
import struct

from .i18n import _
from .node import (
    addednodeid,
    bin,
    hex,
    modifiednodeid,
    nullhex,
    nullid,
    nullrev,
)
from .thirdparty import (
    attr,
)
from . import (
    copies,
    encoding,
    error,
    filemerge,
    match as matchmod,
    obsutil,
    pycompat,
    scmutil,
    subrepoutil,
    util,
    worker,
)

_pack = struct.pack
_unpack = struct.unpack

def _droponode(data):
    # used for compatibility for v1
    bits = data.split('\0')
    bits = bits[:-2] + bits[-1:]
    return '\0'.join(bits)

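# Worked example (hypothetical field values): a v2 "F" record looks like
# 'file\0u\0<localkey>\0<lpath>\0<apath>\0<anode>\0<opath>\0<onode>\0<flags>';
# _droponode() removes the second-to-last field, the "other file node", which
# the v1 format cannot represent (see _readrecordsv1 below).
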
# Merge state record types. See ``mergestate`` docs for more.
RECORD_LOCAL = b'L'
RECORD_OTHER = b'O'
RECORD_MERGED = b'F'
RECORD_CHANGEDELETE_CONFLICT = b'C'
RECORD_MERGE_DRIVER_MERGE = b'D'
RECORD_PATH_CONFLICT = b'P'
RECORD_MERGE_DRIVER_STATE = b'm'
RECORD_FILE_VALUES = b'f'
RECORD_LABELS = b'l'
RECORD_OVERRIDE = b't'
RECORD_UNSUPPORTED_MANDATORY = b'X'
RECORD_UNSUPPORTED_ADVISORY = b'x'

MERGE_DRIVER_STATE_UNMARKED = b'u'
MERGE_DRIVER_STATE_MARKED = b'm'
MERGE_DRIVER_STATE_SUCCESS = b's'

MERGE_RECORD_UNRESOLVED = b'u'
MERGE_RECORD_RESOLVED = b'r'
MERGE_RECORD_UNRESOLVED_PATH = b'pu'
MERGE_RECORD_RESOLVED_PATH = b'pr'
MERGE_RECORD_DRIVER_RESOLVED = b'd'

ACTION_FORGET = b'f'
ACTION_REMOVE = b'r'
ACTION_ADD = b'a'
ACTION_GET = b'g'
ACTION_PATH_CONFLICT = b'p'
ACTION_PATH_CONFLICT_RESOLVE = b'pr'
ACTION_ADD_MODIFIED = b'am'
ACTION_CREATED = b'c'
ACTION_DELETED_CHANGED = b'dc'
ACTION_CHANGED_DELETED = b'cd'
ACTION_MERGE = b'm'
ACTION_LOCAL_DIR_RENAME_GET = b'dg'
ACTION_DIR_RENAME_MOVE_LOCAL = b'dm'
ACTION_KEEP = b'k'
ACTION_EXEC = b'e'
ACTION_CREATED_MERGE = b'cm'

class mergestate(object):
    '''track 3-way merge state of individual files

    The merge state is stored on disk when needed. Two files are used: one with
    an old format (version 1), and one with a new format (version 2). Version 2
    stores a superset of the data in version 1, including new kinds of records
    in the future. For more about the new format, see the documentation for
    `_readrecordsv2`.

    Each record can contain arbitrary content, and has an associated type. This
    `type` should be a letter. If `type` is uppercase, the record is mandatory:
    versions of Mercurial that don't support it should abort. If `type` is
    lowercase, the record can be safely ignored.

    Currently known records:

    L: the node of the "local" part of the merge (hexified version)
    O: the node of the "other" part of the merge (hexified version)
    F: an entry for a file to be merged
    C: a change/delete or delete/change conflict
    D: a file that the external merge driver will merge internally
       (experimental)
    P: a path conflict (file vs directory)
    m: the external merge driver defined for this merge plus its run state
       (experimental)
    f: a (filename, dictionary) tuple of optional values for a given file
    X: unsupported mandatory record type (used in tests)
    x: unsupported advisory record type (used in tests)
    l: the labels for the parts of the merge.

    Merge driver run states (experimental):
    u: driver-resolved files unmarked -- needs to be run next time we're about
       to resolve or commit
    m: driver-resolved files marked -- only needs to be run before commit
    s: success/skipped -- does not need to be run any more

    Merge record states (stored in self._state, indexed by filename):
    u: unresolved conflict
    r: resolved conflict
    pu: unresolved path conflict (file conflicts with directory)
    pr: resolved path conflict
    d: driver-resolved conflict

    The resolve command transitions between 'u' and 'r' for conflicts and
    'pu' and 'pr' for path conflicts.
    '''
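    # Illustrative sketch of the typical lifecycle (using the methods below):
    #
    #     ms = mergestate.read(repo)      # load whatever state is on disk
    #     for f in ms.unresolved():
    #         ...                         # resolve f somehow
    #         ms.mark(f, MERGE_RECORD_RESOLVED)
    #     ms.commit()                     # write the state back if dirty
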
    statepathv1 = 'merge/state'
    statepathv2 = 'merge/state2'

    @staticmethod
    def clean(repo, node=None, other=None, labels=None):
        """Initialize a brand new merge state, removing any existing state on
        disk."""
        ms = mergestate(repo)
        ms.reset(node, other, labels)
        return ms

    @staticmethod
    def read(repo):
        """Initialize the merge state, reading it from disk."""
        ms = mergestate(repo)
        ms._read()
        return ms

    def __init__(self, repo):
        """Initialize the merge state.

        Do not use this directly! Instead call read() or clean()."""
        self._repo = repo
        self._dirty = False
        self._labels = None

    def reset(self, node=None, other=None, labels=None):
        self._state = {}
        self._stateextras = {}
        self._local = None
        self._other = None
        self._labels = labels
        for var in ('localctx', 'otherctx'):
            if var in vars(self):
                delattr(self, var)
        if node:
            self._local = node
            self._other = other
        self._readmergedriver = None
        if self.mergedriver:
            self._mdstate = MERGE_DRIVER_STATE_SUCCESS
        else:
            self._mdstate = MERGE_DRIVER_STATE_UNMARKED
        shutil.rmtree(self._repo.vfs.join('merge'), True)
        self._results = {}
        self._dirty = False

    def _read(self):
        """Analyse each record's content to restore a serialized state from
        disk.

        This function processes the "record" entries produced by the
        de-serialization of the on-disk file.
        """
        self._state = {}
        self._stateextras = {}
        self._local = None
        self._other = None
        for var in ('localctx', 'otherctx'):
            if var in vars(self):
                delattr(self, var)
        self._readmergedriver = None
        self._mdstate = MERGE_DRIVER_STATE_SUCCESS
        unsupported = set()
        records = self._readrecords()
        for rtype, record in records:
            if rtype == RECORD_LOCAL:
                self._local = bin(record)
            elif rtype == RECORD_OTHER:
                self._other = bin(record)
            elif rtype == RECORD_MERGE_DRIVER_STATE:
                bits = record.split('\0', 1)
                mdstate = bits[1]
                if len(mdstate) != 1 or mdstate not in (
                    MERGE_DRIVER_STATE_UNMARKED, MERGE_DRIVER_STATE_MARKED,
                    MERGE_DRIVER_STATE_SUCCESS):
                    # the merge driver should be idempotent, so just rerun it
                    mdstate = MERGE_DRIVER_STATE_UNMARKED

                self._readmergedriver = bits[0]
                self._mdstate = mdstate
            elif rtype in (RECORD_MERGED, RECORD_CHANGEDELETE_CONFLICT,
                           RECORD_PATH_CONFLICT, RECORD_MERGE_DRIVER_MERGE):
                bits = record.split('\0')
                self._state[bits[0]] = bits[1:]
            elif rtype == RECORD_FILE_VALUES:
                filename, rawextras = record.split('\0', 1)
                extraparts = rawextras.split('\0')
                extras = {}
                i = 0
                while i < len(extraparts):
                    extras[extraparts[i]] = extraparts[i + 1]
                    i += 2

                self._stateextras[filename] = extras
            elif rtype == RECORD_LABELS:
                labels = record.split('\0', 2)
                self._labels = [l for l in labels if len(l) > 0]
            elif not rtype.islower():
                unsupported.add(rtype)
        self._results = {}
        self._dirty = False

        if unsupported:
            raise error.UnsupportedMergeRecords(unsupported)

    def _readrecords(self):
        """Read merge state from disk and return a list of record (TYPE, data)

        We read data from both v1 and v2 files and decide which one to use.

        V1 has been used by versions prior to 2.9.1 and contains less data
        than v2. We read both versions and check if no data in v2 contradicts
        v1. If there is no contradiction we can safely assume that both v1
        and v2 were written at the same time and use the extra data in v2. If
        there is a contradiction we ignore the v2 content, as we assume an old
        version of Mercurial has overwritten the mergestate file and left an
        old v2 file around.

        returns list of record [(TYPE, data), ...]"""
        v1records = self._readrecordsv1()
        v2records = self._readrecordsv2()
        if self._v1v2match(v1records, v2records):
            return v2records
        else:
            # v1 file is newer than v2 file, use it
            # we have to infer the "other" changeset of the merge
            # we cannot do better than that with v1 of the format
            mctx = self._repo[None].parents()[-1]
            v1records.append((RECORD_OTHER, mctx.hex()))
            # add placeholder "other" file node information
            # nobody is using it yet so we do not need to fetch the data;
            # if mctx was wrong, `mctx[bits[-2]]` may fail.
            for idx, r in enumerate(v1records):
                if r[0] == RECORD_MERGED:
                    bits = r[1].split('\0')
                    bits.insert(-2, '')
                    v1records[idx] = (r[0], '\0'.join(bits))
            return v1records

    def _v1v2match(self, v1records, v2records):
        oldv2 = set() # old format version of v2 record
        for rec in v2records:
            if rec[0] == RECORD_LOCAL:
                oldv2.add(rec)
            elif rec[0] == RECORD_MERGED:
                # drop the onode data (not contained in v1)
                oldv2.add((RECORD_MERGED, _droponode(rec[1])))
        for rec in v1records:
            if rec not in oldv2:
                return False
        else:
            return True

    def _readrecordsv1(self):
        """read on disk merge state for version 1 file

        returns list of record [(TYPE, data), ...]

        Note: the "F" data from this file are one entry short
        (no "other file node" entry)
        """
        records = []
        try:
            f = self._repo.vfs(self.statepathv1)
            for i, l in enumerate(f):
                if i == 0:
                    records.append((RECORD_LOCAL, l[:-1]))
                else:
                    records.append((RECORD_MERGED, l[:-1]))
            f.close()
        except IOError as err:
            if err.errno != errno.ENOENT:
                raise
        return records

    def _readrecordsv2(self):
        """read on disk merge state for version 2 file

        This format is a list of arbitrary records of the form:

          [type][length][content]

        `type` is a single character, `length` is a 4 byte integer, and
        `content` is an arbitrary byte sequence of length `length`.

        Mercurial versions prior to 3.7 have a bug where if there are
        unsupported mandatory merge records, attempting to clear out the merge
        state with hg update --clean or similar aborts. The 't' record type
        works around that by writing out what those versions treat as an
        advisory record, but later versions interpret as special: the first
        character is the 'real' record type and everything onwards is the data.

        Returns list of records [(TYPE, data), ...]."""
        records = []
        try:
            f = self._repo.vfs(self.statepathv2)
            data = f.read()
            off = 0
            end = len(data)
            while off < end:
                rtype = data[off:off + 1]
                off += 1
                length = _unpack('>I', data[off:(off + 4)])[0]
                off += 4
                record = data[off:(off + length)]
                off += length
                if rtype == RECORD_OVERRIDE:
                    rtype, record = record[0:1], record[1:]
                records.append((rtype, record))
            f.close()
        except IOError as err:
            if err.errno != errno.ENOENT:
                raise
        return records

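    # Illustrative sketch of the v2 framing described above: a record is
    # [type][length][content] with a big-endian 4-byte length, so a local-node
    # record for a 40-byte hex node would be produced by
    #
    #     struct.pack('>sI40s', b'L', 40, b'f' * 40)
    #
    # which is exactly the '>sI%is' format _writerecordsv2() uses below.
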
    @util.propertycache
    def mergedriver(self):
        # protect against the following:
        # - A configures a malicious merge driver in their hgrc, then
        #   pauses the merge
        # - A edits their hgrc to remove references to the merge driver
        # - A gives a copy of their entire repo, including .hg, to B
        # - B inspects .hgrc and finds it to be clean
        # - B then continues the merge and the malicious merge driver
        #   gets invoked
        configmergedriver = self._repo.ui.config('experimental', 'mergedriver')
        if (self._readmergedriver is not None
            and self._readmergedriver != configmergedriver):
            raise error.ConfigError(
                _("merge driver changed since merge started"),
                hint=_("revert merge driver change or abort merge"))

        return configmergedriver

    @util.propertycache
    def localctx(self):
        if self._local is None:
            msg = "localctx accessed but self._local isn't set"
            raise error.ProgrammingError(msg)
        return self._repo[self._local]

    @util.propertycache
    def otherctx(self):
        if self._other is None:
            msg = "otherctx accessed but self._other isn't set"
            raise error.ProgrammingError(msg)
        return self._repo[self._other]

    def active(self):
        """Whether mergestate is active.

        Returns True if there appears to be mergestate. This is a rough proxy
        for "is a merge in progress."
        """
        # Check local variables before looking at filesystem for performance
        # reasons.
        return (bool(self._local) or bool(self._state) or
                self._repo.vfs.exists(self.statepathv1) or
                self._repo.vfs.exists(self.statepathv2))

    def commit(self):
        """Write current state on disk (if necessary)"""
        if self._dirty:
            records = self._makerecords()
            self._writerecords(records)
            self._dirty = False

    def _makerecords(self):
        records = []
        records.append((RECORD_LOCAL, hex(self._local)))
        records.append((RECORD_OTHER, hex(self._other)))
        if self.mergedriver:
            records.append((RECORD_MERGE_DRIVER_STATE, '\0'.join([
                self.mergedriver, self._mdstate])))
        # Write out state items. In all cases, the value of the state map entry
        # is written as the contents of the record. The record type depends on
        # the type of state that is stored, and capital-letter records are used
        # to prevent older versions of Mercurial that do not support the
        # feature from loading them.
        for filename, v in self._state.iteritems():
            if v[0] == MERGE_RECORD_DRIVER_RESOLVED:
                # Driver-resolved merge. These are stored in 'D' records.
                records.append((RECORD_MERGE_DRIVER_MERGE,
                                '\0'.join([filename] + v)))
            elif v[0] in (MERGE_RECORD_UNRESOLVED_PATH,
                          MERGE_RECORD_RESOLVED_PATH):
                # Path conflicts. These are stored in 'P' records. The current
                # resolution state ('pu' or 'pr') is stored within the record.
                records.append((RECORD_PATH_CONFLICT,
                                '\0'.join([filename] + v)))
            elif v[1] == nullhex or v[6] == nullhex:
                # Change/Delete or Delete/Change conflicts. These are stored in
                # 'C' records. v[1] is the local file, and is nullhex when the
                # file is deleted locally ('dc'). v[6] is the remote file, and
                # is nullhex when the file is deleted remotely ('cd').
                records.append((RECORD_CHANGEDELETE_CONFLICT,
                                '\0'.join([filename] + v)))
            else:
                # Normal files. These are stored in 'F' records.
                records.append((RECORD_MERGED,
                                '\0'.join([filename] + v)))
        for filename, extras in sorted(self._stateextras.iteritems()):
            rawextras = '\0'.join('%s\0%s' % (k, v) for k, v in
                                  extras.iteritems())
            records.append((RECORD_FILE_VALUES,
                            '%s\0%s' % (filename, rawextras)))
        if self._labels is not None:
            labels = '\0'.join(self._labels)
            records.append((RECORD_LABELS, labels))
        return records

    def _writerecords(self, records):
        """Write current state on disk (both v1 and v2)"""
        self._writerecordsv1(records)
        self._writerecordsv2(records)

    def _writerecordsv1(self, records):
        """Write current state on disk in a version 1 file"""
        f = self._repo.vfs(self.statepathv1, 'wb')
        irecords = iter(records)
        lrecords = next(irecords)
        assert lrecords[0] == RECORD_LOCAL
        f.write(hex(self._local) + '\n')
        for rtype, data in irecords:
            if rtype == RECORD_MERGED:
                f.write('%s\n' % _droponode(data))
        f.close()

    def _writerecordsv2(self, records):
        """Write current state on disk in a version 2 file

        See the docstring for _readrecordsv2 for why we use 't'."""
        # these are the records that all version 2 clients can read
        allowlist = (RECORD_LOCAL, RECORD_OTHER, RECORD_MERGED)
        f = self._repo.vfs(self.statepathv2, 'wb')
        for key, data in records:
            assert len(key) == 1
            if key not in allowlist:
                key, data = RECORD_OVERRIDE, '%s%s' % (key, data)
            format = '>sI%is' % len(data)
            f.write(_pack(format, key, len(data), data))
        f.close()

    @staticmethod
    def getlocalkey(path):
        """hash the path of a local file context for storage in the .hg/merge
        directory."""

        return hex(hashlib.sha1(path).digest())

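    # For instance, getlocalkey(b'foo') returns
    # '0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33' (the hex SHA-1 of the path),
    # which add() below uses as the file name under .hg/merge.
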
489 def add(self, fcl, fco, fca, fd):
489 def add(self, fcl, fco, fca, fd):
490 """add a new (potentially?) conflicting file the merge state
490 """add a new (potentially?) conflicting file the merge state
491 fcl: file context for local,
491 fcl: file context for local,
492 fco: file context for remote,
492 fco: file context for remote,
493 fca: file context for ancestors,
493 fca: file context for ancestors,
494 fd: file path of the resulting merge.
494 fd: file path of the resulting merge.
495
495
496 note: also write the local version to the `.hg/merge` directory.
496 note: also write the local version to the `.hg/merge` directory.
497 """
497 """
498 if fcl.isabsent():
498 if fcl.isabsent():
499 localkey = nullhex
499 localkey = nullhex
500 else:
500 else:
501 localkey = mergestate.getlocalkey(fcl.path())
501 localkey = mergestate.getlocalkey(fcl.path())
502 self._repo.vfs.write('merge/' + localkey, fcl.data())
502 self._repo.vfs.write('merge/' + localkey, fcl.data())
503 self._state[fd] = [MERGE_RECORD_UNRESOLVED, localkey, fcl.path(),
503 self._state[fd] = [MERGE_RECORD_UNRESOLVED, localkey, fcl.path(),
504 fca.path(), hex(fca.filenode()),
504 fca.path(), hex(fca.filenode()),
505 fco.path(), hex(fco.filenode()),
505 fco.path(), hex(fco.filenode()),
506 fcl.flags()]
506 fcl.flags()]
507 self._stateextras[fd] = {'ancestorlinknode': hex(fca.node())}
507 self._stateextras[fd] = {'ancestorlinknode': hex(fca.node())}
508 self._dirty = True
508 self._dirty = True

    def addpath(self, path, frename, forigin):
        """add a new conflicting path to the merge state
        path: the path that conflicts
        frename: the filename the conflicting file was renamed to
        forigin: origin of the file ('l' or 'r' for local/remote)
        """
        self._state[path] = [MERGE_RECORD_UNRESOLVED_PATH, frename, forigin]
        self._dirty = True

    def __contains__(self, dfile):
        return dfile in self._state

    def __getitem__(self, dfile):
        return self._state[dfile][0]

    def __iter__(self):
        return iter(sorted(self._state))

    def files(self):
        return self._state.keys()

    def mark(self, dfile, state):
        self._state[dfile][0] = state
        self._dirty = True

    def mdstate(self):
        return self._mdstate

    def unresolved(self):
        """Obtain the paths of unresolved files."""

        for f, entry in self._state.iteritems():
            if entry[0] in (MERGE_RECORD_UNRESOLVED,
                            MERGE_RECORD_UNRESOLVED_PATH):
                yield f

    def driverresolved(self):
        """Obtain the paths of driver-resolved files."""

        for f, entry in self._state.items():
            if entry[0] == MERGE_RECORD_DRIVER_RESOLVED:
                yield f

    def extras(self, filename):
        return self._stateextras.setdefault(filename, {})

    def _resolve(self, preresolve, dfile, wctx):
        """rerun merge process for file path `dfile`"""
        if self[dfile] in (MERGE_RECORD_RESOLVED,
                           MERGE_RECORD_DRIVER_RESOLVED):
            return True, 0
        stateentry = self._state[dfile]
        state, localkey, lfile, afile, anode, ofile, onode, flags = stateentry
        octx = self._repo[self._other]
        extras = self.extras(dfile)
        anccommitnode = extras.get('ancestorlinknode')
        if anccommitnode:
            actx = self._repo[anccommitnode]
        else:
            actx = None
        fcd = self._filectxorabsent(localkey, wctx, dfile)
        fco = self._filectxorabsent(onode, octx, ofile)
        # TODO: move this to filectxorabsent
        fca = self._repo.filectx(afile, fileid=anode, changectx=actx)
        # "premerge" x flags
        flo = fco.flags()
        fla = fca.flags()
        if 'x' in flags + flo + fla and 'l' not in flags + flo + fla:
            if fca.node() == nullid and flags != flo:
                if preresolve:
                    self._repo.ui.warn(
                        _('warning: cannot merge flags for %s '
                          'without common ancestor - keeping local flags\n')
                        % afile)
            elif flags == fla:
                flags = flo
        if preresolve:
            # restore local
            if localkey != nullhex:
                f = self._repo.vfs('merge/' + localkey)
                wctx[dfile].write(f.read(), flags)
                f.close()
            else:
                wctx[dfile].remove(ignoremissing=True)
            complete, r, deleted = filemerge.premerge(self._repo, wctx,
                                                      self._local, lfile, fcd,
                                                      fco, fca,
                                                      labels=self._labels)
        else:
            complete, r, deleted = filemerge.filemerge(self._repo, wctx,
                                                       self._local, lfile, fcd,
                                                       fco, fca,
                                                       labels=self._labels)
        if r is None:
            # no real conflict
            del self._state[dfile]
            self._stateextras.pop(dfile, None)
            self._dirty = True
        elif not r:
            self.mark(dfile, MERGE_RECORD_RESOLVED)

        if complete:
            action = None
            if deleted:
                if fcd.isabsent():
                    # dc: local picked. Need to drop if present, which may
                    # happen on re-resolves.
                    action = ACTION_FORGET
                else:
                    # cd: remote picked (or otherwise deleted)
                    action = ACTION_REMOVE
            else:
                if fcd.isabsent(): # dc: remote picked
                    action = ACTION_GET
                elif fco.isabsent(): # cd: local picked
                    if dfile in self.localctx:
                        action = ACTION_ADD_MODIFIED
                    else:
                        action = ACTION_ADD
                # else: regular merges (no action necessary)
            self._results[dfile] = r, action

        return complete, r

    def _filectxorabsent(self, hexnode, ctx, f):
        if hexnode == nullhex:
            return filemerge.absentfilectx(ctx, f)
        else:
            return ctx[f]

    def preresolve(self, dfile, wctx):
        """run premerge process for dfile

        Returns whether the merge is complete, and the exit code."""
        return self._resolve(True, dfile, wctx)

    def resolve(self, dfile, wctx):
        """run merge process (assuming premerge was run) for dfile

        Returns the exit code of the merge."""
        return self._resolve(False, dfile, wctx)[1]
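
    # A hedged sketch of the expected two-phase calling convention
    # (assumption: a caller holding a mergestate ``ms`` and a working
    # context ``wctx``; error handling omitted):
    #
    #   for f in list(ms.unresolved()):
    #       ms.preresolve(f, wctx)   # restore local file, try simple premerge
    #       ms.resolve(f, wctx)      # run the real merge tool if still needed
    #   ms.commit()                  # persist the updated state records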

    def counts(self):
        """return counts for updated, merged and removed files in this
        session"""
        updated, merged, removed = 0, 0, 0
        for r, action in self._results.itervalues():
            if r is None:
                updated += 1
            elif r == 0:
                if action == ACTION_REMOVE:
                    removed += 1
                else:
                    merged += 1
        return updated, merged, removed

    def unresolvedcount(self):
        """get unresolved count for this merge (persistent)"""
        return len(list(self.unresolved()))

    def actions(self):
        """return lists of actions to perform on the dirstate"""
        actions = {
            ACTION_REMOVE: [],
            ACTION_FORGET: [],
            ACTION_ADD: [],
            ACTION_ADD_MODIFIED: [],
            ACTION_GET: [],
        }
        for f, (r, action) in self._results.iteritems():
            if action is not None:
                actions[action].append((f, None, "merge result"))
        return actions

    def recordactions(self):
        """record remove/add/get actions in the dirstate"""
        branchmerge = self._repo.dirstate.p2() != nullid
        recordupdates(self._repo, self.actions(), branchmerge, None)

    def queueremove(self, f):
        """queues a file to be removed from the dirstate

        Meant for use by custom merge drivers."""
        self._results[f] = 0, ACTION_REMOVE

    def queueadd(self, f):
        """queues a file to be added to the dirstate

        Meant for use by custom merge drivers."""
        self._results[f] = 0, ACTION_ADD

    def queueget(self, f):
        """queues a file to be marked modified in the dirstate

        Meant for use by custom merge drivers."""
        self._results[f] = 0, ACTION_GET

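# A hypothetical sketch (the function name and arguments are invented for
# illustration; this is not part of Mercurial's API) of how a custom merge
# driver might use the queue* methods above to record outcomes it resolved
# itself, without invoking a file-level merge tool:

def _exampledriver(ms, resolved, removed):
    """Queue driver-resolved outcomes on mergestate ``ms`` (sketch only)."""
    for f in removed:
        ms.queueremove(f)       # file should disappear from the dirstate
    for f in resolved:
        ms.queueget(f)          # file should be marked modified
    # a real driver would then mark entries via ms.mark(f, ...) and persist
    # the state with ms.commit()
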
def _getcheckunknownconfig(repo, section, name):
    config = repo.ui.config(section, name)
    valid = ['abort', 'ignore', 'warn']
    if config not in valid:
        validstr = ', '.join(["'" + v + "'" for v in valid])
        raise error.ConfigError(_("%s.%s not valid "
                                  "('%s' is none of %s)")
                                % (section, name, config, validstr))
    return config
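
# For reference, the settings validated above are normally set in an hgrc
# file; assuming the standard config syntax, for example:
#
#   [merge]
#   checkunknown = warn
#   checkignored = abort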

def _checkunknownfile(repo, wctx, mctx, f, f2=None):
    if wctx.isinmemory():
        # Nothing to do in IMM because nothing in the "working copy" can be an
        # unknown file.
        #
        # Note that we should bail out here, not in ``_checkunknownfiles()``,
        # because that function does other useful work.
        return False

    if f2 is None:
        f2 = f
    return (repo.wvfs.audit.check(f)
            and repo.wvfs.isfileorlink(f)
            and repo.dirstate.normalize(f) not in repo.dirstate
            and mctx[f2].cmp(wctx[f]))

class _unknowndirschecker(object):
    """
    Look for any unknown files or directories that may have a path conflict
    with a file. If any path prefix of the file exists as a file or link,
    then it conflicts. If the file itself is a directory that contains any
    file that is not tracked, then it conflicts.

    Returns the shortest path at which a conflict occurs, or None if there is
    no conflict.
    """
    def __init__(self):
        # A set of paths known to be good. This prevents repeated checking of
        # dirs. It will be updated with any new dirs that are checked and found
        # to be safe.
        self._unknowndircache = set()

        # A set of paths that are known to be absent. This prevents repeated
        # checking of subdirectories that are known not to exist. It will be
        # updated with any new dirs that are checked and found to be absent.
        self._missingdircache = set()

    def __call__(self, repo, wctx, f):
        if wctx.isinmemory():
            # Nothing to do in IMM for the same reason as ``_checkunknownfile``.
            return False

        # Check for path prefixes that exist as unknown files.
        for p in reversed(list(util.finddirs(f))):
            if p in self._missingdircache:
                return
            if p in self._unknowndircache:
                continue
            if repo.wvfs.audit.check(p):
                if (repo.wvfs.isfileorlink(p)
                    and repo.dirstate.normalize(p) not in repo.dirstate):
                    return p
                if not repo.wvfs.lexists(p):
                    self._missingdircache.add(p)
                    return
                self._unknowndircache.add(p)

        # Check if the file conflicts with a directory containing unknown files.
        if repo.wvfs.audit.check(f) and repo.wvfs.isdir(f):
            # Does the directory contain any files that are not in the dirstate?
            for p, dirs, files in repo.wvfs.walk(f):
                for fn in files:
                    relf = util.pconvert(repo.wvfs.reljoin(p, fn))
                    relf = repo.dirstate.normalize(relf, isknown=True)
                    if relf not in repo.dirstate:
                        return f
        return None

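# A standalone toy model (assumption: paths as plain strings and set-based
# stand-ins for the filesystem; names invented for illustration) of the
# two-cache memoization used above: known-missing prefixes short-circuit the
# whole walk, known-good prefixes are skipped on later calls.

def _demoprefixcaches(path, files, dirs, good=None, missing=None):
    """Return the shortest prefix of ``path`` present in ``files``, else None.

    ``good``/``missing`` memoize results across calls, like the checker."""
    good = set() if good is None else good
    missing = set() if missing is None else missing
    parts = path.split('/')[:-1]
    for i in range(1, len(parts) + 1):       # shortest prefix first
        p = '/'.join(parts[:i])
        if p in missing:
            return None                      # a parent is already known absent
        if p in good:
            continue                         # already checked, known safe
        if p in files:
            return p                         # prefix exists as a file: conflict
        if p not in dirs:
            missing.add(p)
            return None
        good.add(p)
    return None

# e.g. _demoprefixcaches('a/b/c.txt', files={'a/b'}, dirs={'a'}) == 'a/b'
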
def _checkunknownfiles(repo, wctx, mctx, force, actions, mergeforce):
    """
    Considers any actions that care about the presence of conflicting unknown
    files. For some actions, the result is to abort; for others, it is to
    choose a different action.
    """
    fileconflicts = set()
    pathconflicts = set()
    warnconflicts = set()
    abortconflicts = set()
    unknownconfig = _getcheckunknownconfig(repo, 'merge', 'checkunknown')
    ignoredconfig = _getcheckunknownconfig(repo, 'merge', 'checkignored')
    pathconfig = repo.ui.configbool('experimental', 'merge.checkpathconflicts')
    if not force:
        def collectconflicts(conflicts, config):
            if config == 'abort':
                abortconflicts.update(conflicts)
            elif config == 'warn':
                warnconflicts.update(conflicts)

        checkunknowndirs = _unknowndirschecker()
        for f, (m, args, msg) in actions.iteritems():
            if m in (ACTION_CREATED, ACTION_DELETED_CHANGED):
                if _checkunknownfile(repo, wctx, mctx, f):
                    fileconflicts.add(f)
                elif pathconfig and f not in wctx:
                    path = checkunknowndirs(repo, wctx, f)
                    if path is not None:
                        pathconflicts.add(path)
            elif m == ACTION_LOCAL_DIR_RENAME_GET:
                if _checkunknownfile(repo, wctx, mctx, f, args[0]):
                    fileconflicts.add(f)

        allconflicts = fileconflicts | pathconflicts
        ignoredconflicts = {c for c in allconflicts
                            if repo.dirstate._ignore(c)}
        unknownconflicts = allconflicts - ignoredconflicts
        collectconflicts(ignoredconflicts, ignoredconfig)
        collectconflicts(unknownconflicts, unknownconfig)
    else:
        for f, (m, args, msg) in actions.iteritems():
            if m == ACTION_CREATED_MERGE:
                fl2, anc = args
                different = _checkunknownfile(repo, wctx, mctx, f)
                if repo.dirstate._ignore(f):
                    config = ignoredconfig
                else:
                    config = unknownconfig

                # The behavior when force is True is described by this table:
                #  config  different  mergeforce  |    action    backup
                #    *         n          *       |      get        n
                #    *         y          y       |     merge       -
                #   abort      y          n       |     merge       -   (1)
                #   warn       y          n       |  warn + get     y
                #  ignore      y          n       |      get        y
                #
                # (1) this is probably the wrong behavior here -- we should
                #     probably abort, but some actions like rebases currently
                #     don't like an abort happening in the middle of
                #     merge.update.
                if not different:
                    actions[f] = (ACTION_GET, (fl2, False), 'remote created')
                elif mergeforce or config == 'abort':
                    actions[f] = (ACTION_MERGE, (f, f, None, False, anc),
                                  'remote differs from untracked local')
                elif config == 'abort':
                    # Note: unreachable while the '(1)' behavior above is in
                    # place -- 'abort' is already caught by the previous
                    # branch.
                    abortconflicts.add(f)
                else:
                    if config == 'warn':
                        warnconflicts.add(f)
                    actions[f] = (ACTION_GET, (fl2, True), 'remote created')

    for f in sorted(abortconflicts):
        warn = repo.ui.warn
        if f in pathconflicts:
            if repo.wvfs.isfileorlink(f):
                warn(_("%s: untracked file conflicts with directory\n") % f)
            else:
                warn(_("%s: untracked directory conflicts with file\n") % f)
        else:
            warn(_("%s: untracked file differs\n") % f)
    if abortconflicts:
        raise error.Abort(_("untracked files in working directory "
                            "differ from files in requested revision"))

    for f in sorted(warnconflicts):
        if repo.wvfs.isfileorlink(f):
            repo.ui.warn(_("%s: replacing untracked file\n") % f)
        else:
            repo.ui.warn(_("%s: replacing untracked files in directory\n") % f)

    for f, (m, args, msg) in actions.iteritems():
        if m == ACTION_CREATED:
            backup = (f in fileconflicts or f in pathconflicts or
                      any(p in pathconflicts for p in util.finddirs(f)))
            flags, = args
            actions[f] = (ACTION_GET, (flags, backup), msg)

def _forgetremoved(wctx, mctx, branchmerge):
    """
    Forget removed files

    If we're jumping between revisions (as opposed to merging), and if
    neither the working directory nor the target rev has the file,
    then we need to remove it from the dirstate, to prevent the
    dirstate from listing the file when it is no longer in the
    manifest.

    If we're merging, and the other revision has removed a file
    that is not present in the working directory, we need to mark it
    as removed.
    """

    actions = {}
    m = ACTION_FORGET
    if branchmerge:
        m = ACTION_REMOVE
    for f in wctx.deleted():
        if f not in mctx:
            actions[f] = m, None, "forget deleted"

    if not branchmerge:
        for f in wctx.removed():
            if f not in mctx:
                actions[f] = ACTION_FORGET, None, "forget removed"

    return actions

def _checkcollision(repo, wmf, actions):
    """
    Check for case-folding collisions.
    """

    # If the repo is narrowed, filter out files outside the narrowspec.
    narrowmatch = repo.narrowmatch()
    if not narrowmatch.always():
        wmf = wmf.matches(narrowmatch)
        if actions:
            narrowactions = {}
            for m, actionsfortype in actions.iteritems():
                narrowactions[m] = []
                for (f, args, msg) in actionsfortype:
                    if narrowmatch(f):
                        narrowactions[m].append((f, args, msg))
            actions = narrowactions

    # build provisional merged manifest up
    pmmf = set(wmf)

    if actions:
        # KEEP and EXEC are no-op
        for m in (ACTION_ADD, ACTION_ADD_MODIFIED, ACTION_FORGET, ACTION_GET,
                  ACTION_CHANGED_DELETED, ACTION_DELETED_CHANGED):
            for f, args, msg in actions[m]:
                pmmf.add(f)
        for f, args, msg in actions[ACTION_REMOVE]:
            pmmf.discard(f)
        for f, args, msg in actions[ACTION_DIR_RENAME_MOVE_LOCAL]:
            f2, flags = args
            pmmf.discard(f2)
            pmmf.add(f)
        for f, args, msg in actions[ACTION_LOCAL_DIR_RENAME_GET]:
            pmmf.add(f)
        for f, args, msg in actions[ACTION_MERGE]:
            f1, f2, fa, move, anc = args
            if move:
                pmmf.discard(f1)
            pmmf.add(f)

    # check case-folding collision in provisional merged manifest
    foldmap = {}
    for f in pmmf:
        fold = util.normcase(f)
        if fold in foldmap:
            raise error.Abort(_("case-folding collision between %s and %s")
                              % (f, foldmap[fold]))
        foldmap[fold] = f

    # check case-folding of directories
    foldprefix = unfoldprefix = lastfull = ''
    for fold, f in sorted(foldmap.items()):
        if fold.startswith(foldprefix) and not f.startswith(unfoldprefix):
            # the folded prefix matches but actual casing is different
            raise error.Abort(_("case-folding collision between "
                                "%s and directory of %s") % (lastfull, f))
        foldprefix = fold + '/'
        unfoldprefix = f + '/'
        lastfull = f

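# A self-contained toy version (assumption: util.normcase approximated by
# str.lower, as on most case-insensitive systems; function name invented) of
# the per-file collision check above, handy for experimentation:

def _democollision(paths):
    """Return the first colliding pair in ``paths`` under case folding."""
    foldmap = {}
    for f in sorted(paths):
        fold = f.lower()                     # stand-in for util.normcase
        if fold in foldmap:
            return (foldmap[fold], f)        # e.g. ('README', 'readme')
        foldmap[fold] = f
    return None
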
def driverpreprocess(repo, ms, wctx, labels=None):
    """run the preprocess step of the merge driver, if any

    This is currently not implemented -- it's an extension point."""
    return True

def driverconclude(repo, ms, wctx, labels=None):
    """run the conclude step of the merge driver, if any

    This is currently not implemented -- it's an extension point."""
    return True

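# A hedged sketch of how an extension might hook these extension points
# (assumption: the standard extensions.wrapfunction mechanism; the wrapper
# body is invented for illustration):
#
#   from mercurial import extensions, merge as mergemod
#
#   def _preprocess(orig, repo, ms, wctx, labels=None):
#       repo.ui.note('merge driver: preprocess\n')
#       return orig(repo, ms, wctx, labels=labels)
#
#   def uisetup(ui):
#       extensions.wrapfunction(mergemod, 'driverpreprocess', _preprocess)
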
def _filesindirs(repo, manifest, dirs):
    """
    Generator that yields pairs of all the files in the manifest that are found
    inside the directories listed in dirs, and which directory they are found
    in.
    """
    for f in manifest:
        for p in util.finddirs(f):
            if p in dirs:
                yield f, p
                break

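# A standalone toy run of the same idea (assumption: the manifest modeled as
# a list of paths and util.finddirs as simple longest-first prefix
# enumeration; function name invented for illustration):

def _demofilesindirs(manifest, dirs):
    """Yield (file, containing-dir) pairs for files under ``dirs``."""
    for f in manifest:
        parts = f.split('/')[:-1]
        for i in range(len(parts), 0, -1):   # longest prefix first
            p = '/'.join(parts[:i])
            if p in dirs:
                yield f, p
                break

# e.g. list(_demofilesindirs(['a/b/c.txt', 'a/d.txt'], {'a/b'}))
#      == [('a/b/c.txt', 'a/b')]
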
def checkpathconflicts(repo, wctx, mctx, actions):
    """
    Check if any actions introduce path conflicts in the repository, updating
    actions to record or handle the path conflict accordingly.
    """
    mf = wctx.manifest()

    # The set of local files that conflict with a remote directory.
    localconflicts = set()

    # The set of directories that conflict with a remote file, and so may cause
    # conflicts if they still contain any files after the merge.
    remoteconflicts = set()

    # The set of directories that appear as both a file and a directory in the
    # remote manifest. These indicate an invalid remote manifest, which
    # can't be updated to cleanly.
    invalidconflicts = set()

    # The set of directories that contain files that are being created.
    createdfiledirs = set()

    # The set of files deleted by all the actions.
    deletedfiles = set()

    for f, (m, args, msg) in actions.items():
        if m in (ACTION_CREATED, ACTION_DELETED_CHANGED, ACTION_MERGE,
                 ACTION_CREATED_MERGE):
            # This action may create a new local file.
            createdfiledirs.update(util.finddirs(f))
            if mf.hasdir(f):
                # The file aliases a local directory. This might be ok if all
                # the files in the local directory are being deleted. This
                # will be checked once we know what all the deleted files are.
                remoteconflicts.add(f)
        # Track the names of all deleted files.
        if m == ACTION_REMOVE:
            deletedfiles.add(f)
        if m == ACTION_MERGE:
            f1, f2, fa, move, anc = args
            if move:
                deletedfiles.add(f1)
        if m == ACTION_DIR_RENAME_MOVE_LOCAL:
            f2, flags = args
            deletedfiles.add(f2)

    # Check all directories that contain created files for path conflicts.
    for p in createdfiledirs:
        if p in mf:
            if p in mctx:
                # A file is in a directory which aliases both a local
                # and a remote file. This is an internal inconsistency
                # within the remote manifest.
                invalidconflicts.add(p)
            else:
                # A file is in a directory which aliases a local file.
                # We will need to rename the local file.
                localconflicts.add(p)
        if p in actions and actions[p][0] in (ACTION_CREATED,
                                              ACTION_DELETED_CHANGED,
                                              ACTION_MERGE,
                                              ACTION_CREATED_MERGE):
            # The file is in a directory which aliases a remote file.
            # This is an internal inconsistency within the remote
            # manifest.
            invalidconflicts.add(p)

    # Rename all local conflicting files that have not been deleted.
    for p in localconflicts:
        if p not in deletedfiles:
            ctxname = bytes(wctx).rstrip('+')
            pnew = util.safename(p, ctxname, wctx, set(actions.keys()))
            actions[pnew] = (ACTION_PATH_CONFLICT_RESOLVE, (p,),
                             'local path conflict')
            actions[p] = (ACTION_PATH_CONFLICT, (pnew, 'l'),
                          'path conflict')

    if remoteconflicts:
        # Check if all files in the conflicting directories have been removed.
        ctxname = bytes(mctx).rstrip('+')
        for f, p in _filesindirs(repo, mf, remoteconflicts):
            if f not in deletedfiles:
                m, args, msg = actions[p]
                pnew = util.safename(p, ctxname, wctx, set(actions.keys()))
                if m in (ACTION_DELETED_CHANGED, ACTION_MERGE):
                    # Action was merge, just update target.
                    actions[pnew] = (m, args, msg)
                else:
                    # Action was create, change to renamed get action.
                    fl = args[0]
                    actions[pnew] = (ACTION_LOCAL_DIR_RENAME_GET, (p, fl),
                                     'remote path conflict')
                actions[p] = (ACTION_PATH_CONFLICT, (pnew, ACTION_REMOVE),
                              'path conflict')
                remoteconflicts.remove(p)
                break

    if invalidconflicts:
        for p in invalidconflicts:
            repo.ui.warn(_("%s: is both a file and a directory\n") % p)
        raise error.Abort(_("destination manifest contains path conflicts"))

def _filternarrowactions(narrowmatch, branchmerge, actions):
    """
    Filters out actions that can be ignored because the repo is narrowed.

    Raise an exception if the merge cannot be completed because the repo is
    narrowed.
    """
    nooptypes = {'k'} # TODO: handle with nonconflicttypes
    nonconflicttypes = set('a am c cm f g r e'.split())
    # We mutate the items in the dict during iteration, so iterate
    # over a copy.
    for f, action in list(actions.items()):
        if narrowmatch(f):
            pass
        elif not branchmerge:
            del actions[f] # just updating, ignore changes outside clone
        elif action[0] in nooptypes:
            del actions[f] # merge does not affect file
        elif action[0] in nonconflicttypes:
            raise error.Abort(_('merge affects file \'%s\' outside narrow, '
                                'which is not yet supported') % f,
                              hint=_('merging in the other direction '
                                     'may work'))
        else:
            raise error.Abort(_('conflict in file \'%s\' is outside '
                                'narrow clone') % f)
1127
1127
1128 def manifestmerge(repo, wctx, p2, pa, branchmerge, force, matcher,
1128 def manifestmerge(repo, wctx, p2, pa, branchmerge, force, matcher,
1129 acceptremote, followcopies, forcefulldiff=False):
1129 acceptremote, followcopies, forcefulldiff=False):
1130 """
1130 """
1131 Merge wctx and p2 with ancestor pa and generate merge action list
1131 Merge wctx and p2 with ancestor pa and generate merge action list
1132
1132
1133 branchmerge and force are as passed in to update
1133 branchmerge and force are as passed in to update
1134 matcher = matcher to filter file lists
1134 matcher = matcher to filter file lists
1135 acceptremote = accept the incoming changes without prompting
1135 acceptremote = accept the incoming changes without prompting
1136 """
1136 """
1137 if matcher is not None and matcher.always():
1137 if matcher is not None and matcher.always():
1138 matcher = None
1138 matcher = None
1139
1139
1140 copy, movewithdir, diverge, renamedelete, dirmove = {}, {}, {}, {}, {}
1140 copy, movewithdir, diverge, renamedelete, dirmove = {}, {}, {}, {}, {}
1141
1141
1142 # manifests fetched in order are going to be faster, so prime the caches
1142 # manifests fetched in order are going to be faster, so prime the caches
1143 [x.manifest() for x in
1143 [x.manifest() for x in
1144 sorted(wctx.parents() + [p2, pa], key=scmutil.intrev)]
1144 sorted(wctx.parents() + [p2, pa], key=scmutil.intrev)]
1145
1145
1146 if followcopies:
1146 if followcopies:
1147 ret = copies.mergecopies(repo, wctx, p2, pa)
1147 ret = copies.mergecopies(repo, wctx, p2, pa)
1148 copy, movewithdir, diverge, renamedelete, dirmove = ret
1148 copy, movewithdir, diverge, renamedelete, dirmove = ret
1149
1149
1150 boolbm = pycompat.bytestr(bool(branchmerge))
1150 boolbm = pycompat.bytestr(bool(branchmerge))
1151 boolf = pycompat.bytestr(bool(force))
1151 boolf = pycompat.bytestr(bool(force))
1152 boolm = pycompat.bytestr(bool(matcher))
1152 boolm = pycompat.bytestr(bool(matcher))
1153 repo.ui.note(_("resolving manifests\n"))
1153 repo.ui.note(_("resolving manifests\n"))
1154 repo.ui.debug(" branchmerge: %s, force: %s, partial: %s\n"
1154 repo.ui.debug(" branchmerge: %s, force: %s, partial: %s\n"
1155 % (boolbm, boolf, boolm))
1155 % (boolbm, boolf, boolm))
1156 repo.ui.debug(" ancestor: %s, local: %s, remote: %s\n" % (pa, wctx, p2))
1156 repo.ui.debug(" ancestor: %s, local: %s, remote: %s\n" % (pa, wctx, p2))
1157
1157
1158 m1, m2, ma = wctx.manifest(), p2.manifest(), pa.manifest()
1158 m1, m2, ma = wctx.manifest(), p2.manifest(), pa.manifest()
1159 copied = set(copy.values())
1159 copied = set(copy.values())
1160 copied.update(movewithdir.values())
1160 copied.update(movewithdir.values())
1161
1161
1162 if '.hgsubstate' in m1 and wctx.rev() is None:
1162 if '.hgsubstate' in m1 and wctx.rev() is None:
1163 # Check whether sub state is modified, and overwrite the manifest
1163 # Check whether sub state is modified, and overwrite the manifest
1164 # to flag the change. If wctx is a committed revision, we shouldn't
1164 # to flag the change. If wctx is a committed revision, we shouldn't
1165 # care for the dirty state of the working directory.
1165 # care for the dirty state of the working directory.
1166 if any(wctx.sub(s).dirty() for s in wctx.substate):
1166 if any(wctx.sub(s).dirty() for s in wctx.substate):
1167 m1['.hgsubstate'] = modifiednodeid
1167 m1['.hgsubstate'] = modifiednodeid
1168
1168
1169 # Don't use m2-vs-ma optimization if:
1169 # Don't use m2-vs-ma optimization if:
1170 # - ma is the same as m1 or m2, which we're just going to diff again later
1170 # - ma is the same as m1 or m2, which we're just going to diff again later
1171 # - The caller specifically asks for a full diff, which is useful during bid
1171 # - The caller specifically asks for a full diff, which is useful during bid
1172 # merge.
1172 # merge.
1173 if (pa not in ([wctx, p2] + wctx.parents()) and not forcefulldiff):
1173 if (pa not in ([wctx, p2] + wctx.parents()) and not forcefulldiff):
1174 # Identify which files are relevant to the merge, so we can limit the
1174 # Identify which files are relevant to the merge, so we can limit the
1175 # total m1-vs-m2 diff to just those files. This has significant
1175 # total m1-vs-m2 diff to just those files. This has significant
1176 # performance benefits in large repositories.
1176 # performance benefits in large repositories.
1177 relevantfiles = set(ma.diff(m2).keys())
1177 relevantfiles = set(ma.diff(m2).keys())
1178
1178
1179 # For copied and moved files, we need to add the source file too.
1179 # For copied and moved files, we need to add the source file too.
1180 for copykey, copyvalue in copy.iteritems():
1180 for copykey, copyvalue in copy.iteritems():
1181 if copyvalue in relevantfiles:
1181 if copyvalue in relevantfiles:
1182 relevantfiles.add(copykey)
1182 relevantfiles.add(copykey)
1183 for movedirkey in movewithdir:
1183 for movedirkey in movewithdir:
1184 relevantfiles.add(movedirkey)
1184 relevantfiles.add(movedirkey)
1185 filesmatcher = scmutil.matchfiles(repo, relevantfiles)
1185 filesmatcher = scmutil.matchfiles(repo, relevantfiles)
1186 matcher = matchmod.intersectmatchers(matcher, filesmatcher)
1186 matcher = matchmod.intersectmatchers(matcher, filesmatcher)
1187
1187
1188 diff = m1.diff(m2, match=matcher)
1188 diff = m1.diff(m2, match=matcher)
1189
1189
1190 actions = {}
1190 actions = {}
1191 for f, ((n1, fl1), (n2, fl2)) in diff.iteritems():
1191 for f, ((n1, fl1), (n2, fl2)) in diff.iteritems():
1192 if n1 and n2: # file exists on both local and remote side
1192 if n1 and n2: # file exists on both local and remote side
1193 if f not in ma:
1193 if f not in ma:
1194 fa = copy.get(f, None)
1194 fa = copy.get(f, None)
1195 if fa is not None:
1195 if fa is not None:
1196 actions[f] = (ACTION_MERGE, (f, f, fa, False, pa.node()),
1196 actions[f] = (ACTION_MERGE, (f, f, fa, False, pa.node()),
1197 'both renamed from %s' % fa)
1197 'both renamed from %s' % fa)
1198 else:
1198 else:
1199 actions[f] = (ACTION_MERGE, (f, f, None, False, pa.node()),
1199 actions[f] = (ACTION_MERGE, (f, f, None, False, pa.node()),
1200 'both created')
1200 'both created')
1201 else:
1201 else:
1202 a = ma[f]
1202 a = ma[f]
1203 fla = ma.flags(f)
1203 fla = ma.flags(f)
1204 nol = 'l' not in fl1 + fl2 + fla
1204 nol = 'l' not in fl1 + fl2 + fla
1205 if n2 == a and fl2 == fla:
1205 if n2 == a and fl2 == fla:
1206 actions[f] = (ACTION_KEEP, (), 'remote unchanged')
1206 actions[f] = (ACTION_KEEP, (), 'remote unchanged')
1207 elif n1 == a and fl1 == fla: # local unchanged - use remote
1207 elif n1 == a and fl1 == fla: # local unchanged - use remote
1208 if n1 == n2: # optimization: keep local content
1208 if n1 == n2: # optimization: keep local content
1209 actions[f] = (ACTION_EXEC, (fl2,), 'update permissions')
1209 actions[f] = (ACTION_EXEC, (fl2,), 'update permissions')
1210 else:
1210 else:
1211 actions[f] = (ACTION_GET, (fl2, False),
1211 actions[f] = (ACTION_GET, (fl2, False),
1212 'remote is newer')
1212 'remote is newer')
1213 elif nol and n2 == a: # remote only changed 'x'
1213 elif nol and n2 == a: # remote only changed 'x'
1214 actions[f] = (ACTION_EXEC, (fl2,), 'update permissions')
1214 actions[f] = (ACTION_EXEC, (fl2,), 'update permissions')
1215 elif nol and n1 == a: # local only changed 'x'
1215 elif nol and n1 == a: # local only changed 'x'
1216 actions[f] = (ACTION_GET, (fl1, False), 'remote is newer')
1216 actions[f] = (ACTION_GET, (fl1, False), 'remote is newer')
1217 else: # both changed something
1217 else: # both changed something
1218 actions[f] = (ACTION_MERGE, (f, f, f, False, pa.node()),
1218 actions[f] = (ACTION_MERGE, (f, f, f, False, pa.node()),
1219 'versions differ')
1219 'versions differ')
1220 elif n1: # file exists only on local side
1220 elif n1: # file exists only on local side
1221 if f in copied:
1221 if f in copied:
1222 pass # we'll deal with it on m2 side
1222 pass # we'll deal with it on m2 side
1223 elif f in movewithdir: # directory rename, move local
1223 elif f in movewithdir: # directory rename, move local
1224 f2 = movewithdir[f]
1224 f2 = movewithdir[f]
1225 if f2 in m2:
1225 if f2 in m2:
1226 actions[f2] = (ACTION_MERGE, (f, f2, None, True, pa.node()),
1226 actions[f2] = (ACTION_MERGE, (f, f2, None, True, pa.node()),
1227 'remote directory rename, both created')
1227 'remote directory rename, both created')
1228 else:
1228 else:
1229 actions[f2] = (ACTION_DIR_RENAME_MOVE_LOCAL, (f, fl1),
1229 actions[f2] = (ACTION_DIR_RENAME_MOVE_LOCAL, (f, fl1),
1230 'remote directory rename - move from %s' % f)
1230 'remote directory rename - move from %s' % f)
1231 elif f in copy:
1231 elif f in copy:
1232 f2 = copy[f]
1232 f2 = copy[f]
1233 actions[f] = (ACTION_MERGE, (f, f2, f2, False, pa.node()),
1233 actions[f] = (ACTION_MERGE, (f, f2, f2, False, pa.node()),
1234 'local copied/moved from %s' % f2)
1234 'local copied/moved from %s' % f2)
1235 elif f in ma: # clean, a different, no remote
1235 elif f in ma: # clean, a different, no remote
1236 if n1 != ma[f]:
1236 if n1 != ma[f]:
1237 if acceptremote:
1237 if acceptremote:
1238 actions[f] = (ACTION_REMOVE, None, 'remote delete')
1238 actions[f] = (ACTION_REMOVE, None, 'remote delete')
1239 else:
1239 else:
1240 actions[f] = (ACTION_CHANGED_DELETED,
1240 actions[f] = (ACTION_CHANGED_DELETED,
1241 (f, None, f, False, pa.node()),
1241 (f, None, f, False, pa.node()),
1242 'prompt changed/deleted')
1242 'prompt changed/deleted')
1243 elif n1 == addednodeid:
1243 elif n1 == addednodeid:
1244 # This extra 'a' is added by working copy manifest to mark
1244 # This extra 'a' is added by working copy manifest to mark
1245 # the file as locally added. We should forget it instead of
1245 # the file as locally added. We should forget it instead of
1246 # deleting it.
1246 # deleting it.
1247 actions[f] = (ACTION_FORGET, None, 'remote deleted')
1247 actions[f] = (ACTION_FORGET, None, 'remote deleted')
1248 else:
1248 else:
1249 actions[f] = (ACTION_REMOVE, None, 'other deleted')
1249 actions[f] = (ACTION_REMOVE, None, 'other deleted')
1250 elif n2: # file exists only on remote side
1250 elif n2: # file exists only on remote side
1251 if f in copied:
1251 if f in copied:
1252 pass # we'll deal with it on m1 side
1252 pass # we'll deal with it on m1 side
1253 elif f in movewithdir:
1253 elif f in movewithdir:
1254 f2 = movewithdir[f]
1254 f2 = movewithdir[f]
1255 if f2 in m1:
1255 if f2 in m1:
1256 actions[f2] = (ACTION_MERGE,
1256 actions[f2] = (ACTION_MERGE,
1257 (f2, f, None, False, pa.node()),
1257 (f2, f, None, False, pa.node()),
1258 'local directory rename, both created')
1258 'local directory rename, both created')
1259 else:
1259 else:
                    actions[f2] = (ACTION_LOCAL_DIR_RENAME_GET, (f, fl2),
                                   'local directory rename - get from %s' % f)
            elif f in copy:
                f2 = copy[f]
                if f2 in m2:
                    actions[f] = (ACTION_MERGE, (f2, f, f2, False, pa.node()),
                                  'remote copied from %s' % f2)
                else:
                    actions[f] = (ACTION_MERGE, (f2, f, f2, True, pa.node()),
                                  'remote moved from %s' % f2)
            elif f not in ma:
                # local unknown, remote created: the logic is described by the
                # following table:
                #
                # force  branchmerge  different  |  action
                #   n         *           *      |  create
                #   y         n           *      |  create
                #   y         y           n      |  create
                #   y         y           y      |  merge
                #
                # Checking whether the files are different is expensive, so we
                # don't do that when we can avoid it.
                if not force:
                    actions[f] = (ACTION_CREATED, (fl2,), 'remote created')
                elif not branchmerge:
                    actions[f] = (ACTION_CREATED, (fl2,), 'remote created')
                else:
                    actions[f] = (ACTION_CREATED_MERGE, (fl2, pa.node()),
                                  'remote created, get or merge')
            elif n2 != ma[f]:
                df = None
                for d in dirmove:
                    if f.startswith(d):
                        # new file added in a directory that was moved
                        df = dirmove[d] + f[len(d):]
                        break
                if df is not None and df in m1:
                    actions[df] = (ACTION_MERGE, (df, f, f, False, pa.node()),
                                   'local directory rename - respect move '
                                   'from %s' % f)
                elif acceptremote:
                    actions[f] = (ACTION_CREATED, (fl2,), 'remote recreating')
                else:
                    actions[f] = (ACTION_DELETED_CHANGED,
                                  (None, f, f, False, pa.node()),
                                  'prompt deleted/changed')

    if repo.ui.configbool('experimental', 'merge.checkpathconflicts'):
        # If we are merging, look for path conflicts.
        checkpathconflicts(repo, wctx, p2, actions)

    narrowmatch = repo.narrowmatch()
    if not narrowmatch.always():
        # Updates "actions" in place
        _filternarrowactions(narrowmatch, branchmerge, actions)

    return actions, diverge, renamedelete

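# Illustrative note (not part of the original module): each entry in the
# ``actions`` dict returned above maps a file path to a three-tuple of
# (ACTION_* constant, action-specific args, human-readable reason), e.g.,
# with hypothetical values:
#
#   actions['foo'] = (ACTION_GET, (fl2, False), 'remote is newer')
#   actions['bar'] = (ACTION_MERGE, (f1, f2, fa, False, pa.node()),
#                     'versions differ')
#
# The shape of the args tuple depends on the constant; applyupdates() below
# interprets it per action.
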
def _resolvetrivial(repo, wctx, mctx, ancestor, actions):
    """Resolves false conflicts where the nodeid changed but the content
       remained the same."""
    # We force a copy of actions.items() because we're going to mutate
    # actions as we resolve trivial conflicts.
    for f, (m, args, msg) in list(actions.items()):
        if (m == ACTION_CHANGED_DELETED and f in ancestor
            and not wctx[f].cmp(ancestor[f])):
            # local did change but ended up with same content
            actions[f] = ACTION_REMOVE, None, 'prompt same'
        elif (m == ACTION_DELETED_CHANGED and f in ancestor
              and not mctx[f].cmp(ancestor[f])):
            # remote did change but ended up with same content
            del actions[f] # don't get = keep local deleted

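# Worked example for _resolvetrivial() (hypothetical values): if the local
# side touched 'a' but ended up with the same content the ancestor has, the
# would-be changed/deleted conflict degrades to a plain remove:
#
#   actions = {'a': (ACTION_CHANGED_DELETED, args, 'prompt changed/deleted')}
#   _resolvetrivial(repo, wctx, mctx, ancestor, actions)
#   assert actions['a'] == (ACTION_REMOVE, None, 'prompt same')
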
def calculateupdates(repo, wctx, mctx, ancestors, branchmerge, force,
                     acceptremote, followcopies, matcher=None,
                     mergeforce=False):
    """Calculate the actions needed to merge mctx into wctx using ancestors"""
    # Avoid cycle.
    from . import sparse

    if len(ancestors) == 1: # default
        actions, diverge, renamedelete = manifestmerge(
            repo, wctx, mctx, ancestors[0], branchmerge, force, matcher,
            acceptremote, followcopies)
        _checkunknownfiles(repo, wctx, mctx, force, actions, mergeforce)

    else: # only when merge.preferancestor=* - the default
        repo.ui.note(
            _("note: merging %s and %s using bids from ancestors %s\n") %
            (wctx, mctx, _(' and ').join(pycompat.bytestr(anc)
                                         for anc in ancestors)))

        # Call for bids
        fbids = {} # mapping filename to bids (action method to list of actions)
        diverge, renamedelete = None, None
        for ancestor in ancestors:
            repo.ui.note(_('\ncalculating bids for ancestor %s\n') % ancestor)
            actions, diverge1, renamedelete1 = manifestmerge(
                repo, wctx, mctx, ancestor, branchmerge, force, matcher,
                acceptremote, followcopies, forcefulldiff=True)
            _checkunknownfiles(repo, wctx, mctx, force, actions, mergeforce)

            # Track the shortest set of warnings, on the theory that bid
            # merge will correctly incorporate more information
            if diverge is None or len(diverge1) < len(diverge):
                diverge = diverge1
            if renamedelete is None or len(renamedelete) < len(renamedelete1):
                renamedelete = renamedelete1

            for f, a in sorted(actions.iteritems()):
                m, args, msg = a
                repo.ui.debug(' %s: %s -> %s\n' % (f, msg, m))
                if f in fbids:
                    d = fbids[f]
                    if m in d:
                        d[m].append(a)
                    else:
                        d[m] = [a]
                else:
                    fbids[f] = {m: [a]}

        # Pick the best bid for each file
        repo.ui.note(_('\nauction for merging merge bids\n'))
        actions = {}
        for f, bids in sorted(fbids.items()):
            # bids is a mapping from action method to list of actions
            # Consensus?
            if len(bids) == 1: # all bids are the same kind of method
                m, l = list(bids.items())[0]
                if all(a == l[0] for a in l[1:]): # len(l) is > 1
                    repo.ui.note(_(" %s: consensus for %s\n") % (f, m))
                    actions[f] = l[0]
                    continue
            # If keep is an option, just do it.
            if ACTION_KEEP in bids:
                repo.ui.note(_(" %s: picking 'keep' action\n") % f)
                actions[f] = bids[ACTION_KEEP][0]
                continue
            # If there are gets and they all agree [how could they not?], do it.
            if ACTION_GET in bids:
                ga0 = bids[ACTION_GET][0]
                if all(a == ga0 for a in bids[ACTION_GET][1:]):
                    repo.ui.note(_(" %s: picking 'get' action\n") % f)
                    actions[f] = ga0
                    continue
            # TODO: Consider other simple actions such as mode changes
            # Handle inefficient democrazy.
            repo.ui.note(_(' %s: multiple bids for merge action:\n') % f)
            for m, l in sorted(bids.items()):
                for _f, args, msg in l:
                    repo.ui.note('  %s -> %s\n' % (msg, m))
            # Pick random action. TODO: Instead, prompt user when resolving
            m, l = list(bids.items())[0]
            repo.ui.warn(_(' %s: ambiguous merge - picked %s action\n') %
                         (f, m))
            actions[f] = l[0]
            continue
        repo.ui.note(_('end of auction\n\n'))

    if wctx.rev() is None:
        fractions = _forgetremoved(wctx, mctx, branchmerge)
        actions.update(fractions)

    prunedactions = sparse.filterupdatesactions(repo, wctx, mctx, branchmerge,
                                                actions)
    _resolvetrivial(repo, wctx, mctx, ancestors[0], actions)

    return prunedactions, diverge, renamedelete

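# Sketch of the bid-merge bookkeeping above, with illustrative values: two
# ancestors might each propose a different action for the same file,
#
#   fbids = {'f': {ACTION_GET: [('f', (fl, False), 'remote is newer')],
#                  ACTION_KEEP: [('f', None, 'keep')]}}
#
# in which case the auction picks the ACTION_KEEP bid, since keeping the
# working copy version is always a safe choice.
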
def _getcwd():
    try:
        return encoding.getcwd()
    except OSError as err:
        if err.errno == errno.ENOENT:
            return None
        raise

def batchremove(repo, wctx, actions):
    """apply removes to the working directory

    yields tuples for progress updates
    """
    verbose = repo.ui.verbose
    cwd = _getcwd()
    i = 0
    for f, args, msg in actions:
        repo.ui.debug(" %s: %s -> r\n" % (f, msg))
        if verbose:
            repo.ui.note(_("removing %s\n") % f)
        wctx[f].audit()
        try:
            wctx[f].remove(ignoremissing=True)
        except OSError as inst:
            repo.ui.warn(_("update failed to remove %s: %s!\n") %
                         (f, inst.strerror))
        if i == 100:
            yield i, f
            i = 0
        i += 1
    if i > 0:
        yield i, f

    if cwd and not _getcwd():
        # cwd was removed in the course of removing files; print a helpful
        # warning.
        repo.ui.warn(_("current directory was removed\n"
                       "(consider changing to repo root: %s)\n") % repo.root)

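# Usage sketch (mirrors the consumer in applyupdates() below): batchremove()
# is a generator, so files are only removed as the progress tuples are
# consumed:
#
#   for i, f in batchremove(repo, wctx, actions[ACTION_REMOVE]):
#       progress.increment(step=i, item=f)
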
def batchget(repo, mctx, wctx, wantfiledata, actions):
    """apply gets to the working directory

    mctx is the context to get from

    Yields arbitrarily many (False, tuple) for progress updates, followed by
    exactly one (True, filedata). When wantfiledata is false, filedata is an
    empty dict. When wantfiledata is true, filedata[f] is a triple (mode, size,
    mtime) of the file f written for each action.
    """
    filedata = {}
    verbose = repo.ui.verbose
    fctx = mctx.filectx
    ui = repo.ui
    i = 0
    with repo.wvfs.backgroundclosing(ui, expectedcount=len(actions)):
        for f, (flags, backup), msg in actions:
            repo.ui.debug(" %s: %s -> g\n" % (f, msg))
            if verbose:
                repo.ui.note(_("getting %s\n") % f)

            if backup:
                # If a file or directory exists with the same name, back that
                # up. Otherwise, look to see if there is a file that conflicts
                # with a directory this file is in, and if so, back that up.
                conflicting = f
                if not repo.wvfs.lexists(f):
                    for p in util.finddirs(f):
                        if repo.wvfs.isfileorlink(p):
                            conflicting = p
                            break
                if repo.wvfs.lexists(conflicting):
                    orig = scmutil.backuppath(ui, repo, conflicting)
                    util.rename(repo.wjoin(conflicting), orig)
            wfctx = wctx[f]
            wfctx.clearunknown()
            atomictemp = ui.configbool("experimental", "update.atomic-file")
            size = wfctx.write(fctx(f).data(), flags,
                               backgroundclose=True,
                               atomictemp=atomictemp)
            if wantfiledata:
                s = wfctx.lstat()
                mode = s.st_mode
                mtime = s[stat.ST_MTIME]
                filedata[f] = (mode, size, mtime) # for dirstate.normal
            if i == 100:
                yield False, (i, f)
                i = 0
            i += 1
    if i > 0:
        yield False, (i, f)
    yield True, filedata

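# Usage sketch (mirrors the consumer in applyupdates() below): the
# (final, payload) protocol separates progress ticks from the single
# filedata result:
#
#   for final, res in batchget(repo, mctx, wctx, True, actions[ACTION_GET]):
#       if final:
#           filedata = res       # {f: (mode, size, mtime)}
#       else:
#           i, f = res           # progress tick
#           progress.increment(step=i, item=f)
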
def _prefetchfiles(repo, ctx, actions):
    """Invoke ``scmutil.prefetchfiles()`` for the files relevant to the dict
    of merge actions. ``ctx`` is the context being merged in."""

    # Skipping 'a', 'am', 'f', 'r', 'dm', 'e', 'k', 'p' and 'pr', because they
    # don't touch the context to be merged in. 'cd' is skipped, because
    # changed/deleted never resolves to something from the remote side.
    oplist = [actions[a] for a in (ACTION_GET, ACTION_DELETED_CHANGED,
                                   ACTION_LOCAL_DIR_RENAME_GET, ACTION_MERGE)]
    prefetch = scmutil.prefetchfiles
    matchfiles = scmutil.matchfiles
    prefetch(repo, [ctx.rev()],
             matchfiles(repo,
                        [f for sublist in oplist for f, args, msg in sublist]))

@attr.s(frozen=True)
class updateresult(object):
    updatedcount = attr.ib()
    mergedcount = attr.ib()
    removedcount = attr.ib()
    unresolvedcount = attr.ib()

    def isempty(self):
        return not (self.updatedcount or self.mergedcount
                    or self.removedcount or self.unresolvedcount)

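# Illustrative use of the attrs-based result object above:
#
#   res = updateresult(updatedcount=3, mergedcount=1, removedcount=0,
#                      unresolvedcount=0)
#   assert not res.isempty()
#   if res.unresolvedcount:
#       ...  # surface unresolved conflicts to the caller
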
def emptyactions():
    """create an actions dict, to be populated and passed to applyupdates()"""
    return dict((m, [])
                for m in (
                    ACTION_ADD,
                    ACTION_ADD_MODIFIED,
                    ACTION_FORGET,
                    ACTION_GET,
                    ACTION_CHANGED_DELETED,
                    ACTION_DELETED_CHANGED,
                    ACTION_REMOVE,
                    ACTION_DIR_RENAME_MOVE_LOCAL,
                    ACTION_LOCAL_DIR_RENAME_GET,
                    ACTION_MERGE,
                    ACTION_EXEC,
                    ACTION_KEEP,
                    ACTION_PATH_CONFLICT,
                    ACTION_PATH_CONFLICT_RESOLVE))

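# Example (hypothetical paths): callers fill in the dict-of-lists before
# handing it to applyupdates():
#
#   actions = emptyactions()
#   actions[ACTION_GET].append(('foo', (fl, False), 'created on remote'))
#   actions[ACTION_REMOVE].append(('bar', None, 'deleted on remote'))
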
def applyupdates(repo, actions, wctx, mctx, overwrite, wantfiledata,
                 labels=None):
    """apply the merge action list to the working directory

    wctx is the working copy context
    mctx is the context to be merged into the working copy

    Return a tuple of (counts, filedata), where counts is a tuple
    (updated, merged, removed, unresolved) that describes how many
    files were affected by the update, and filedata is as described in
    batchget.
    """

    _prefetchfiles(repo, mctx, actions)

    updated, merged, removed = 0, 0, 0
    ms = mergestate.clean(repo, wctx.p1().node(), mctx.node(), labels)
    moves = []
    for m, l in actions.items():
        l.sort()

    # 'cd' and 'dc' actions are treated like other merge conflicts
    mergeactions = sorted(actions[ACTION_CHANGED_DELETED])
    mergeactions.extend(sorted(actions[ACTION_DELETED_CHANGED]))
    mergeactions.extend(actions[ACTION_MERGE])
    for f, args, msg in mergeactions:
        f1, f2, fa, move, anc = args
        if f == '.hgsubstate': # merged internally
            continue
        if f1 is None:
            fcl = filemerge.absentfilectx(wctx, fa)
        else:
            repo.ui.debug(" preserving %s for resolve of %s\n" % (f1, f))
            fcl = wctx[f1]
        if f2 is None:
            fco = filemerge.absentfilectx(mctx, fa)
        else:
            fco = mctx[f2]
        actx = repo[anc]
        if fa in actx:
            fca = actx[fa]
        else:
            # TODO: move to absentfilectx
            fca = repo.filectx(f1, fileid=nullrev)
        ms.add(fcl, fco, fca, f)
        if f1 != f and move:
            moves.append(f1)

    # remove renamed files after safely stored
    for f in moves:
        if wctx[f].lexists():
            repo.ui.debug("removing %s\n" % f)
            wctx[f].audit()
            wctx[f].remove()

    numupdates = sum(len(l) for m, l in actions.items()
                     if m != ACTION_KEEP)
    progress = repo.ui.makeprogress(_('updating'), unit=_('files'),
                                    total=numupdates)

    if [a for a in actions[ACTION_REMOVE] if a[0] == '.hgsubstate']:
        subrepoutil.submerge(repo, wctx, mctx, wctx, overwrite, labels)

    # record path conflicts
    for f, args, msg in actions[ACTION_PATH_CONFLICT]:
        f1, fo = args
        s = repo.ui.status
        s(_("%s: path conflict - a file or link has the same name as a "
            "directory\n") % f)
        if fo == 'l':
            s(_("the local file has been renamed to %s\n") % f1)
        else:
            s(_("the remote file has been renamed to %s\n") % f1)
        s(_("resolve manually then use 'hg resolve --mark %s'\n") % f)
        ms.addpath(f, f1, fo)
        progress.increment(item=f)

    # When merging in-memory, we can't support worker processes, so set the
    # per-item cost at 0 in that case.
    cost = 0 if wctx.isinmemory() else 0.001

    # remove in parallel (must come before resolving path conflicts and getting)
    prog = worker.worker(repo.ui, cost, batchremove, (repo, wctx),
                         actions[ACTION_REMOVE])
    for i, item in prog:
        progress.increment(step=i, item=item)
    removed = len(actions[ACTION_REMOVE])

    # resolve path conflicts (must come before getting)
    for f, args, msg in actions[ACTION_PATH_CONFLICT_RESOLVE]:
        repo.ui.debug(" %s: %s -> pr\n" % (f, msg))
        f0, = args
        if wctx[f0].lexists():
            repo.ui.note(_("moving %s to %s\n") % (f0, f))
            wctx[f].audit()
            wctx[f].write(wctx.filectx(f0).data(), wctx.filectx(f0).flags())
            wctx[f0].remove()
        progress.increment(item=f)

    # get in parallel.
    threadsafe = repo.ui.configbool('experimental',
                                    'worker.wdir-get-thread-safe')
    prog = worker.worker(repo.ui, cost, batchget,
                         (repo, mctx, wctx, wantfiledata),
                         actions[ACTION_GET],
                         threadsafe=threadsafe,
                         hasretval=True)
    getfiledata = {}
    for final, res in prog:
        if final:
            getfiledata = res
        else:
            i, item = res
            progress.increment(step=i, item=item)
    updated = len(actions[ACTION_GET])

    if [a for a in actions[ACTION_GET] if a[0] == '.hgsubstate']:
        subrepoutil.submerge(repo, wctx, mctx, wctx, overwrite, labels)

    # forget (manifest only, just log it) (must come first)
    for f, args, msg in actions[ACTION_FORGET]:
        repo.ui.debug(" %s: %s -> f\n" % (f, msg))
        progress.increment(item=f)

    # re-add (manifest only, just log it)
    for f, args, msg in actions[ACTION_ADD]:
        repo.ui.debug(" %s: %s -> a\n" % (f, msg))
        progress.increment(item=f)

    # re-add/mark as modified (manifest only, just log it)
    for f, args, msg in actions[ACTION_ADD_MODIFIED]:
        repo.ui.debug(" %s: %s -> am\n" % (f, msg))
        progress.increment(item=f)

    # keep (noop, just log it)
    for f, args, msg in actions[ACTION_KEEP]:
        repo.ui.debug(" %s: %s -> k\n" % (f, msg))
        # no progress

    # directory rename, move local
    for f, args, msg in actions[ACTION_DIR_RENAME_MOVE_LOCAL]:
        repo.ui.debug(" %s: %s -> dm\n" % (f, msg))
        progress.increment(item=f)
        f0, flags = args
        repo.ui.note(_("moving %s to %s\n") % (f0, f))
        wctx[f].audit()
        wctx[f].write(wctx.filectx(f0).data(), flags)
        wctx[f0].remove()
        updated += 1

    # local directory rename, get
    for f, args, msg in actions[ACTION_LOCAL_DIR_RENAME_GET]:
        repo.ui.debug(" %s: %s -> dg\n" % (f, msg))
        progress.increment(item=f)
        f0, flags = args
        repo.ui.note(_("getting %s to %s\n") % (f0, f))
        wctx[f].write(mctx.filectx(f0).data(), flags)
        updated += 1

    # exec
    for f, args, msg in actions[ACTION_EXEC]:
        repo.ui.debug(" %s: %s -> e\n" % (f, msg))
        progress.increment(item=f)
        flags, = args
        wctx[f].audit()
        wctx[f].setflags('l' in flags, 'x' in flags)
        updated += 1

    # the ordering is important here -- ms.mergedriver will raise if the merge
    # driver has changed, and we want to be able to bypass it when overwrite is
    # True
    usemergedriver = not overwrite and mergeactions and ms.mergedriver

    if usemergedriver:
        if wctx.isinmemory():
            raise error.InMemoryMergeConflictsError("in-memory merge does not "
                                                    "support mergedriver")
        ms.commit()
        proceed = driverpreprocess(repo, ms, wctx, labels=labels)
        # the driver might leave some files unresolved
        unresolvedf = set(ms.unresolved())
        if not proceed:
            # XXX setting unresolved to at least 1 is a hack to make sure we
            # error out
            return updateresult(updated, merged, removed,
                                max(len(unresolvedf), 1))
        newactions = []
        for f, args, msg in mergeactions:
            if f in unresolvedf:
                newactions.append((f, args, msg))
        mergeactions = newactions

    try:
        # premerge
        tocomplete = []
        for f, args, msg in mergeactions:
            repo.ui.debug(" %s: %s -> m (premerge)\n" % (f, msg))
            progress.increment(item=f)
            if f == '.hgsubstate': # subrepo states need updating
                subrepoutil.submerge(repo, wctx, mctx, wctx.ancestor(mctx),
                                     overwrite, labels)
                continue
            wctx[f].audit()
            complete, r = ms.preresolve(f, wctx)
            if not complete:
                numupdates += 1
                tocomplete.append((f, args, msg))

        # merge
        for f, args, msg in tocomplete:
            repo.ui.debug(" %s: %s -> m (merge)\n" % (f, msg))
            progress.increment(item=f, total=numupdates)
            ms.resolve(f, wctx)

    finally:
        ms.commit()

    unresolved = ms.unresolvedcount()

    if (usemergedriver and not unresolved
        and ms.mdstate() != MERGE_DRIVER_STATE_SUCCESS):
        if not driverconclude(repo, ms, wctx, labels=labels):
            # XXX setting unresolved to at least 1 is a hack to make sure we
            # error out
            unresolved = max(unresolved, 1)

        ms.commit()

    msupdated, msmerged, msremoved = ms.counts()
    updated += msupdated
    merged += msmerged
    removed += msremoved

    extraactions = ms.actions()
    if extraactions:
        mfiles = set(a[0] for a in actions[ACTION_MERGE])
        for k, acts in extraactions.iteritems():
            actions[k].extend(acts)
            if k == ACTION_GET and wantfiledata:
                # no filedata until mergestate is updated to provide it
                for a in acts:
                    getfiledata[a[0]] = None
            # Remove these files from actions[ACTION_MERGE] as well. This is
            # important because in recordupdates, files in actions[ACTION_MERGE]
            # are processed after files in other actions, and the merge driver
            # might add files to those actions via extraactions above. This can
            # lead to a file being recorded twice, with poor results. This is
            # especially problematic for actions[ACTION_REMOVE] (currently only
            # possible with the merge driver in the initial merge process;
            # interrupted merges don't go through this flow).
            #
            # The real fix here is to have indexes by both file and action so
            # that when the action for a file is changed it is automatically
            # reflected in the other action lists. But that involves a more
            # complex data structure, so this will do for now.
            #
            # We don't need to do the same operation for 'dc' and 'cd' because
            # those lists aren't consulted again.
            mfiles.difference_update(a[0] for a in acts)

        actions[ACTION_MERGE] = [a for a in actions[ACTION_MERGE]
                                 if a[0] in mfiles]

    progress.complete()
    assert len(getfiledata) == (len(actions[ACTION_GET]) if wantfiledata else 0)
    return updateresult(updated, merged, removed, unresolved), getfiledata

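# Simplified sketch of how update() below pairs the two calls (the 'partial'
# flag is as computed there; details such as in-memory contexts are omitted):
#
#   stats, getfiledata = applyupdates(repo, actions, wc, p2, overwrite,
#                                     wantfiledata, labels=labels)
#   if not partial:
#       recordupdates(repo, actions, branchmerge, getfiledata)
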
def recordupdates(repo, actions, branchmerge, getfiledata):
    "record merge actions to the dirstate"
    # remove (must come first)
    for f, args, msg in actions.get(ACTION_REMOVE, []):
        if branchmerge:
            repo.dirstate.remove(f)
        else:
            repo.dirstate.drop(f)

    # forget (must come first)
    for f, args, msg in actions.get(ACTION_FORGET, []):
        repo.dirstate.drop(f)

    # resolve path conflicts
    for f, args, msg in actions.get(ACTION_PATH_CONFLICT_RESOLVE, []):
        f0, = args
        origf0 = repo.dirstate.copied(f0) or f0
        repo.dirstate.add(f)
        repo.dirstate.copy(origf0, f)
        if f0 == origf0:
            repo.dirstate.remove(f0)
        else:
            repo.dirstate.drop(f0)

    # re-add
    for f, args, msg in actions.get(ACTION_ADD, []):
        repo.dirstate.add(f)

    # re-add/mark as modified
    for f, args, msg in actions.get(ACTION_ADD_MODIFIED, []):
        if branchmerge:
            repo.dirstate.normallookup(f)
        else:
            repo.dirstate.add(f)

    # exec change
    for f, args, msg in actions.get(ACTION_EXEC, []):
        repo.dirstate.normallookup(f)

    # keep
    for f, args, msg in actions.get(ACTION_KEEP, []):
        pass

    # get
    for f, args, msg in actions.get(ACTION_GET, []):
        if branchmerge:
            repo.dirstate.otherparent(f)
        else:
            parentfiledata = getfiledata[f] if getfiledata else None
            repo.dirstate.normal(f, parentfiledata=parentfiledata)

    # merge
    for f, args, msg in actions.get(ACTION_MERGE, []):
        f1, f2, fa, move, anc = args
        if branchmerge:
            # We've done a branch merge, mark this file as merged
            # so that we properly record the merger later
            repo.dirstate.merge(f)
            if f1 != f2: # copy/rename
                if move:
                    repo.dirstate.remove(f1)
                if f1 != f:
                    repo.dirstate.copy(f1, f)
                else:
                    repo.dirstate.copy(f2, f)
        else:
            # We've update-merged a locally modified file, so
            # we set the dirstate to emulate a normal checkout
            # of that file some time in the past. Thus our
            # merge will appear as a normal local file
            # modification.
            if f2 == f: # file not locally copied/moved
                repo.dirstate.normallookup(f)
            if move:
                repo.dirstate.drop(f1)

    # directory rename, move local
    for f, args, msg in actions.get(ACTION_DIR_RENAME_MOVE_LOCAL, []):
        f0, flag = args
        if branchmerge:
            repo.dirstate.add(f)
            repo.dirstate.remove(f0)
            repo.dirstate.copy(f0, f)
        else:
            repo.dirstate.normal(f)
            repo.dirstate.drop(f0)

    # directory rename, get
    for f, args, msg in actions.get(ACTION_LOCAL_DIR_RENAME_GET, []):
        f0, flag = args
        if branchmerge:
            repo.dirstate.add(f)
            repo.dirstate.copy(f0, f)
        else:
            repo.dirstate.normal(f)

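# Worked example (hypothetical rename): after a branch merge in which local
# file 'a' was moved to 'b' and then merged, the ACTION_MERGE branch above
# issues
#
#   repo.dirstate.merge('b')      # mark 'b' as merged
#   repo.dirstate.remove('a')     # move=True: drop the old name
#   repo.dirstate.copy('a', 'b')  # remember the rename for the next commit
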
UPDATECHECK_ABORT = 'abort' # handled at higher layers
UPDATECHECK_NONE = 'none'
UPDATECHECK_LINEAR = 'linear'
UPDATECHECK_NO_CONFLICT = 'noconflict'

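# These constants mirror the user-facing values accepted by the
# ``commands.update.check`` (formerly ``experimental.updatecheck``) config
# knob, e.g. in an hgrc (illustrative):
#
#   [commands]
#   update.check = noconflict
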
def update(repo, node, branchmerge, force, ancestor=None,
           mergeancestor=False, labels=None, matcher=None, mergeforce=False,
           updatecheck=None, wc=None):
    """
    Perform a merge between the working directory and the given node

    node = the node to update to
    branchmerge = whether to merge between branches
    force = whether to force branch merging or file overwriting
    matcher = a matcher to filter file lists (dirstate not updated)
    mergeancestor = whether it is merging with an ancestor. If true,
      we should accept the incoming changes for any prompts that occur.
      If false, merging with an ancestor (fast-forward) is only allowed
      between different named branches. This flag is used by the rebase
      extension as a temporary fix and should be avoided in general.
    labels = labels to use for base, local and other
    mergeforce = whether the merge was run with 'merge --force' (deprecated): if
      this is True, then 'force' should be True as well.

    The table below shows all the behaviors of the update command given the
    -c/--check and -C/--clean or no options, whether the working directory is
    dirty, whether a revision is specified, and the relationship of the parent
    rev to the target rev (linear or not). Match from top first. The -n
    option doesn't exist on the command line, but represents the
    experimental.updatecheck=noconflict option.

    This logic is tested by test-update-branches.t.

    -c  -C  -n  -m  dirty  rev  linear  |  result
     y   y   *   *    *     *     *     |    (1)
     y   *   y   *    *     *     *     |    (1)
     y   *   *   y    *     *     *     |    (1)
     *   y   y   *    *     *     *     |    (1)
     *   y   *   y    *     *     *     |    (1)
     *   *   y   y    *     *     *     |    (1)
     *   *   *   *    *     n     n     |     x
     *   *   *   *    n     *     *     |    ok
     n   n   n   n    y     *     y     |  merge
     n   n   n   n    y     y     n     |    (2)
     n   n   n   y    y     *     *     |  merge
     n   n   y   n    y     *     *     |  merge if no conflict
     n   y   n   n    y     *     *     |  discard
     y   n   n   n    y     *     *     |    (3)

    x = can't happen
    * = don't-care
    1 = incompatible options (checked in commands.py)
    2 = abort: uncommitted changes (commit or update --clean to discard changes)
    3 = abort: uncommitted changes (checked in commands.py)

    The merge is performed inside ``wc``, a workingctx-like object. It defaults
    to repo[None] if None is passed.

    Return the same tuple as applyupdates().
    """
    # Avoid cycle.
    from . import sparse

    # This function used to find the default destination if node was None, but
    # that's now in destutil.py.
    assert node is not None
    if not branchmerge and not force:
        # TODO: remove the default once all callers that pass branchmerge=False
        # and force=False pass a value for updatecheck. We may want to allow
        # updatecheck='abort' to better support some of these callers.
        if updatecheck is None:
            updatecheck = UPDATECHECK_LINEAR
        assert updatecheck in (UPDATECHECK_NONE,
                               UPDATECHECK_LINEAR,
                               UPDATECHECK_NO_CONFLICT,
                               )
    # If we're doing a partial update, we need to skip updating
    # the dirstate, so make a note of any partial-ness to the
    # update here.
    if matcher is None or matcher.always():
        partial = False
    else:
        partial = True
    with repo.wlock():
        if wc is None:
            wc = repo[None]
        pl = wc.parents()
        p1 = pl[0]
        p2 = repo[node]
        if ancestor is not None:
            pas = [repo[ancestor]]
        else:
            if repo.ui.configlist('merge', 'preferancestor') == ['*']:
                cahs = repo.changelog.commonancestorsheads(p1.node(), p2.node())
                pas = [repo[anc] for anc in (sorted(cahs) or [nullid])]
            else:
                pas = [p1.ancestor(p2, warn=branchmerge)]

        fp1, fp2, xp1, xp2 = p1.node(), p2.node(), bytes(p1), bytes(p2)

        overwrite = force and not branchmerge
        ### check phase
        if not overwrite:
            if len(pl) > 1:
                raise error.Abort(_("outstanding uncommitted merge"))
            ms = mergestate.read(repo)
            if list(ms.unresolved()):
                raise error.Abort(_("outstanding merge conflicts"),
                                  hint=_("use 'hg resolve' to resolve"))
        if branchmerge:
            if pas == [p2]:
                raise error.Abort(_("merging with a working directory ancestor"
                                    " has no effect"))
            elif pas == [p1]:
                if not mergeancestor and wc.branch() == p2.branch():
                    raise error.Abort(_("nothing to merge"),
                                      hint=_("use 'hg update' "
                                             "or check 'hg heads'"))
            if not force and (wc.files() or wc.deleted()):
                raise error.Abort(_("uncommitted changes"),
                                  hint=_("use 'hg status' to list changes"))
            if not wc.isinmemory():
                for s in sorted(wc.substate):
                    wc.sub(s).bailifchanged()

        elif not overwrite:
            if p1 == p2: # no-op update
                # call the hooks and exit early
                repo.hook('preupdate', throw=True, parent1=xp2, parent2='')
                repo.hook('update', parent1=xp2, parent2='', error=0)
                return updateresult(0, 0, 0, 0)

            if (updatecheck == UPDATECHECK_LINEAR and
                    pas not in ([p1], [p2])): # nonlinear
                dirty = wc.dirty(missing=True)
                if dirty:
                    # Branching is a bit strange to ensure we do the minimal
                    # amount of call to obsutil.foreground.
                    foreground = obsutil.foreground(repo, [p1.node()])
                    # note: the <node> variable contains a random identifier
                    if repo[node].node() in foreground:
                        pass # allow updating to successors
                    else:
                        msg = _("uncommitted changes")
                        hint = _("commit or update --clean to discard changes")
                        raise error.UpdateAbort(msg, hint=hint)
                else:
                    # Allow jumping branches if clean and specific rev given
                    pass

        if overwrite:
            pas = [wc]
        elif not branchmerge:
            pas = [p1]

        # deprecated config: merge.followcopies
        followcopies = repo.ui.configbool('merge', 'followcopies')
        if overwrite:
            followcopies = False
        elif not pas[0]:
            followcopies = False
        if not branchmerge and not wc.dirty(missing=True):
            followcopies = False

        ### calculate phase
        actionbyfile, diverge, renamedelete = calculateupdates(
            repo, wc, p2, pas, branchmerge, force, mergeancestor,
            followcopies, matcher=matcher, mergeforce=mergeforce)

        if updatecheck == UPDATECHECK_NO_CONFLICT:
            for f, (m, args, msg) in actionbyfile.iteritems():
                if m not in (ACTION_GET, ACTION_KEEP, ACTION_EXEC,
                             ACTION_REMOVE, ACTION_PATH_CONFLICT_RESOLVE):
                    msg = _("conflicting changes")
                    hint = _("commit or update --clean to discard changes")
                    raise error.Abort(msg, hint=hint)

        # Prompt and create actions. Most of this is in the resolve phase
        # already, but we can't handle .hgsubstate in filemerge or
        # subrepoutil.submerge yet so we have to keep prompting for it.
        if '.hgsubstate' in actionbyfile:
            f = '.hgsubstate'
            m, args, msg = actionbyfile[f]
            prompts = filemerge.partextras(labels)
            prompts['f'] = f
            if m == ACTION_CHANGED_DELETED:
                if repo.ui.promptchoice(
                    _("local%(l)s changed %(f)s which other%(o)s deleted\n"
                      "use (c)hanged version or (d)elete?"
                      "$$ &Changed $$ &Delete") % prompts, 0):
                    actionbyfile[f] = (ACTION_REMOVE, None, 'prompt delete')
                elif f in p1:
                    actionbyfile[f] = (ACTION_ADD_MODIFIED, None, 'prompt keep')
                else:
                    actionbyfile[f] = (ACTION_ADD, None, 'prompt keep')
            elif m == ACTION_DELETED_CHANGED:
                f1, f2, fa, move, anc = args
                flags = p2[f2].flags()
                if repo.ui.promptchoice(
                    _("other%(o)s changed %(f)s which local%(l)s deleted\n"
                      "use (c)hanged version or leave (d)eleted?"
                      "$$ &Changed $$ &Deleted") % prompts, 0) == 0:
                    actionbyfile[f] = (ACTION_GET, (flags, False),
                                       'prompt recreating')
                else:
                    del actionbyfile[f]

        # Convert to dictionary-of-lists format
        actions = emptyactions()
        for f, (m, args, msg) in actionbyfile.iteritems():
            if m not in actions:
                actions[m] = []
            actions[m].append((f, args, msg))

        if not util.fscasesensitive(repo.path):
            # check collision between files only in p2 for clean update
            if (not branchmerge and
                (force or not wc.dirty(missing=True, branch=False))):
                _checkcollision(repo, p2.manifest(), None)
            else:
                _checkcollision(repo, wc.manifest(), actions)

        # divergent renames
        for f, fl in sorted(diverge.iteritems()):
            repo.ui.warn(_("note: possible conflict - %s was renamed "
                           "multiple times to:\n") % f)
            for nf in sorted(fl):
                repo.ui.warn(" %s\n" % nf)

        # rename and delete
        for f, fl in sorted(renamedelete.iteritems()):
            repo.ui.warn(_("note: possible conflict - %s was deleted "
                           "and renamed to:\n") % f)
            for nf in sorted(fl):
                repo.ui.warn(" %s\n" % nf)

        ### apply phase
        if not branchmerge: # just jump to the new rev
2159 fp1, fp2, xp1, xp2 = fp2, nullid, xp2, ''
2167 fp1, fp2, xp1, xp2 = fp2, nullid, xp2, ''
2160 if not partial and not wc.isinmemory():
2168 if not partial and not wc.isinmemory():
2161 repo.hook('preupdate', throw=True, parent1=xp1, parent2=xp2)
2169 repo.hook('preupdate', throw=True, parent1=xp1, parent2=xp2)
2162 # note that we're in the middle of an update
2170 # note that we're in the middle of an update
2163 repo.vfs.write('updatestate', p2.hex())
2171 repo.vfs.write('updatestate', p2.hex())
2164
2172
2165 # Advertise fsmonitor when its presence could be useful.
2173 # Advertise fsmonitor when its presence could be useful.
2166 #
2174 #
2167 # We only advertise when performing an update from an empty working
2175 # We only advertise when performing an update from an empty working
2168 # directory. This typically only occurs during initial clone.
2176 # directory. This typically only occurs during initial clone.
2169 #
2177 #
2170 # We give users a mechanism to disable the warning in case it is
2178 # We give users a mechanism to disable the warning in case it is
2171 # annoying.
2179 # annoying.
2172 #
2180 #
2173 # We only allow on Linux and MacOS because that's where fsmonitor is
2181 # We only allow on Linux and MacOS because that's where fsmonitor is
2174 # considered stable.
2182 # considered stable.
2175 fsmonitorwarning = repo.ui.configbool('fsmonitor', 'warn_when_unused')
2183 fsmonitorwarning = repo.ui.configbool('fsmonitor', 'warn_when_unused')
2176 fsmonitorthreshold = repo.ui.configint('fsmonitor',
2184 fsmonitorthreshold = repo.ui.configint('fsmonitor',
2177 'warn_update_file_count')
2185 'warn_update_file_count')
2178 try:
2186 try:
2179 # avoid cycle: extensions -> cmdutil -> merge
2187 # avoid cycle: extensions -> cmdutil -> merge
2180 from . import extensions
2188 from . import extensions
2181 extensions.find('fsmonitor')
2189 extensions.find('fsmonitor')
2182 fsmonitorenabled = repo.ui.config('fsmonitor', 'mode') != 'off'
2190 fsmonitorenabled = repo.ui.config('fsmonitor', 'mode') != 'off'
2183 # We intentionally don't look at whether fsmonitor has disabled
2191 # We intentionally don't look at whether fsmonitor has disabled
2184 # itself because a) fsmonitor may have already printed a warning
2192 # itself because a) fsmonitor may have already printed a warning
2185 # b) we only care about the config state here.
2193 # b) we only care about the config state here.
2186 except KeyError:
2194 except KeyError:
2187 fsmonitorenabled = False
2195 fsmonitorenabled = False
2188
2196
2189 if (fsmonitorwarning
2197 if (fsmonitorwarning
2190 and not fsmonitorenabled
2198 and not fsmonitorenabled
2191 and p1.node() == nullid
2199 and p1.node() == nullid
2192 and len(actions[ACTION_GET]) >= fsmonitorthreshold
2200 and len(actions[ACTION_GET]) >= fsmonitorthreshold
2193 and pycompat.sysplatform.startswith(('linux', 'darwin'))):
2201 and pycompat.sysplatform.startswith(('linux', 'darwin'))):
2194 repo.ui.warn(
2202 repo.ui.warn(
2195 _('(warning: large working directory being used without '
2203 _('(warning: large working directory being used without '
2196 'fsmonitor enabled; enable fsmonitor to improve performance; '
2204 'fsmonitor enabled; enable fsmonitor to improve performance; '
2197 'see "hg help -e fsmonitor")\n'))
2205 'see "hg help -e fsmonitor")\n'))
2198
2206
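        # A sketch (not part of the file above) of the hgrc knobs this
        # advisory reads, for users who want to silence or tune it; the
        # values shown are illustrative assumptions, not defaults:
        #
        #     [fsmonitor]
        #     warn_when_unused = false
        #     warn_update_file_count = 100000
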
        updatedirstate = not partial and not wc.isinmemory()
        wantfiledata = updatedirstate and not branchmerge
        stats, getfiledata = applyupdates(repo, actions, wc, p2, overwrite,
                                          wantfiledata, labels=labels)

        if updatedirstate:
            with repo.dirstate.parentchange():
                repo.setparents(fp1, fp2)
                recordupdates(repo, actions, branchmerge, getfiledata)
                # update completed, clear state
                util.unlink(repo.vfs.join('updatestate'))

                if not branchmerge:
                    repo.dirstate.setbranch(p2.branch())

        # If we're updating to a location, clean up any stale temporary includes
        # (ex: this happens during hg rebase --abort).
        if not branchmerge:
            sparse.prunetemporaryincludes(repo)

        if not partial:
            repo.hook('update', parent1=xp1, parent2=xp2,
                      error=stats.unresolvedcount)
        return stats
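
For callers outside this hunk, a minimal sketch of driving update() with the new named constant instead of the old 'noconflict' magic string; the safe_checkout helper, and importing this module as mergemod, are assumptions for illustration:

# Hypothetical caller; assumes a loaded localrepo `repo` and a target `node`.
from mercurial import merge as mergemod

def safe_checkout(repo, node):
    # With UPDATECHECK_NO_CONFLICT, update() raises error.Abort
    # ("conflicting changes") in the calculate phase above, before any
    # files are touched.
    return mergemod.update(repo, node, branchmerge=False, force=False,
                           updatecheck=mergemod.UPDATECHECK_NO_CONFLICT)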

def graft(repo, ctx, pctx, labels=None, keepparent=False,
          keepconflictparent=False):
    """Do a graft-like merge.

    This is a merge where the merge ancestor is chosen such that one
    or more changesets are grafted onto the current changeset. In
    addition to the merge, this fixes up the dirstate to include only
    a single parent (if keepparent is False) and tries to duplicate any
    renames/copies appropriately.

    ctx - changeset to rebase
    pctx - merge base, usually ctx.p1()
    labels - merge labels eg ['local', 'graft']
    keepparent - keep second parent if any
    keepconflictparent - if unresolved, keep parent used for the merge

    """
    # If we're grafting a descendant onto an ancestor, be sure to pass
    # mergeancestor=True to update. This does two things: 1) allows the merge if
    # the destination is the same as the parent of the ctx (so we can use graft
    # to copy commits), and 2) informs update that the incoming changes are
    # newer than the destination so it doesn't prompt about "remote changed foo
    # which local deleted".
    mergeancestor = repo.changelog.isancestor(repo['.'].node(), ctx.node())

    stats = update(repo, ctx.node(), True, True, pctx.node(),
                   mergeancestor=mergeancestor, labels=labels)

    if keepconflictparent and stats.unresolvedcount:
        pother = ctx.node()
    else:
        pother = nullid
        parents = ctx.parents()
        if keepparent and len(parents) == 2 and pctx in parents:
            parents.remove(pctx)
            pother = parents[0].node()

    with repo.dirstate.parentchange():
        repo.setparents(repo['.'].node(), pother)
        repo.dirstate.write(repo.currenttransaction())
        # fix up dirstate for copies and renames
        copies.duplicatecopies(repo, repo[None], ctx.rev(), pctx.rev())
    return stats
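
As a usage note, a hedged sketch of calling graft() per the docstring above; graft_one and its conflict handling are illustrative assumptions, not part of this change:

from mercurial import error, merge as mergemod
from mercurial.i18n import _

def graft_one(repo, rev):
    ctx = repo[rev]   # changeset to graft onto the working directory parent
    pctx = ctx.p1()   # merge base, the docstring's usual choice
    stats = mergemod.graft(repo, ctx, pctx, labels=['local', 'graft'])
    if stats.unresolvedcount:
        # how to surface unresolved files is up to the caller
        raise error.Abort(_('graft left unresolved conflicts'))
    return stats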

def purge(repo, matcher, ignored=False, removeemptydirs=True,
          removefiles=True, abortonerror=False, noop=False):
    """Purge the working directory of untracked files.

    ``matcher`` is a matcher configured to scan the working directory -
    potentially a subset.

    ``ignored`` controls whether ignored files should also be purged.

    ``removeemptydirs`` controls whether empty directories should be removed.

    ``removefiles`` controls whether files are removed.

    ``abortonerror`` causes an exception to be raised if an error occurs
    deleting a file or directory.

    ``noop`` controls whether to actually remove files. If not defined, actions
    will be taken.

    Returns an iterable of relative paths in the working directory that were
    or would be removed.
    """

    def remove(removefn, path):
        try:
            removefn(path)
        except OSError:
            m = _('%s cannot be removed') % path
            if abortonerror:
                raise error.Abort(m)
            else:
                repo.ui.warn(_('warning: %s\n') % m)

    # There's no API to copy a matcher. So mutate the passed matcher and
    # restore it when we're done.
    oldexplicitdir = matcher.explicitdir
    oldtraversedir = matcher.traversedir

    res = []

    try:
        if removeemptydirs:
            directories = []
            matcher.explicitdir = matcher.traversedir = directories.append

        status = repo.status(match=matcher, ignored=ignored, unknown=True)

        if removefiles:
            for f in sorted(status.unknown + status.ignored):
                if not noop:
                    repo.ui.note(_('removing file %s\n') % f)
                    remove(repo.wvfs.unlink, f)
                res.append(f)

        if removeemptydirs:
            for f in sorted(directories, reverse=True):
                if matcher(f) and not repo.wvfs.listdir(f):
                    if not noop:
                        repo.ui.note(_('removing directory %s\n') % f)
                        remove(repo.wvfs.rmdir, f)
                    res.append(f)

        return res

    finally:
        matcher.explicitdir = oldexplicitdir
        matcher.traversedir = oldtraversedir
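
Finally, a hedged sketch of a dry-run purge built on the API above; preview_purge and the use of scmutil.match as an all-matching matcher are assumptions for illustration:

from mercurial import merge as mergemod, scmutil

def preview_purge(repo):
    # scmutil.match(repo[None]) with no patterns matches the whole
    # working directory; noop=True reports paths without deleting anything.
    matcher = scmutil.match(repo[None])
    return sorted(mergemod.purge(repo, matcher, ignored=False, noop=True))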