merge: introduce a merge() for that use-case...
Martin von Zweigbergk
r44883:a45ffad9 default
@@ -1,1449 +1,1442 @@
# hg.py - repository classes for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
# Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno
import os
import shutil
import stat

from .i18n import _
from .node import nullid
from .pycompat import getattr

from . import (
    bookmarks,
    bundlerepo,
    cacheutil,
    cmdutil,
    destutil,
    discovery,
    error,
    exchange,
    extensions,
    httppeer,
    localrepo,
    lock,
    logcmdutil,
    logexchange,
    merge as mergemod,
    narrowspec,
    node,
    phases,
    pycompat,
    scmutil,
    sshpeer,
    statichttprepo,
    ui as uimod,
    unionrepo,
    url,
    util,
    verify as verifymod,
    vfs as vfsmod,
)
from .utils import hashutil
from .interfaces import repository as repositorymod

release = lock.release

# shared features
sharedbookmarks = b'bookmarks'


def _local(path):
    path = util.expandpath(util.urllocalpath(path))

    try:
        isfile = os.path.isfile(path)
    # Python 2 raises TypeError, Python 3 ValueError.
    except (TypeError, ValueError) as e:
        raise error.Abort(
            _(b'invalid path %s: %s') % (path, pycompat.bytestr(e))
        )

    return isfile and bundlerepo or localrepo


def addbranchrevs(lrepo, other, branches, revs):
    peer = other.peer()  # a courtesy to callers using a localrepo for other
    hashbranch, branches = branches
    if not hashbranch and not branches:
        x = revs or None
        if revs:
            y = revs[0]
        else:
            y = None
        return x, y
    if revs:
        revs = list(revs)
    else:
        revs = []

    if not peer.capable(b'branchmap'):
        if branches:
            raise error.Abort(_(b"remote branch lookup not supported"))
        revs.append(hashbranch)
        return revs, revs[0]

    with peer.commandexecutor() as e:
        branchmap = e.callcommand(b'branchmap', {}).result()

    def primary(branch):
        if branch == b'.':
            if not lrepo:
                raise error.Abort(_(b"dirstate branch not accessible"))
            branch = lrepo.dirstate.branch()
        if branch in branchmap:
            revs.extend(node.hex(r) for r in reversed(branchmap[branch]))
            return True
        else:
            return False

    for branch in branches:
        if not primary(branch):
            raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
    if hashbranch:
        if not primary(hashbranch):
            revs.append(hashbranch)
    return revs, revs[0]
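
# Hedged sketch of the shapes involved: branches is a (hashbranch, branches)
# pair as produced by parseurl() below, e.g.
#
#   revs, checkout = addbranchrevs(repo, other, (None, [b'stable']), None)
#
# which, for a peer with the b'branchmap' capability, expands b'stable' into
# the hex nodes of its heads and returns them plus the first as checkout.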


def parseurl(path, branches=None):
    '''parse url#branch, returning (url, (branch, branches))'''

    u = util.url(path)
    branch = None
    if u.fragment:
        branch = u.fragment
        u.fragment = None
    return bytes(u), (branch, branches or [])
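
# A hedged usage sketch (URL hypothetical):
#
#   url, (branch, branches) = parseurl(b'https://example.org/repo#stable')
#   # url == b'https://example.org/repo'; branch == b'stable'; branches == []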


schemes = {
    b'bundle': bundlerepo,
    b'union': unionrepo,
    b'file': _local,
    b'http': httppeer,
    b'https': httppeer,
    b'ssh': sshpeer,
    b'static-http': statichttprepo,
}


def _peerlookup(path):
    u = util.url(path)
    scheme = u.scheme or b'file'
    thing = schemes.get(scheme) or schemes[b'file']
    try:
        return thing(path)
    except TypeError:
        # we can't test callable(thing) because 'thing' can be an unloaded
        # module that implements __call__
        if not util.safehasattr(thing, b'instance'):
            raise
        return thing


def islocal(repo):
    '''return true if repo (or path pointing to repo) is local'''
    if isinstance(repo, bytes):
        try:
            return _peerlookup(repo).islocal(repo)
        except AttributeError:
            return False
    return repo.local()
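
# Hedged examples: islocal(b'ssh://host/repo') is False (the ssh peer module
# does not define islocal(), so the AttributeError path is taken), while
# islocal(b'/path/to/repo') resolves through the b'file' handler above.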


def openpath(ui, path, sendaccept=True):
    '''open path with open if local, url.open if remote'''
    pathurl = util.url(path, parsequery=False, parsefragment=False)
    if pathurl.islocal():
        return util.posixfile(pathurl.localpath(), b'rb')
    else:
        return url.open(ui, path, sendaccept=sendaccept)


# a list of (ui, repo) functions called for wire peer initialization
wirepeersetupfuncs = []


def _peerorrepo(
    ui, path, create=False, presetupfuncs=None, intents=None, createopts=None
):
    """return a repository object for the specified path"""
    obj = _peerlookup(path).instance(
        ui, path, create, intents=intents, createopts=createopts
    )
    ui = getattr(obj, "ui", ui)
    for f in presetupfuncs or []:
        f(ui, obj)
    ui.log(b'extension', b'- executing reposetup hooks\n')
    with util.timedcm('all reposetup') as allreposetupstats:
        for name, module in extensions.extensions(ui):
            ui.log(b'extension', b'  - running reposetup for %s\n', name)
            hook = getattr(module, 'reposetup', None)
            if hook:
                with util.timedcm('reposetup %r', name) as stats:
                    hook(ui, obj)
                ui.log(
                    b'extension', b'  > reposetup for %s took %s\n', name, stats
                )
    ui.log(b'extension', b'> all reposetup took %s\n', allreposetupstats)
    if not obj.local():
        for f in wirepeersetupfuncs:
            f(ui, obj)
    return obj


def repository(
    ui,
    path=b'',
    create=False,
    presetupfuncs=None,
    intents=None,
    createopts=None,
):
    """return a repository object for the specified path"""
    peer = _peerorrepo(
        ui,
        path,
        create,
        presetupfuncs=presetupfuncs,
        intents=intents,
        createopts=createopts,
    )
    repo = peer.local()
    if not repo:
        raise error.Abort(
            _(b"repository '%s' is not local") % (path or peer.url())
        )
    return repo.filtered(b'visible')


def peer(uiorrepo, opts, path, create=False, intents=None, createopts=None):
    '''return a repository peer for the specified path'''
    rui = remoteui(uiorrepo, opts)
    return _peerorrepo(
        rui, path, create, intents=intents, createopts=createopts
    ).peer()
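
# A hedged note on the contract: repository() insists on a local repository
# and returns it filtered to b'visible', while peer() accepts both local and
# remote paths and always hands back the peer interface, e.g.:
#
#   repo = repository(ui, b'/path/to/repo')             # local only
#   remote = peer(ui, {}, b'https://example.org/repo')  # local or remote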


def defaultdest(source):
    '''return default destination of clone if none is given

    >>> defaultdest(b'foo')
    'foo'
    >>> defaultdest(b'/foo/bar')
    'bar'
    >>> defaultdest(b'/')
    ''
    >>> defaultdest(b'')
    ''
    >>> defaultdest(b'http://example.org/')
    ''
    >>> defaultdest(b'http://example.org/foo/')
    'foo'
    '''
    path = util.url(source).path
    if not path:
        return b''
    return os.path.basename(os.path.normpath(path))


def sharedreposource(repo):
    """Returns repository object for source repository of a shared repo.

    If repo is not a shared repository, returns None.
    """
    if repo.sharedpath == repo.path:
        return None

    if util.safehasattr(repo, b'srcrepo') and repo.srcrepo:
        return repo.srcrepo

    # the sharedpath always ends in the .hg; we want the path to the repo
    source = repo.vfs.split(repo.sharedpath)[0]
    srcurl, branches = parseurl(source)
    srcrepo = repository(repo.ui, srcurl)
    repo.srcrepo = srcrepo
    return srcrepo


def share(
    ui,
    source,
    dest=None,
    update=True,
    bookmarks=True,
    defaultpath=None,
    relative=False,
):
    '''create a shared repository'''

    if not islocal(source):
        raise error.Abort(_(b'can only share local repositories'))

    if not dest:
        dest = defaultdest(source)
    else:
        dest = ui.expandpath(dest)

    if isinstance(source, bytes):
        origsource = ui.expandpath(source)
        source, branches = parseurl(origsource)
        srcrepo = repository(ui, source)
        rev, checkout = addbranchrevs(srcrepo, srcrepo, branches, None)
    else:
        srcrepo = source.local()
        checkout = None

    shareditems = set()
    if bookmarks:
        shareditems.add(sharedbookmarks)

    r = repository(
        ui,
        dest,
        create=True,
        createopts={
            b'sharedrepo': srcrepo,
            b'sharedrelative': relative,
            b'shareditems': shareditems,
        },
    )

    postshare(srcrepo, r, defaultpath=defaultpath)
    r = repository(ui, dest)
    _postshareupdate(r, update, checkout=checkout)
    return r
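
# A minimal usage sketch (paths hypothetical):
#
#   from mercurial import ui as uimod, hg
#   shared = hg.share(
#       uimod.ui.load(), b'/srv/repos/main', dest=b'/tmp/main-share'
#   )
#   # 'shared' reuses the store of /srv/repos/main; bookmarks are shared too
#   # unless bookmarks=False is passed.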


def unshare(ui, repo):
    """convert a shared repository to a normal one

    Copy the store data to the repo and remove the sharedpath data.

    Returns a new repository object representing the unshared repository.

    The passed repository object is not usable after this function is
    called.
    """

    with repo.lock():
        # we use locks here because if we race with commit, we
        # can end up with extra data in the cloned revlogs that's
        # not pointed to by changesets, thus causing verify to
        # fail
        destlock = copystore(ui, repo, repo.path)
        with destlock or util.nullcontextmanager():

            sharefile = repo.vfs.join(b'sharedpath')
            util.rename(sharefile, sharefile + b'.old')

            repo.requirements.discard(b'shared')
            repo.requirements.discard(b'relshared')
            repo._writerequirements()

    # Removing share changes some fundamental properties of the repo instance.
    # So we instantiate a new repo object and operate on it rather than
    # try to keep the existing repo usable.
    newrepo = repository(repo.baseui, repo.root, create=False)

    # TODO: figure out how to access subrepos that exist, but were previously
    # removed from .hgsub
    c = newrepo[b'.']
    subs = c.substate
    for s in sorted(subs):
        c.sub(s).unshare()

    localrepo.poisonrepository(repo)

    return newrepo


def postshare(sourcerepo, destrepo, defaultpath=None):
    """Called after a new shared repo is created.

    The new repo only has a requirements file and pointer to the source.
    This function configures additional shared data.

    Extensions can wrap this function and write additional entries to
    destrepo/.hg/shared to indicate additional pieces of data to be shared.
    """
    default = defaultpath or sourcerepo.ui.config(b'paths', b'default')
    if default:
        template = b'[paths]\ndefault = %s\n'
        destrepo.vfs.write(b'hgrc', util.tonativeeol(template % default))
    if repositorymod.NARROW_REQUIREMENT in sourcerepo.requirements:
        with destrepo.wlock():
            narrowspec.copytoworkingcopy(destrepo)


def _postshareupdate(repo, update, checkout=None):
    """Maybe perform a working directory update after a shared repo is created.

    ``update`` can be a boolean or a revision to update to.
    """
    if not update:
        return

    repo.ui.status(_(b"updating working directory\n"))
    if update is not True:
        checkout = update
    for test in (checkout, b'default', b'tip'):
        if test is None:
            continue
        try:
            uprev = repo.lookup(test)
            break
        except error.RepoLookupError:
            continue
    _update(repo, uprev)
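
# Hedged sketch of the fallback order above: _postshareupdate(repo, True)
# updates to the first of b'default' or b'tip' that resolves, while
# _postshareupdate(repo, b'@') treats b'@' itself as the checkout target.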


def copystore(ui, srcrepo, destpath):
    '''copy files from store of srcrepo in destpath

    returns destlock
    '''
    destlock = None
    try:
        hardlink = None
        topic = _(b'linking') if hardlink else _(b'copying')
        with ui.makeprogress(topic, unit=_(b'files')) as progress:
            num = 0
            srcpublishing = srcrepo.publishing()
            srcvfs = vfsmod.vfs(srcrepo.sharedpath)
            dstvfs = vfsmod.vfs(destpath)
            for f in srcrepo.store.copylist():
                if srcpublishing and f.endswith(b'phaseroots'):
                    continue
                dstbase = os.path.dirname(f)
                if dstbase and not dstvfs.exists(dstbase):
                    dstvfs.mkdir(dstbase)
                if srcvfs.exists(f):
                    if f.endswith(b'data'):
                        # 'dstbase' may be empty (e.g. revlog format 0)
                        lockfile = os.path.join(dstbase, b"lock")
                        # lock to avoid premature writing to the target
                        destlock = lock.lock(dstvfs, lockfile)
                    hardlink, n = util.copyfiles(
                        srcvfs.join(f), dstvfs.join(f), hardlink, progress
                    )
                    num += n
            if hardlink:
                ui.debug(b"linked %d files\n" % num)
            else:
                ui.debug(b"copied %d files\n" % num)
        return destlock
    except:  # re-raises
        release(destlock)
        raise
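
# Hedged usage sketch, mirroring the call in unshare() above: the caller owns
# the returned lock and must release it once done writing to the new store.
#
#   destlock = copystore(ui, srcrepo, destpath)
#   with destlock or util.nullcontextmanager():
#       ...  # populate the rest of the destination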


def clonewithshare(
    ui,
    peeropts,
    sharepath,
    source,
    srcpeer,
    dest,
    pull=False,
    rev=None,
    update=True,
    stream=False,
):
    """Perform a clone using a shared repo.

    The store for the repository will be located at <sharepath>/.hg. The
    specified revisions will be cloned or pulled from "source". A shared repo
    will be created at "dest" and a working copy will be created if "update" is
    True.
    """
    revs = None
    if rev:
        if not srcpeer.capable(b'lookup'):
            raise error.Abort(
                _(
                    b"src repository does not support "
                    b"revision lookup and so doesn't "
                    b"support clone by revision"
                )
            )

        # TODO this is batchable.
        remoterevs = []
        for r in rev:
            with srcpeer.commandexecutor() as e:
                remoterevs.append(
                    e.callcommand(b'lookup', {b'key': r,}).result()
                )
        revs = remoterevs

    # Obtain a lock before checking for or cloning the pooled repo otherwise
    # 2 clients may race creating or populating it.
    pooldir = os.path.dirname(sharepath)
    # lock class requires the directory to exist.
    try:
        util.makedir(pooldir, False)
    except OSError as e:
        if e.errno != errno.EEXIST:
            raise

    poolvfs = vfsmod.vfs(pooldir)
    basename = os.path.basename(sharepath)

    with lock.lock(poolvfs, b'%s.lock' % basename):
        if os.path.exists(sharepath):
            ui.status(
                _(b'(sharing from existing pooled repository %s)\n') % basename
            )
        else:
            ui.status(
                _(b'(sharing from new pooled repository %s)\n') % basename
            )
            # Always use pull mode because hardlinks in share mode don't work
            # well. Never update because working copies aren't necessary in
            # share mode.
            clone(
                ui,
                peeropts,
                source,
                dest=sharepath,
                pull=True,
                revs=rev,
                update=False,
                stream=stream,
            )

    # Resolve the value to put in [paths] section for the source.
    if islocal(source):
        defaultpath = os.path.abspath(util.urllocalpath(source))
    else:
        defaultpath = source

    sharerepo = repository(ui, path=sharepath)
    destrepo = share(
        ui,
        sharerepo,
        dest=dest,
        update=False,
        bookmarks=False,
        defaultpath=defaultpath,
    )

    # We need to perform a pull against the dest repo to fetch bookmarks
    # and other non-store data that isn't shared by default. In the case of
    # non-existing shared repo, this means we pull from the remote twice. This
    # is a bit weird. But at the time it was implemented, there wasn't an easy
    # way to pull just non-changegroup data.
    exchange.pull(destrepo, srcpeer, heads=revs)

    _postshareupdate(destrepo, update)

    return srcpeer, peer(ui, peeropts, dest)


# Recomputing branch cache might be slow on big repos,
# so just copy it
def _copycache(srcrepo, dstcachedir, fname):
    """copy a cache from srcrepo to destcachedir (if it exists)"""
    srcbranchcache = srcrepo.vfs.join(b'cache/%s' % fname)
    dstbranchcache = os.path.join(dstcachedir, fname)
    if os.path.exists(srcbranchcache):
        if not os.path.exists(dstcachedir):
            os.mkdir(dstcachedir)
        util.copyfile(srcbranchcache, dstbranchcache)
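
# A hedged sketch of a typical clone() call (URL and paths hypothetical):
#
#   srcpeer, destpeer = clone(ui, {}, b'https://example.org/repo',
#                             dest=b'repo-copy', pull=True)
#
# Passing shareopts={b'pool': b'/srv/pool', b'mode': b'identity'} instead
# routes through clonewithshare() above, placing the store under the pool
# directory keyed by the root changeset.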


def clone(
    ui,
    peeropts,
    source,
    dest=None,
    pull=False,
    revs=None,
    update=True,
    stream=False,
    branch=None,
    shareopts=None,
    storeincludepats=None,
    storeexcludepats=None,
    depth=None,
):
    """Make a copy of an existing repository.

    Create a copy of an existing repository in a new directory. The
    source and destination are URLs, as passed to the repository
    function. Returns a pair of repository peers, the source and
    newly created destination.

    The location of the source is added to the new repository's
    .hg/hgrc file, as the default to be used for future pulls and
    pushes.

    If an exception is raised, the partly cloned/updated destination
    repository will be deleted.

    Arguments:

    source: repository object or URL

    dest: URL of destination repository to create (defaults to base
    name of source repository)

    pull: always pull from source repository, even in local case or if the
    server prefers streaming

    stream: stream raw data uncompressed from repository (fast over
    LAN, slow over WAN)

    revs: revision to clone up to (implies pull=True)

    update: update working directory after clone completes, if
    destination is local repository (True means update to default rev,
    anything else is treated as a revision)

    branch: branches to clone

    shareopts: dict of options to control auto sharing behavior. The "pool" key
    activates auto sharing mode and defines the directory for stores. The
    "mode" key determines how to construct the directory name of the shared
    repository. "identity" means the name is derived from the node of the first
    changeset in the repository. "remote" means the name is derived from the
    remote's path/URL. Defaults to "identity."

    storeincludepats and storeexcludepats: sets of file patterns to include and
    exclude in the repository copy, respectively. If not defined, all files
    will be included (a "full" clone). Otherwise a "narrow" clone containing
    only the requested files will be performed. If ``storeincludepats`` is not
    defined but ``storeexcludepats`` is, ``storeincludepats`` is assumed to be
    ``path:.``. If both are empty sets, no files will be cloned.
    """

    if isinstance(source, bytes):
        origsource = ui.expandpath(source)
        source, branches = parseurl(origsource, branch)
        srcpeer = peer(ui, peeropts, source)
    else:
        srcpeer = source.peer()  # in case we were called with a localrepo
        branches = (None, branch or [])
        origsource = source = srcpeer.url()
    revs, checkout = addbranchrevs(srcpeer, srcpeer, branches, revs)

    if dest is None:
        dest = defaultdest(source)
        if dest:
            ui.status(_(b"destination directory: %s\n") % dest)
    else:
        dest = ui.expandpath(dest)

    dest = util.urllocalpath(dest)
    source = util.urllocalpath(source)

    if not dest:
        raise error.Abort(_(b"empty destination path is not valid"))

    destvfs = vfsmod.vfs(dest, expandpath=True)
    if destvfs.lexists():
        if not destvfs.isdir():
            raise error.Abort(_(b"destination '%s' already exists") % dest)
        elif destvfs.listdir():
            raise error.Abort(_(b"destination '%s' is not empty") % dest)

    createopts = {}
    narrow = False

    if storeincludepats is not None:
        narrowspec.validatepatterns(storeincludepats)
        narrow = True

    if storeexcludepats is not None:
        narrowspec.validatepatterns(storeexcludepats)
        narrow = True

    if narrow:
        # Include everything by default if only exclusion patterns defined.
        if storeexcludepats and not storeincludepats:
            storeincludepats = {b'path:.'}

        createopts[b'narrowfiles'] = True

    if depth:
        createopts[b'shallowfilestore'] = True

    if srcpeer.capable(b'lfs-serve'):
        # Repository creation honors the config if it disabled the extension, so
        # we can't just announce that lfs will be enabled. This check avoids
        # saying that lfs will be enabled, and then saying it's an unknown
        # feature. The lfs creation option is set in either case so that a
        # requirement is added. If the extension is explicitly disabled but the
        # requirement is set, the clone aborts early, before transferring any
        # data.
        createopts[b'lfs'] = True

        if extensions.disabledext(b'lfs'):
            ui.status(
                _(
                    b'(remote is using large file support (lfs), but it is '
                    b'explicitly disabled in the local configuration)\n'
                )
            )
        else:
            ui.status(
                _(
                    b'(remote is using large file support (lfs); lfs will '
                    b'be enabled for this repository)\n'
                )
            )

    shareopts = shareopts or {}
    sharepool = shareopts.get(b'pool')
    sharenamemode = shareopts.get(b'mode')
    if sharepool and islocal(dest):
        sharepath = None
        if sharenamemode == b'identity':
            # Resolve the name from the initial changeset in the remote
            # repository. This returns nullid when the remote is empty. It
            # raises RepoLookupError if revision 0 is filtered or otherwise
            # not available. If we fail to resolve, sharing is not enabled.
            try:
                with srcpeer.commandexecutor() as e:
                    rootnode = e.callcommand(
                        b'lookup', {b'key': b'0',}
                    ).result()

                if rootnode != node.nullid:
                    sharepath = os.path.join(sharepool, node.hex(rootnode))
                else:
                    ui.status(
                        _(
                            b'(not using pooled storage: '
                            b'remote appears to be empty)\n'
                        )
                    )
            except error.RepoLookupError:
                ui.status(
                    _(
                        b'(not using pooled storage: '
                        b'unable to resolve identity of remote)\n'
                    )
                )
        elif sharenamemode == b'remote':
            sharepath = os.path.join(
                sharepool, node.hex(hashutil.sha1(source).digest())
            )
        else:
            raise error.Abort(
                _(b'unknown share naming mode: %s') % sharenamemode
            )

        # TODO this is a somewhat arbitrary restriction.
        if narrow:
            ui.status(_(b'(pooled storage not supported for narrow clones)\n'))
            sharepath = None

        if sharepath:
            return clonewithshare(
                ui,
                peeropts,
                sharepath,
                source,
                srcpeer,
                dest,
                pull=pull,
                rev=revs,
                update=update,
                stream=stream,
            )

    srclock = destlock = cleandir = None
    srcrepo = srcpeer.local()
    try:
        abspath = origsource
        if islocal(origsource):
            abspath = os.path.abspath(util.urllocalpath(origsource))

        if islocal(dest):
            cleandir = dest

        copy = False
        if (
            srcrepo
            and srcrepo.cancopy()
            and islocal(dest)
            and not phases.hassecret(srcrepo)
        ):
            copy = not pull and not revs

        # TODO this is a somewhat arbitrary restriction.
        if narrow:
            copy = False

        if copy:
            try:
                # we use a lock here because if we race with commit, we
                # can end up with extra data in the cloned revlogs that's
                # not pointed to by changesets, thus causing verify to
                # fail
                srclock = srcrepo.lock(wait=False)
            except error.LockError:
                copy = False

        if copy:
            srcrepo.hook(b'preoutgoing', throw=True, source=b'clone')
            hgdir = os.path.realpath(os.path.join(dest, b".hg"))
            if not os.path.exists(dest):
                util.makedirs(dest)
            else:
                # only clean up directories we create ourselves
                cleandir = hgdir
            try:
                destpath = hgdir
                util.makedir(destpath, notindexed=True)
            except OSError as inst:
                if inst.errno == errno.EEXIST:
                    cleandir = None
                    raise error.Abort(
                        _(b"destination '%s' already exists") % dest
                    )
                raise

            destlock = copystore(ui, srcrepo, destpath)
            # copy bookmarks over
            srcbookmarks = srcrepo.vfs.join(b'bookmarks')
            dstbookmarks = os.path.join(destpath, b'bookmarks')
            if os.path.exists(srcbookmarks):
                util.copyfile(srcbookmarks, dstbookmarks)

            dstcachedir = os.path.join(destpath, b'cache')
            for cache in cacheutil.cachetocopy(srcrepo):
                _copycache(srcrepo, dstcachedir, cache)

            # we need to re-init the repo after manually copying the data
            # into it
            destpeer = peer(srcrepo, peeropts, dest)
            srcrepo.hook(
                b'outgoing', source=b'clone', node=node.hex(node.nullid)
            )
        else:
            try:
                # only pass ui when no srcrepo
                destpeer = peer(
                    srcrepo or ui,
                    peeropts,
                    dest,
                    create=True,
                    createopts=createopts,
                )
            except OSError as inst:
                if inst.errno == errno.EEXIST:
                    cleandir = None
                    raise error.Abort(
                        _(b"destination '%s' already exists") % dest
                    )
                raise

        if revs:
            if not srcpeer.capable(b'lookup'):
                raise error.Abort(
                    _(
                        b"src repository does not support "
                        b"revision lookup and so doesn't "
                        b"support clone by revision"
                    )
                )

            # TODO this is batchable.
            remoterevs = []
            for rev in revs:
                with srcpeer.commandexecutor() as e:
                    remoterevs.append(
                        e.callcommand(b'lookup', {b'key': rev,}).result()
                    )
            revs = remoterevs

            checkout = revs[0]
        else:
            revs = None
        local = destpeer.local()
        if local:
            if narrow:
                with local.wlock(), local.lock():
                    local.setnarrowpats(storeincludepats, storeexcludepats)
                    narrowspec.copytoworkingcopy(local)

            u = util.url(abspath)
            defaulturl = bytes(u)
            local.ui.setconfig(b'paths', b'default', defaulturl, b'clone')
            if not stream:
                if pull:
                    stream = False
                else:
                    stream = None
            # internal config: ui.quietbookmarkmove
            overrides = {(b'ui', b'quietbookmarkmove'): True}
            with local.ui.configoverride(overrides, b'clone'):
                exchange.pull(
                    local,
                    srcpeer,
                    revs,
                    streamclonerequested=stream,
                    includepats=storeincludepats,
                    excludepats=storeexcludepats,
                    depth=depth,
                )
        elif srcrepo:
            # TODO lift restriction once exchange.push() accepts narrow
            # push.
            if narrow:
                raise error.Abort(
                    _(
                        b'narrow clone not available for '
                        b'remote destinations'
                    )
                )

            exchange.push(
                srcrepo,
                destpeer,
                revs=revs,
                bookmarks=srcrepo._bookmarks.keys(),
            )
        else:
            raise error.Abort(
                _(b"clone from remote to remote not supported")
            )

        cleandir = None

        destrepo = destpeer.local()
        if destrepo:
            template = uimod.samplehgrcs[b'cloned']
            u = util.url(abspath)
            u.passwd = None
            defaulturl = bytes(u)
            destrepo.vfs.write(b'hgrc', util.tonativeeol(template % defaulturl))
            destrepo.ui.setconfig(b'paths', b'default', defaulturl, b'clone')

            if ui.configbool(b'experimental', b'remotenames'):
                logexchange.pullremotenames(destrepo, srcpeer)

            if update:
                if update is not True:
                    with srcpeer.commandexecutor() as e:
                        checkout = e.callcommand(
                            b'lookup', {b'key': update,}
                        ).result()

                uprev = None
                status = None
                if checkout is not None:
                    # Some extensions (at least hg-git and hg-subversion) have
                    # a peer.lookup() implementation that returns a name instead
                    # of a nodeid. We work around it here until we've figured
                    # out a better solution.
                    if len(checkout) == 20 and checkout in destrepo:
                        uprev = checkout
                    elif scmutil.isrevsymbol(destrepo, checkout):
                        uprev = scmutil.revsymbol(destrepo, checkout).node()
                else:
                    if update is not True:
                        try:
                            uprev = destrepo.lookup(update)
                        except error.RepoLookupError:
                            pass
                if uprev is None:
                    try:
                        uprev = destrepo._bookmarks[b'@']
                        update = b'@'
                        bn = destrepo[uprev].branch()
                        if bn == b'default':
                            status = _(b"updating to bookmark @\n")
                        else:
                            status = (
                                _(b"updating to bookmark @ on branch %s\n") % bn
                            )
                    except KeyError:
                        try:
                            uprev = destrepo.branchtip(b'default')
                        except error.RepoLookupError:
                            uprev = destrepo.lookup(b'tip')
                if not status:
                    bn = destrepo[uprev].branch()
                    status = _(b"updating to branch %s\n") % bn
                destrepo.ui.status(status)
                _update(destrepo, uprev)
                if update in destrepo._bookmarks:
                    bookmarks.activate(destrepo, update)
    finally:
        release(srclock, destlock)
        if cleandir is not None:
            shutil.rmtree(cleandir, True)
        if srcpeer is not None:
            srcpeer.close()
    return srcpeer, destpeer


def _showstats(repo, stats, quietempty=False):
    if quietempty and stats.isempty():
        return
    repo.ui.status(
        _(
            b"%d files updated, %d files merged, "
            b"%d files removed, %d files unresolved\n"
        )
        % (
            stats.updatedcount,
            stats.mergedcount,
            stats.removedcount,
            stats.unresolvedcount,
        )
    )


def updaterepo(repo, node, overwrite, updatecheck=None):
    """Update the working directory to node.

    When overwrite is set, changes are clobbered, merged else

    returns stats (see pydoc mercurial.merge.applyupdates)"""
    return mergemod.update(
        repo,
        node,
        branchmerge=False,
        force=overwrite,
        labels=[b'working copy', b'destination'],
        updatecheck=updatecheck,
    )
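
# Hedged note: updaterepo() with overwrite=False merges local changes into the
# destination, while overwrite=True clobbers them; clean() below relies on the
# latter and asserts that no conflicts can remain.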
1025
1025
1026
1026
def update(repo, node, quietempty=False, updatecheck=None):
    """update the working directory to node"""
    stats = updaterepo(repo, node, False, updatecheck=updatecheck)
    _showstats(repo, stats, quietempty)
    if stats.unresolvedcount:
        repo.ui.status(_(b"use 'hg resolve' to retry unresolved file merges\n"))
    return stats.unresolvedcount > 0


# naming conflict in clone()
_update = update


def clean(repo, node, show_stats=True, quietempty=False):
    """forcibly switch the working directory to node, clobbering changes"""
    stats = updaterepo(repo, node, True)
    assert stats.unresolvedcount == 0
    if show_stats:
        _showstats(repo, stats, quietempty)


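# A hedged usage sketch, not part of this module's API: how update() and
# clean() above differ for a caller. update() merges local changes into the
# destination and reports conflicts; clean() discards local changes outright.
#
#     had_conflicts = update(repo, node)  # True if file merges are unresolved
#     if had_conflicts:
#         pass  # the user is told to run 'hg resolve'
#     clean(repo, node)  # never leaves unresolved files (asserted above)

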
# naming conflict in updatetotally()
_clean = clean

_VALID_UPDATECHECKS = {
    mergemod.UPDATECHECK_ABORT,
    mergemod.UPDATECHECK_NONE,
    mergemod.UPDATECHECK_LINEAR,
    mergemod.UPDATECHECK_NO_CONFLICT,
}


def updatetotally(ui, repo, checkout, brev, clean=False, updatecheck=None):
    """Update the working directory with extra care for non-file components

    This takes care of non-file components below:

    :bookmark: might be advanced or (in)activated

    This takes arguments below:

    :checkout: to which revision the working directory is updated
    :brev: a name, which might be a bookmark to be activated after updating
    :clean: whether changes in the working directory can be discarded
    :updatecheck: how to deal with a dirty working directory

    Valid values for updatecheck are the UPDATECHECK_* constants
    defined in the merge module. Passing `None` will result in using the
    configured default.

    * ABORT: abort if the working directory is dirty
    * NONE: don't check (merge working directory changes into destination)
    * LINEAR: check that update is linear before merging working directory
      changes into destination
    * NO_CONFLICT: check that the update does not result in file merges

    Return whether a conflict was detected during the update.
    """
    if updatecheck is None:
        updatecheck = ui.config(b'commands', b'update.check')
        if updatecheck not in _VALID_UPDATECHECKS:
            # If not configured, or invalid value configured
            updatecheck = mergemod.UPDATECHECK_LINEAR
    if updatecheck not in _VALID_UPDATECHECKS:
        raise ValueError(
            r'Invalid updatecheck value %r (can accept %r)'
            % (updatecheck, _VALID_UPDATECHECKS)
        )
    with repo.wlock():
        movemarkfrom = None
        warndest = False
        if checkout is None:
            updata = destutil.destupdate(repo, clean=clean)
            checkout, movemarkfrom, brev = updata
            warndest = True

        if clean:
            ret = _clean(repo, checkout)
        else:
            if updatecheck == mergemod.UPDATECHECK_ABORT:
                cmdutil.bailifchanged(repo, merge=False)
                updatecheck = mergemod.UPDATECHECK_NONE
            ret = _update(repo, checkout, updatecheck=updatecheck)

        if not ret and movemarkfrom:
            if movemarkfrom == repo[b'.'].node():
                pass  # no-op update
            elif bookmarks.update(repo, [movemarkfrom], repo[b'.'].node()):
                b = ui.label(repo._activebookmark, b'bookmarks.active')
                ui.status(_(b"updating bookmark %s\n") % b)
            else:
                # this can happen with a non-linear update
                b = ui.label(repo._activebookmark, b'bookmarks')
                ui.status(_(b"(leaving bookmark %s)\n") % b)
                bookmarks.deactivate(repo)
        elif brev in repo._bookmarks:
            if brev != repo._activebookmark:
                b = ui.label(brev, b'bookmarks.active')
                ui.status(_(b"(activating bookmark %s)\n") % b)
            bookmarks.activate(repo, brev)
        elif brev:
            if repo._activebookmark:
                b = ui.label(repo._activebookmark, b'bookmarks')
                ui.status(_(b"(leaving bookmark %s)\n") % b)
            bookmarks.deactivate(repo)

        if warndest:
            destutil.statusotherdests(ui, repo)

    return ret


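# A minimal sketch of how `updatecheck` is usually driven from configuration.
# The hgrc snippet below is an assumed user setup, not something defined in
# this file; `commands.update.check` takes the values validated above:
#
#     [commands]
#     update.check = noconflict    # or: abort, none, linear
#
# With that setting, updatetotally() reads the value via ui.config() above
# and resolves updatecheck to mergemod.UPDATECHECK_NO_CONFLICT.

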
def merge(
    repo, node, force=False, remind=True, labels=None,
):
    """Branch merge with node, resolving changes. Return True if there are
    any unresolved conflicts."""
-    stats = mergemod.update(
-        repo,
-        node,
-        branchmerge=True,
-        force=force,
-        mergeforce=force,
-        labels=labels,
-    )
+    stats = mergemod.merge(repo[node], force=force, labels=labels)
    _showstats(repo, stats)
    if stats.unresolvedcount:
        repo.ui.status(
            _(
                b"use 'hg resolve' to retry unresolved file merges "
                b"or 'hg merge --abort' to abandon\n"
            )
        )
    elif remind:
        repo.ui.status(_(b"(branch merge, don't forget to commit)\n"))
    return stats.unresolvedcount > 0


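# A hedged caller's-eye sketch of merge() above. The point of this changeset
# is that mergemod.merge() now takes a changectx (repo[node]) instead of the
# lower-level mergemod.update(..., branchmerge=True, ...) call. The calling
# code below is illustrative only:
#
#     unresolved = merge(repo, node, force=False)
#     if unresolved:
#         pass  # user runs 'hg resolve' or 'hg merge --abort'

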
def abortmerge(ui, repo):
    ms = mergemod.mergestate.read(repo)
    if ms.active():
        # there were conflicts
        node = ms.localctx.hex()
    else:
        # there were no conflicts, mergestate was not stored
        node = repo[b'.'].hex()

    repo.ui.status(_(b"aborting the merge, updating back to %s\n") % node[:12])
    stats = mergemod.clean_update(repo[node])
    assert stats.unresolvedcount == 0
    _showstats(repo, stats)


def _incoming(
    displaychlist, subreporecurse, ui, repo, source, opts, buffered=False
):
    """
    Helper for incoming / gincoming.
    displaychlist gets called with
    (remoterepo, incomingchangesetlist, displayer) parameters,
    and is supposed to contain only code that can't be unified.
    """
    source, branches = parseurl(ui.expandpath(source), opts.get(b'branch'))
    other = peer(repo, opts, source)
    ui.status(_(b'comparing with %s\n') % util.hidepassword(source))
    revs, checkout = addbranchrevs(repo, other, branches, opts.get(b'rev'))

    if revs:
        revs = [other.lookup(rev) for rev in revs]
    other, chlist, cleanupfn = bundlerepo.getremotechanges(
        ui, repo, other, revs, opts[b"bundle"], opts[b"force"]
    )
    try:
        if not chlist:
            ui.status(_(b"no changes found\n"))
            return subreporecurse()
        ui.pager(b'incoming')
        displayer = logcmdutil.changesetdisplayer(
            ui, other, opts, buffered=buffered
        )
        displaychlist(other, chlist, displayer)
        displayer.close()
    finally:
        cleanupfn()
    subreporecurse()
    return 0  # exit code is zero since we found incoming changes


def incoming(ui, repo, source, opts):
    def subreporecurse():
        ret = 1
        if opts.get(b'subrepos'):
            ctx = repo[None]
            for subpath in sorted(ctx.substate):
                sub = ctx.sub(subpath)
                ret = min(ret, sub.incoming(ui, source, opts))
        return ret

    def display(other, chlist, displayer):
        limit = logcmdutil.getlimit(opts)
        if opts.get(b'newest_first'):
            chlist.reverse()
        count = 0
        for n in chlist:
            if limit is not None and count >= limit:
                break
            parents = [p for p in other.changelog.parents(n) if p != nullid]
            if opts.get(b'no_merges') and len(parents) == 2:
                continue
            count += 1
            displayer.show(other[n])

    return _incoming(display, subreporecurse, ui, repo, source, opts)


def _outgoing(ui, repo, dest, opts):
    path = ui.paths.getpath(dest, default=(b'default-push', b'default'))
    if not path:
        raise error.Abort(
            _(b'default repository not configured!'),
            hint=_(b"see 'hg help config.paths'"),
        )
    dest = path.pushloc or path.loc
    branches = path.branch, opts.get(b'branch') or []

    ui.status(_(b'comparing with %s\n') % util.hidepassword(dest))
    revs, checkout = addbranchrevs(repo, repo, branches, opts.get(b'rev'))
    if revs:
        revs = [repo[rev].node() for rev in scmutil.revrange(repo, revs)]

    other = peer(repo, opts, dest)
    outgoing = discovery.findcommonoutgoing(
        repo, other, revs, force=opts.get(b'force')
    )
    o = outgoing.missing
    if not o:
        scmutil.nochangesfound(repo.ui, repo, outgoing.excluded)
    return o, other


def outgoing(ui, repo, dest, opts):
    def recurse():
        ret = 1
        if opts.get(b'subrepos'):
            ctx = repo[None]
            for subpath in sorted(ctx.substate):
                sub = ctx.sub(subpath)
                ret = min(ret, sub.outgoing(ui, dest, opts))
        return ret

    limit = logcmdutil.getlimit(opts)
    o, other = _outgoing(ui, repo, dest, opts)
    if not o:
        cmdutil.outgoinghooks(ui, repo, other, opts, o)
        return recurse()

    if opts.get(b'newest_first'):
        o.reverse()
    ui.pager(b'outgoing')
    displayer = logcmdutil.changesetdisplayer(ui, repo, opts)
    count = 0
    for n in o:
        if limit is not None and count >= limit:
            break
        parents = [p for p in repo.changelog.parents(n) if p != nullid]
        if opts.get(b'no_merges') and len(parents) == 2:
            continue
        count += 1
        displayer.show(repo[n])
    displayer.close()
    cmdutil.outgoinghooks(ui, repo, other, opts, o)
    recurse()
    return 0  # exit code is zero since we found outgoing changes


def verify(repo, level=None):
    """verify the consistency of a repository"""
    ret = verifymod.verify(repo, level=level)

    # Broken subrepo references in hidden csets don't seem worth worrying about,
    # since they can't be pushed/pulled, and --hidden can be used if they are a
    # concern.

    # pathto() is needed for -R case
    revs = repo.revs(
        b"filelog(%s)", util.pathto(repo.root, repo.getcwd(), b'.hgsubstate')
    )

    if revs:
        repo.ui.status(_(b'checking subrepo links\n'))
        for rev in revs:
            ctx = repo[rev]
            try:
                for subpath in ctx.substate:
                    try:
                        ret = (
                            ctx.sub(subpath, allowcreate=False).verify() or ret
                        )
                    except error.RepoError as e:
                        repo.ui.warn(b'%d: %s\n' % (rev, e))
            except Exception:
                repo.ui.warn(
                    _(b'.hgsubstate is corrupt in revision %s\n')
                    % node.short(ctx.node())
                )

    return ret


def remoteui(src, opts):
    """build a remote ui from ui or repo and opts"""
    if util.safehasattr(src, b'baseui'):  # looks like a repository
        dst = src.baseui.copy()  # drop repo-specific config
        src = src.ui  # copy target options from repo
    else:  # assume it's a global ui object
        dst = src.copy()  # keep all global options

    # copy ssh-specific options
    for o in b'ssh', b'remotecmd':
        v = opts.get(o) or src.config(b'ui', o)
        if v:
            dst.setconfig(b"ui", o, v, b'copied')

    # copy bundle-specific options
    r = src.config(b'bundle', b'mainreporoot')
    if r:
        dst.setconfig(b'bundle', b'mainreporoot', r, b'copied')

    # copy selected local settings to the remote ui
    for sect in (b'auth', b'hostfingerprints', b'hostsecurity', b'http_proxy'):
        for key, val in src.configitems(sect):
            dst.setconfig(sect, key, val, b'copied')
    v = src.config(b'web', b'cacerts')
    if v:
        dst.setconfig(b'web', b'cacerts', util.expandpath(v), b'copied')

    return dst


# Files of interest
# Used to check whether the repository has changed, by looking at the mtime
# and size of these files.
foi = [
    (b'spath', b'00changelog.i'),
    (b'spath', b'phaseroots'),  # ! phase can change content at the same size
    (b'spath', b'obsstore'),
    (b'path', b'bookmarks'),  # ! bookmark can change content at the same size
]


class cachedlocalrepo(object):
    """Holds a localrepository that can be cached and reused."""

    def __init__(self, repo):
        """Create a new cached repo from an existing repo.

        We assume the passed in repo was recently created. If the
        repo has changed between when it was created and when it was
        turned into a cache, it may not refresh properly.
        """
        assert isinstance(repo, localrepo.localrepository)
        self._repo = repo
        self._state, self.mtime = self._repostate()
        self._filtername = repo.filtername

    def fetch(self):
        """Refresh (if necessary) and return a repository.

        If the cached instance is out of date, it will be recreated
        automatically and returned.

        Returns a tuple of the repo and a boolean indicating whether a new
        repo instance was created.
        """
        # We compare the mtimes and sizes of some well-known files to
        # determine if the repo changed. This is not precise, as mtimes
        # are susceptible to clock skew and imprecise filesystems and
        # file content can change while maintaining the same size.

        state, mtime = self._repostate()
        if state == self._state:
            return self._repo, False

        repo = repository(self._repo.baseui, self._repo.url())
        if self._filtername:
            self._repo = repo.filtered(self._filtername)
        else:
            self._repo = repo.unfiltered()
        self._state = state
        self.mtime = mtime

        return self._repo, True

    def _repostate(self):
        state = []
        maxmtime = -1
        for attr, fname in foi:
            prefix = getattr(self._repo, attr)
            p = os.path.join(prefix, fname)
            try:
                st = os.stat(p)
            except OSError:
                st = os.stat(prefix)
            state.append((st[stat.ST_MTIME], st.st_size))
            maxmtime = max(maxmtime, st[stat.ST_MTIME])

        return tuple(state), maxmtime

    def copy(self):
        """Obtain a copy of this class instance.

        A new localrepository instance is obtained. The new instance should be
        completely independent of the original.
        """
        repo = repository(self._repo.baseui, self._repo.origroot)
        if self._filtername:
            repo = repo.filtered(self._filtername)
        else:
            repo = repo.unfiltered()
        c = cachedlocalrepo(repo)
        c._state = self._state
        c.mtime = self.mtime
        return c
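

# A hedged usage sketch for cachedlocalrepo, e.g. in a long-running server
# process (the loop below is an assumption, not code from this module):
#
#     cached = cachedlocalrepo(repository(ui, path))
#     while serving:
#         repo, fresh = cached.fetch()  # reloads only if a foi file changed
#         if fresh:
#             pass  # drop any per-repo caches keyed on the old instance
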
@@ -1,2770 +1,2787 @@
# merge.py - directory-level update/merge handling for Mercurial
#
# Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno
import shutil
import stat
import struct

from .i18n import _
from .node import (
    addednodeid,
    bin,
    hex,
    modifiednodeid,
    nullhex,
    nullid,
    nullrev,
)
from .pycompat import delattr
from .thirdparty import attr
from . import (
    copies,
    encoding,
    error,
    filemerge,
    match as matchmod,
    obsutil,
    pathutil,
    pycompat,
    scmutil,
    subrepoutil,
    util,
    worker,
)
from .utils import hashutil

_pack = struct.pack
_unpack = struct.unpack


def _droponode(data):
    # used for compatibility with v1
    bits = data.split(b'\0')
    bits = bits[:-2] + bits[-1:]
    return b'\0'.join(bits)


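# A worked example of _droponode() on an illustrative record payload (the
# field values are made up): the next-to-last b'\0'-separated field is the
# "other file node", which v1 cannot represent, so it is dropped:
#
#     _droponode(b'f\0u\0hash\0af\0an\0of\0ONODE\0flags')
#     -> b'f\0u\0hash\0af\0an\0of\0flags'

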
# Merge state record types. See ``mergestate`` docs for more.
RECORD_LOCAL = b'L'
RECORD_OTHER = b'O'
RECORD_MERGED = b'F'
RECORD_CHANGEDELETE_CONFLICT = b'C'
RECORD_MERGE_DRIVER_MERGE = b'D'
RECORD_PATH_CONFLICT = b'P'
RECORD_MERGE_DRIVER_STATE = b'm'
RECORD_FILE_VALUES = b'f'
RECORD_LABELS = b'l'
RECORD_OVERRIDE = b't'
RECORD_UNSUPPORTED_MANDATORY = b'X'
RECORD_UNSUPPORTED_ADVISORY = b'x'

MERGE_DRIVER_STATE_UNMARKED = b'u'
MERGE_DRIVER_STATE_MARKED = b'm'
MERGE_DRIVER_STATE_SUCCESS = b's'

MERGE_RECORD_UNRESOLVED = b'u'
MERGE_RECORD_RESOLVED = b'r'
MERGE_RECORD_UNRESOLVED_PATH = b'pu'
MERGE_RECORD_RESOLVED_PATH = b'pr'
MERGE_RECORD_DRIVER_RESOLVED = b'd'

ACTION_FORGET = b'f'
ACTION_REMOVE = b'r'
ACTION_ADD = b'a'
ACTION_GET = b'g'
ACTION_PATH_CONFLICT = b'p'
ACTION_PATH_CONFLICT_RESOLVE = b'pr'
ACTION_ADD_MODIFIED = b'am'
ACTION_CREATED = b'c'
ACTION_DELETED_CHANGED = b'dc'
ACTION_CHANGED_DELETED = b'cd'
ACTION_MERGE = b'm'
ACTION_LOCAL_DIR_RENAME_GET = b'dg'
ACTION_DIR_RENAME_MOVE_LOCAL = b'dm'
ACTION_KEEP = b'k'
ACTION_EXEC = b'e'
ACTION_CREATED_MERGE = b'cm'


class mergestate(object):
    '''track 3-way merge state of individual files

    The merge state is stored on disk when needed. Two files are used: one with
    an old format (version 1), and one with a new format (version 2). Version 2
    stores a superset of the data in version 1, including new kinds of records
    in the future. For more about the new format, see the documentation for
    `_readrecordsv2`.

    Each record can contain arbitrary content, and has an associated type. This
    `type` should be a letter. If `type` is uppercase, the record is mandatory:
    versions of Mercurial that don't support it should abort. If `type` is
    lowercase, the record can be safely ignored.

    Currently known records:

    L: the node of the "local" part of the merge (hexified version)
    O: the node of the "other" part of the merge (hexified version)
    F: an entry for a file to be merged
    C: a change/delete or delete/change conflict
    D: a file that the external merge driver will merge internally
       (experimental)
    P: a path conflict (file vs directory)
    m: the external merge driver defined for this merge plus its run state
       (experimental)
    f: a (filename, dictionary) tuple of optional values for a given file
    X: unsupported mandatory record type (used in tests)
    x: unsupported advisory record type (used in tests)
    l: the labels for the parts of the merge.

    Merge driver run states (experimental):
    u: driver-resolved files unmarked -- needs to be run next time we're about
       to resolve or commit
    m: driver-resolved files marked -- only needs to be run before commit
    s: success/skipped -- does not need to be run any more

    Merge record states (stored in self._state, indexed by filename):
    u: unresolved conflict
    r: resolved conflict
    pu: unresolved path conflict (file conflicts with directory)
    pr: resolved path conflict
    d: driver-resolved conflict

    The resolve command transitions between 'u' and 'r' for conflicts and
    'pu' and 'pr' for path conflicts.
    '''

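    # An illustrative, made-up record set for a merge with one unresolved and
    # one resolved file, using the types documented above:
    #
    #     ('L', <hex of local node>)
    #     ('O', <hex of other node>)
    #     ('F', b'foo.c\0u\0...')  # unresolved file entry
    #     ('F', b'bar.c\0r\0...')  # resolved file entry
    #     ('l', b'working copy\0merge rev')
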
    statepathv1 = b'merge/state'
    statepathv2 = b'merge/state2'

    @staticmethod
    def clean(repo, node=None, other=None, labels=None):
        """Initialize a brand new merge state, removing any existing state on
        disk."""
        ms = mergestate(repo)
        ms.reset(node, other, labels)
        return ms

    @staticmethod
    def read(repo):
        """Initialize the merge state, reading it from disk."""
        ms = mergestate(repo)
        ms._read()
        return ms

    def __init__(self, repo):
        """Initialize the merge state.

        Do not use this directly! Instead call read() or clean()."""
        self._repo = repo
        self._dirty = False
        self._labels = None

    def reset(self, node=None, other=None, labels=None):
        self._state = {}
        self._stateextras = {}
        self._local = None
        self._other = None
        self._labels = labels
        for var in ('localctx', 'otherctx'):
            if var in vars(self):
                delattr(self, var)
        if node:
            self._local = node
            self._other = other
        self._readmergedriver = None
        if self.mergedriver:
            self._mdstate = MERGE_DRIVER_STATE_SUCCESS
        else:
            self._mdstate = MERGE_DRIVER_STATE_UNMARKED
        shutil.rmtree(self._repo.vfs.join(b'merge'), True)
        self._results = {}
        self._dirty = False

    def _read(self):
        """Analyse each record content to restore a serialized state from disk

        This function processes "record" entries produced by the
        de-serialization of the on-disk file.
        """
        self._state = {}
        self._stateextras = {}
        self._local = None
        self._other = None
        for var in ('localctx', 'otherctx'):
            if var in vars(self):
                delattr(self, var)
        self._readmergedriver = None
        self._mdstate = MERGE_DRIVER_STATE_SUCCESS
        unsupported = set()
        records = self._readrecords()
        for rtype, record in records:
            if rtype == RECORD_LOCAL:
                self._local = bin(record)
            elif rtype == RECORD_OTHER:
                self._other = bin(record)
            elif rtype == RECORD_MERGE_DRIVER_STATE:
                bits = record.split(b'\0', 1)
                mdstate = bits[1]
                if len(mdstate) != 1 or mdstate not in (
                    MERGE_DRIVER_STATE_UNMARKED,
                    MERGE_DRIVER_STATE_MARKED,
                    MERGE_DRIVER_STATE_SUCCESS,
                ):
                    # the merge driver should be idempotent, so just rerun it
                    mdstate = MERGE_DRIVER_STATE_UNMARKED

                self._readmergedriver = bits[0]
                self._mdstate = mdstate
            elif rtype in (
                RECORD_MERGED,
                RECORD_CHANGEDELETE_CONFLICT,
                RECORD_PATH_CONFLICT,
                RECORD_MERGE_DRIVER_MERGE,
            ):
                bits = record.split(b'\0')
                self._state[bits[0]] = bits[1:]
            elif rtype == RECORD_FILE_VALUES:
                filename, rawextras = record.split(b'\0', 1)
                extraparts = rawextras.split(b'\0')
                extras = {}
                i = 0
                while i < len(extraparts):
                    extras[extraparts[i]] = extraparts[i + 1]
                    i += 2

                self._stateextras[filename] = extras
            elif rtype == RECORD_LABELS:
                labels = record.split(b'\0', 2)
                self._labels = [l for l in labels if len(l) > 0]
            elif not rtype.islower():
                unsupported.add(rtype)
        self._results = {}
        self._dirty = False

        if unsupported:
            raise error.UnsupportedMergeRecords(unsupported)

    def _readrecords(self):
        """Read merge state from disk and return a list of record (TYPE, data)

        We read data from both v1 and v2 files and decide which one to use.

        V1 was used by versions prior to 2.9.1 and contains less data than
        v2. We read both versions and check whether any data in v2
        contradicts v1. If there is no contradiction, we can safely assume
        that both were written at the same time and use the extra data in v2.
        If there is a contradiction, we ignore the v2 content, assuming an
        old version of Mercurial overwrote the mergestate file and left a
        stale v2 file around.

        returns list of record [(TYPE, data), ...]"""
        v1records = self._readrecordsv1()
        v2records = self._readrecordsv2()
        if self._v1v2match(v1records, v2records):
            return v2records
        else:
            # v1 file is newer than v2 file, use it
            # we have to infer the "other" changeset of the merge
            # we cannot do better than that with v1 of the format
            mctx = self._repo[None].parents()[-1]
            v1records.append((RECORD_OTHER, mctx.hex()))
            # add placeholder "other" file node information
            # nobody is using it yet so we do not need to fetch the data
            # if mctx is wrong, `mctx[bits[-2]]` may fail.
            for idx, r in enumerate(v1records):
                if r[0] == RECORD_MERGED:
                    bits = r[1].split(b'\0')
                    bits.insert(-2, b'')
                    v1records[idx] = (r[0], b'\0'.join(bits))
            return v1records

    def _v1v2match(self, v1records, v2records):
        oldv2 = set()  # old format version of v2 record
        for rec in v2records:
            if rec[0] == RECORD_LOCAL:
                oldv2.add(rec)
            elif rec[0] == RECORD_MERGED:
                # drop the onode data (not contained in v1)
                oldv2.add((RECORD_MERGED, _droponode(rec[1])))
        for rec in v1records:
            if rec not in oldv2:
                return False
        else:
            return True

    def _readrecordsv1(self):
        """read on disk merge state for version 1 file

        returns list of record [(TYPE, data), ...]

        Note: the "F" data from this file are one entry short
        (no "other file node" entry)
        """
        records = []
        try:
            f = self._repo.vfs(self.statepathv1)
            for i, l in enumerate(f):
                if i == 0:
                    records.append((RECORD_LOCAL, l[:-1]))
                else:
                    records.append((RECORD_MERGED, l[:-1]))
            f.close()
        except IOError as err:
            if err.errno != errno.ENOENT:
                raise
        return records

    def _readrecordsv2(self):
        """read on disk merge state for version 2 file

        This format is a list of arbitrary records of the form:

          [type][length][content]

        `type` is a single character, `length` is a 4 byte integer, and
        `content` is an arbitrary byte sequence of length `length`.

        Mercurial versions prior to 3.7 have a bug where if there are
        unsupported mandatory merge records, attempting to clear out the merge
        state with hg update --clean or similar aborts. The 't' record type
        works around that by writing out what those versions treat as an
        advisory record, but later versions interpret as special: the first
        character is the 'real' record type and everything onwards is the data.

        Returns list of records [(TYPE, data), ...]."""
        records = []
        try:
            f = self._repo.vfs(self.statepathv2)
            data = f.read()
            off = 0
            end = len(data)
            while off < end:
                rtype = data[off : off + 1]
                off += 1
                length = _unpack(b'>I', data[off : (off + 4)])[0]
                off += 4
                record = data[off : (off + length)]
                off += length
                if rtype == RECORD_OVERRIDE:
                    rtype, record = record[0:1], record[1:]
                records.append((rtype, record))
            f.close()
        except IOError as err:
            if err.errno != errno.ENOENT:
                raise
        return records

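    # A worked (hypothetical) encoding under the [type][length][content]
    # scheme read above, mirroring what _writerecordsv2 emits: a record
    # ('L', data) with a 40-byte payload becomes the type byte, a big-endian
    # 4-byte length, then the payload:
    #
    #     struct.pack(b'>sI40s', b'L', 40, data)
    #     -> b'L' + b'\x00\x00\x00\x28' + data
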
    @util.propertycache
    def mergedriver(self):
        # protect against the following:
        # - A configures a malicious merge driver in their hgrc, then
        #   pauses the merge
        # - A edits their hgrc to remove references to the merge driver
        # - A gives a copy of their entire repo, including .hg, to B
        # - B inspects .hgrc and finds it to be clean
        # - B then continues the merge and the malicious merge driver
        #   gets invoked
        configmergedriver = self._repo.ui.config(
            b'experimental', b'mergedriver'
        )
        if (
            self._readmergedriver is not None
            and self._readmergedriver != configmergedriver
        ):
            raise error.ConfigError(
                _(b"merge driver changed since merge started"),
                hint=_(b"revert merge driver change or abort merge"),
            )

        return configmergedriver

    @util.propertycache
    def local(self):
        if self._local is None:
            msg = b"local accessed but self._local isn't set"
            raise error.ProgrammingError(msg)
        return self._local

    @util.propertycache
    def localctx(self):
        return self._repo[self.local]

    @util.propertycache
    def other(self):
        if self._other is None:
            msg = b"other accessed but self._other isn't set"
            raise error.ProgrammingError(msg)
        return self._other

    @util.propertycache
    def otherctx(self):
        return self._repo[self.other]

    def active(self):
        """Whether mergestate is active.

        Returns True if there appears to be mergestate. This is a rough proxy
        for "is a merge in progress."
        """
        return bool(self._local) or bool(self._state)

    def commit(self):
        """Write current state on disk (if necessary)"""
        if self._dirty:
            records = self._makerecords()
            self._writerecords(records)
            self._dirty = False

    def _makerecords(self):
        records = []
        records.append((RECORD_LOCAL, hex(self._local)))
        records.append((RECORD_OTHER, hex(self._other)))
        if self.mergedriver:
            records.append(
                (
                    RECORD_MERGE_DRIVER_STATE,
                    b'\0'.join([self.mergedriver, self._mdstate]),
                )
            )
        # Write out state items. In all cases, the value of the state map entry
        # is written as the contents of the record. The record type depends on
        # the type of state that is stored, and capital-letter records are used
        # to prevent older versions of Mercurial that do not support the feature
        # from loading them.
        for filename, v in pycompat.iteritems(self._state):
            if v[0] == MERGE_RECORD_DRIVER_RESOLVED:
                # Driver-resolved merge. These are stored in 'D' records.
                records.append(
                    (RECORD_MERGE_DRIVER_MERGE, b'\0'.join([filename] + v))
                )
            elif v[0] in (
                MERGE_RECORD_UNRESOLVED_PATH,
                MERGE_RECORD_RESOLVED_PATH,
            ):
                # Path conflicts. These are stored in 'P' records. The current
                # resolution state ('pu' or 'pr') is stored within the record.
                records.append(
                    (RECORD_PATH_CONFLICT, b'\0'.join([filename] + v))
                )
            elif v[1] == nullhex or v[6] == nullhex:
                # Change/Delete or Delete/Change conflicts. These are stored in
                # 'C' records. v[1] is the local file, and is nullhex when the
                # file is deleted locally ('dc'). v[6] is the remote file, and
                # is nullhex when the file is deleted remotely ('cd').
                records.append(
                    (RECORD_CHANGEDELETE_CONFLICT, b'\0'.join([filename] + v))
                )
            else:
                # Normal files. These are stored in 'F' records.
                records.append((RECORD_MERGED, b'\0'.join([filename] + v)))
        for filename, extras in sorted(pycompat.iteritems(self._stateextras)):
            rawextras = b'\0'.join(
                b'%s\0%s' % (k, v) for k, v in pycompat.iteritems(extras)
            )
            records.append(
                (RECORD_FILE_VALUES, b'%s\0%s' % (filename, rawextras))
            )
        if self._labels is not None:
            labels = b'\0'.join(self._labels)
            records.append((RECORD_LABELS, labels))
        return records

479 def _writerecords(self, records):
479 def _writerecords(self, records):
480 """Write current state on disk (both v1 and v2)"""
480 """Write current state on disk (both v1 and v2)"""
481 self._writerecordsv1(records)
481 self._writerecordsv1(records)
482 self._writerecordsv2(records)
482 self._writerecordsv2(records)
483
483
484 def _writerecordsv1(self, records):
484 def _writerecordsv1(self, records):
485 """Write current state on disk in a version 1 file"""
485 """Write current state on disk in a version 1 file"""
486 f = self._repo.vfs(self.statepathv1, b'wb')
486 f = self._repo.vfs(self.statepathv1, b'wb')
487 irecords = iter(records)
487 irecords = iter(records)
488 lrecords = next(irecords)
488 lrecords = next(irecords)
489 assert lrecords[0] == RECORD_LOCAL
489 assert lrecords[0] == RECORD_LOCAL
490 f.write(hex(self._local) + b'\n')
490 f.write(hex(self._local) + b'\n')
491 for rtype, data in irecords:
491 for rtype, data in irecords:
492 if rtype == RECORD_MERGED:
492 if rtype == RECORD_MERGED:
493 f.write(b'%s\n' % _droponode(data))
493 f.write(b'%s\n' % _droponode(data))
494 f.close()
494 f.close()
495
495
496 def _writerecordsv2(self, records):
496 def _writerecordsv2(self, records):
497 """Write current state on disk in a version 2 file
497 """Write current state on disk in a version 2 file
498
498
499 See the docstring for _readrecordsv2 for why we use 't'."""
499 See the docstring for _readrecordsv2 for why we use 't'."""
500 # these are the records that all version 2 clients can read
500 # these are the records that all version 2 clients can read
501 allowlist = (RECORD_LOCAL, RECORD_OTHER, RECORD_MERGED)
501 allowlist = (RECORD_LOCAL, RECORD_OTHER, RECORD_MERGED)
502 f = self._repo.vfs(self.statepathv2, b'wb')
502 f = self._repo.vfs(self.statepathv2, b'wb')
503 for key, data in records:
503 for key, data in records:
504 assert len(key) == 1
504 assert len(key) == 1
505 if key not in allowlist:
505 if key not in allowlist:
506 key, data = RECORD_OVERRIDE, b'%s%s' % (key, data)
506 key, data = RECORD_OVERRIDE, b'%s%s' % (key, data)
507 format = b'>sI%is' % len(data)
507 format = b'>sI%is' % len(data)
508 f.write(_pack(format, key, len(data), data))
508 f.write(_pack(format, key, len(data), data))
509 f.close()
509 f.close()
510
510
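    # Illustrative sketch (not part of the original file): the v2 format
    # written above is a sequence of (type, length, payload) frames packed
    # with '>sI%is' -- a one-byte record type, a 4-byte big-endian payload
    # length, then the payload itself. Assuming a complete file held in
    # `data`, one hedged way to walk the frames with only the stdlib is:
    #
    #     import struct
    #     off = 0
    #     while off < len(data):
    #         rtype, length = struct.unpack_from('>sI', data, off)
    #         payload = data[off + 5 : off + 5 + length]
    #         off += 5 + length
    #
    # The real reader is _readrecordsv2 (not shown in this hunk), which also
    # understands the lowercase 't' override records produced above.
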
    @staticmethod
    def getlocalkey(path):
        """hash the path of a local file context for storage in the .hg/merge
        directory."""

        return hex(hashutil.sha1(path).digest())

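    # Example (sketch): the merge-store key for a path is just the hex SHA-1
    # of the path bytes, so b'foo/bar.txt' maps to
    # hex(hashutil.sha1(b'foo/bar.txt').digest()); with the plain stdlib the
    # equivalent would be hashlib.sha1(b'foo/bar.txt').hexdigest(). The local
    # file contents are then stored under .hg/merge/<that hex key>.
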
    def add(self, fcl, fco, fca, fd):
        """add a new (potentially) conflicting file to the merge state
        fcl: file context for local,
        fco: file context for remote,
        fca: file context for ancestor,
        fd: file path of the resulting merge.

        note: also write the local version to the `.hg/merge` directory.
        """
        if fcl.isabsent():
            localkey = nullhex
        else:
            localkey = mergestate.getlocalkey(fcl.path())
            self._repo.vfs.write(b'merge/' + localkey, fcl.data())
        self._state[fd] = [
            MERGE_RECORD_UNRESOLVED,
            localkey,
            fcl.path(),
            fca.path(),
            hex(fca.filenode()),
            fco.path(),
            hex(fco.filenode()),
            fcl.flags(),
        ]
        self._stateextras[fd] = {b'ancestorlinknode': hex(fca.node())}
        self._dirty = True

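    # For reference, the list stored in self._state[fd] above is positional;
    # _resolve() later unpacks it as:
    #
    #     state, localkey, lfile, afile, anode, ofile, onode, flags = entry
    #
    # i.e. v[0] is the resolution state, v[1] the local-store key (nullhex
    # when the local side is absent), v[2]/v[3]/v[5] the local/ancestor/other
    # paths, v[4]/v[6] the ancestor/other filenode hashes, v[7] the flags.
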
    def addpath(self, path, frename, forigin):
        """add a new conflicting path to the merge state
        path: the path that conflicts
        frename: the filename the conflicting file was renamed to
        forigin: origin of the file ('l' or 'r' for local/remote)
        """
        self._state[path] = [MERGE_RECORD_UNRESOLVED_PATH, frename, forigin]
        self._dirty = True

    def __contains__(self, dfile):
        return dfile in self._state

    def __getitem__(self, dfile):
        return self._state[dfile][0]

    def __iter__(self):
        return iter(sorted(self._state))

    def files(self):
        return self._state.keys()

    def mark(self, dfile, state):
        self._state[dfile][0] = state
        self._dirty = True

    def mdstate(self):
        return self._mdstate

    def unresolved(self):
        """Obtain the paths of unresolved files."""

        for f, entry in pycompat.iteritems(self._state):
            if entry[0] in (
                MERGE_RECORD_UNRESOLVED,
                MERGE_RECORD_UNRESOLVED_PATH,
            ):
                yield f

    def driverresolved(self):
        """Obtain the paths of driver-resolved files."""

        for f, entry in self._state.items():
            if entry[0] == MERGE_RECORD_DRIVER_RESOLVED:
                yield f

    def extras(self, filename):
        return self._stateextras.setdefault(filename, {})

    def _resolve(self, preresolve, dfile, wctx):
        """rerun merge process for file path `dfile`"""
        if self[dfile] in (MERGE_RECORD_RESOLVED, MERGE_RECORD_DRIVER_RESOLVED):
            return True, 0
        stateentry = self._state[dfile]
        state, localkey, lfile, afile, anode, ofile, onode, flags = stateentry
        octx = self._repo[self._other]
        extras = self.extras(dfile)
        anccommitnode = extras.get(b'ancestorlinknode')
        if anccommitnode:
            actx = self._repo[anccommitnode]
        else:
            actx = None
        fcd = self._filectxorabsent(localkey, wctx, dfile)
        fco = self._filectxorabsent(onode, octx, ofile)
        # TODO: move this to filectxorabsent
        fca = self._repo.filectx(afile, fileid=anode, changectx=actx)
        # "premerge" x flags
        flo = fco.flags()
        fla = fca.flags()
        if b'x' in flags + flo + fla and b'l' not in flags + flo + fla:
            if fca.node() == nullid and flags != flo:
                if preresolve:
                    self._repo.ui.warn(
                        _(
                            b'warning: cannot merge flags for %s '
                            b'without common ancestor - keeping local flags\n'
                        )
                        % afile
                    )
            elif flags == fla:
                flags = flo
        if preresolve:
            # restore local
            if localkey != nullhex:
                f = self._repo.vfs(b'merge/' + localkey)
                wctx[dfile].write(f.read(), flags)
                f.close()
            else:
                wctx[dfile].remove(ignoremissing=True)
            complete, r, deleted = filemerge.premerge(
                self._repo,
                wctx,
                self._local,
                lfile,
                fcd,
                fco,
                fca,
                labels=self._labels,
            )
        else:
            complete, r, deleted = filemerge.filemerge(
                self._repo,
                wctx,
                self._local,
                lfile,
                fcd,
                fco,
                fca,
                labels=self._labels,
            )
        if r is None:
            # no real conflict
            del self._state[dfile]
            self._stateextras.pop(dfile, None)
            self._dirty = True
        elif not r:
            self.mark(dfile, MERGE_RECORD_RESOLVED)

        if complete:
            action = None
            if deleted:
                if fcd.isabsent():
                    # dc: local picked. Need to drop if present, which may
                    # happen on re-resolves.
                    action = ACTION_FORGET
                else:
                    # cd: remote picked (or otherwise deleted)
                    action = ACTION_REMOVE
            else:
                if fcd.isabsent():  # dc: remote picked
                    action = ACTION_GET
                elif fco.isabsent():  # cd: local picked
                    if dfile in self.localctx:
                        action = ACTION_ADD_MODIFIED
                    else:
                        action = ACTION_ADD
                # else: regular merges (no action necessary)
            self._results[dfile] = r, action

        return complete, r

    def _filectxorabsent(self, hexnode, ctx, f):
        if hexnode == nullhex:
            return filemerge.absentfilectx(ctx, f)
        else:
            return ctx[f]

    def preresolve(self, dfile, wctx):
        """run premerge process for dfile

        Returns whether the merge is complete, and the exit code."""
        return self._resolve(True, dfile, wctx)

    def resolve(self, dfile, wctx):
        """run merge process (assuming premerge was run) for dfile

        Returns the exit code of the merge."""
        return self._resolve(False, dfile, wctx)[1]

    def counts(self):
        """return counts for updated, merged and removed files in this
        session"""
        updated, merged, removed = 0, 0, 0
        for r, action in pycompat.itervalues(self._results):
            if r is None:
                updated += 1
            elif r == 0:
                if action == ACTION_REMOVE:
                    removed += 1
                else:
                    merged += 1
        return updated, merged, removed

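    # These three counters feed the familiar status line printed after an
    # update/merge ("N files updated, N files merged, N files removed,
    # N files unresolved"); the unresolved figure comes from
    # unresolvedcount() below rather than from _results.
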
    def unresolvedcount(self):
        """get unresolved count for this merge (persistent)"""
        return len(list(self.unresolved()))

    def actions(self):
        """return lists of actions to perform on the dirstate"""
        actions = {
            ACTION_REMOVE: [],
            ACTION_FORGET: [],
            ACTION_ADD: [],
            ACTION_ADD_MODIFIED: [],
            ACTION_GET: [],
        }
        for f, (r, action) in pycompat.iteritems(self._results):
            if action is not None:
                actions[action].append((f, None, b"merge result"))
        return actions

    def recordactions(self):
        """record remove/add/get actions in the dirstate"""
        branchmerge = self._repo.dirstate.p2() != nullid
        recordupdates(self._repo, self.actions(), branchmerge, None)

    def queueremove(self, f):
        """queues a file to be removed from the dirstate

        Meant for use by custom merge drivers."""
        self._results[f] = 0, ACTION_REMOVE

    def queueadd(self, f):
        """queues a file to be added to the dirstate

        Meant for use by custom merge drivers."""
        self._results[f] = 0, ACTION_ADD

    def queueget(self, f):
        """queues a file to be marked modified in the dirstate

        Meant for use by custom merge drivers."""
        self._results[f] = 0, ACTION_GET


def _getcheckunknownconfig(repo, section, name):
    config = repo.ui.config(section, name)
    valid = [b'abort', b'ignore', b'warn']
    if config not in valid:
        validstr = b', '.join([b"'" + v + b"'" for v in valid])
        raise error.ConfigError(
            _(b"%s.%s not valid ('%s' is none of %s)")
            % (section, name, config, validstr)
        )
    return config


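# Configuration sketch (illustrative, not part of the original file): the two
# consumers of this helper read settings of the form
#
#     [merge]
#     checkunknown = warn
#     checkignored = ignore
#
# where each value must be one of 'abort' (refuse to overwrite a differing
# untracked file; the upstream default), 'warn' (overwrite, but back up and
# warn), or 'ignore' (overwrite silently). Anything outside that set raises
# ConfigError above.

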
def _checkunknownfile(repo, wctx, mctx, f, f2=None):
    if wctx.isinmemory():
        # Nothing to do in IMM because nothing in the "working copy" can be an
        # unknown file.
        #
        # Note that we should bail out here, not in ``_checkunknownfiles()``,
        # because that function does other useful work.
        return False

    if f2 is None:
        f2 = f
    return (
        repo.wvfs.audit.check(f)
        and repo.wvfs.isfileorlink(f)
        and repo.dirstate.normalize(f) not in repo.dirstate
        and mctx[f2].cmp(wctx[f])
    )


class _unknowndirschecker(object):
    """
    Look for any unknown files or directories that may have a path conflict
    with a file. If any path prefix of the file exists as a file or link,
    then it conflicts. If the file itself is a directory that contains any
    file that is not tracked, then it conflicts.

    Returns the shortest path at which a conflict occurs, or None if there is
    no conflict.
    """

    def __init__(self):
        # A set of paths known to be good. This prevents repeated checking of
        # dirs. It will be updated with any new dirs that are checked and found
        # to be safe.
        self._unknowndircache = set()

        # A set of paths that are known to be absent. This prevents repeated
        # checking of subdirectories that are known not to exist. It will be
        # updated with any new dirs that are checked and found to be absent.
        self._missingdircache = set()

    def __call__(self, repo, wctx, f):
        if wctx.isinmemory():
            # Nothing to do in IMM for the same reason as ``_checkunknownfile``.
            return False

        # Check for path prefixes that exist as unknown files.
        for p in reversed(list(pathutil.finddirs(f))):
            if p in self._missingdircache:
                return
            if p in self._unknowndircache:
                continue
            if repo.wvfs.audit.check(p):
                if (
                    repo.wvfs.isfileorlink(p)
                    and repo.dirstate.normalize(p) not in repo.dirstate
                ):
                    return p
                if not repo.wvfs.lexists(p):
                    self._missingdircache.add(p)
                    return
                self._unknowndircache.add(p)

        # Check if the file conflicts with a directory containing unknown files.
        if repo.wvfs.audit.check(f) and repo.wvfs.isdir(f):
            # Does the directory contain any files that are not in the dirstate?
            for p, dirs, files in repo.wvfs.walk(f):
                for fn in files:
                    relf = util.pconvert(repo.wvfs.reljoin(p, fn))
                    relf = repo.dirstate.normalize(relf, isknown=True)
                    if relf not in repo.dirstate:
                        return f
        return None


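# Usage sketch (hypothetical loop, illustrative names): the checker is
# instantiated once per call to _checkunknownfiles() below so that its two
# caches persist across the files being examined:
#
#     checkunknowndirs = _unknowndirschecker()
#     for f in files_to_create:
#         conflict = checkunknowndirs(repo, wctx, f)
#         if conflict is not None:
#             ...record a path conflict at `conflict`...
#
# which mirrors how it is driven from the not-force branch below.

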
def _checkunknownfiles(repo, wctx, mctx, force, actions, mergeforce):
    """
    Considers any actions that care about the presence of conflicting unknown
    files. For some actions, the result is to abort; for others, it is to
    choose a different action.
    """
    fileconflicts = set()
    pathconflicts = set()
    warnconflicts = set()
    abortconflicts = set()
    unknownconfig = _getcheckunknownconfig(repo, b'merge', b'checkunknown')
    ignoredconfig = _getcheckunknownconfig(repo, b'merge', b'checkignored')
    pathconfig = repo.ui.configbool(
        b'experimental', b'merge.checkpathconflicts'
    )
    if not force:

        def collectconflicts(conflicts, config):
            if config == b'abort':
                abortconflicts.update(conflicts)
            elif config == b'warn':
                warnconflicts.update(conflicts)

        checkunknowndirs = _unknowndirschecker()
        for f, (m, args, msg) in pycompat.iteritems(actions):
            if m in (ACTION_CREATED, ACTION_DELETED_CHANGED):
                if _checkunknownfile(repo, wctx, mctx, f):
                    fileconflicts.add(f)
                elif pathconfig and f not in wctx:
                    path = checkunknowndirs(repo, wctx, f)
                    if path is not None:
                        pathconflicts.add(path)
            elif m == ACTION_LOCAL_DIR_RENAME_GET:
                if _checkunknownfile(repo, wctx, mctx, f, args[0]):
                    fileconflicts.add(f)

        allconflicts = fileconflicts | pathconflicts
        ignoredconflicts = {c for c in allconflicts if repo.dirstate._ignore(c)}
        unknownconflicts = allconflicts - ignoredconflicts
        collectconflicts(ignoredconflicts, ignoredconfig)
        collectconflicts(unknownconflicts, unknownconfig)
    else:
        for f, (m, args, msg) in pycompat.iteritems(actions):
            if m == ACTION_CREATED_MERGE:
                fl2, anc = args
                different = _checkunknownfile(repo, wctx, mctx, f)
                if repo.dirstate._ignore(f):
                    config = ignoredconfig
                else:
                    config = unknownconfig

                # The behavior when force is True is described by this table:
                #  config  different  mergeforce  |    action    backup
                #    *         n          *       |      get        n
                #    *         y          y       |     merge       -
                #   abort      y          n       |     merge       -   (1)
                #   warn       y          n       |  warn + get     y
                #  ignore      y          n       |      get        y
                #
                # (1) this is probably the wrong behavior here -- we should
                #     probably abort, but some actions like rebases currently
                #     don't like an abort happening in the middle of
                #     merge.update.
                if not different:
                    actions[f] = (ACTION_GET, (fl2, False), b'remote created')
                elif mergeforce or config == b'abort':
                    actions[f] = (
                        ACTION_MERGE,
                        (f, f, None, False, anc),
                        b'remote differs from untracked local',
                    )
                elif config == b'abort':
                    # Note: this branch is shadowed by the preceding
                    # `mergeforce or config == b'abort'` test and is
                    # effectively unreachable.
                    abortconflicts.add(f)
                else:
                    if config == b'warn':
                        warnconflicts.add(f)
                    actions[f] = (ACTION_GET, (fl2, True), b'remote created')

    for f in sorted(abortconflicts):
        warn = repo.ui.warn
        if f in pathconflicts:
            if repo.wvfs.isfileorlink(f):
                warn(_(b"%s: untracked file conflicts with directory\n") % f)
            else:
                warn(_(b"%s: untracked directory conflicts with file\n") % f)
        else:
            warn(_(b"%s: untracked file differs\n") % f)
    if abortconflicts:
        raise error.Abort(
            _(
                b"untracked files in working directory "
                b"differ from files in requested revision"
            )
        )

    for f in sorted(warnconflicts):
        if repo.wvfs.isfileorlink(f):
            repo.ui.warn(_(b"%s: replacing untracked file\n") % f)
        else:
            repo.ui.warn(_(b"%s: replacing untracked files in directory\n") % f)

    for f, (m, args, msg) in pycompat.iteritems(actions):
        if m == ACTION_CREATED:
            backup = (
                f in fileconflicts
                or f in pathconflicts
                or any(p in pathconflicts for p in pathutil.finddirs(f))
            )
            (flags,) = args
            actions[f] = (ACTION_GET, (flags, backup), msg)


def _forgetremoved(wctx, mctx, branchmerge):
    """
    Forget removed files

    If we're jumping between revisions (as opposed to merging), and if
    neither the working directory nor the target rev has the file,
    then we need to remove it from the dirstate, to prevent the
    dirstate from listing the file when it is no longer in the
    manifest.

    If we're merging, and the other revision has removed a file
    that is not present in the working directory, we need to mark it
    as removed.
    """

    actions = {}
    m = ACTION_FORGET
    if branchmerge:
        m = ACTION_REMOVE
    for f in wctx.deleted():
        if f not in mctx:
            actions[f] = m, None, b"forget deleted"

    if not branchmerge:
        for f in wctx.removed():
            if f not in mctx:
                actions[f] = ACTION_FORGET, None, b"forget removed"

    return actions


def _checkcollision(repo, wmf, actions):
    """
    Check for case-folding collisions.
    """
    # If the repo is narrowed, filter out files outside the narrowspec.
    narrowmatch = repo.narrowmatch()
    if not narrowmatch.always():
        pmmf = set(wmf.walk(narrowmatch))
        if actions:
            narrowactions = {}
            for m, actionsfortype in pycompat.iteritems(actions):
                narrowactions[m] = []
                for (f, args, msg) in actionsfortype:
                    if narrowmatch(f):
                        narrowactions[m].append((f, args, msg))
            actions = narrowactions
    else:
        # build provisional merged manifest up
        pmmf = set(wmf)

    if actions:
        # KEEP and EXEC are no-op
        for m in (
            ACTION_ADD,
            ACTION_ADD_MODIFIED,
            ACTION_FORGET,
            ACTION_GET,
            ACTION_CHANGED_DELETED,
            ACTION_DELETED_CHANGED,
        ):
            for f, args, msg in actions[m]:
                pmmf.add(f)
        for f, args, msg in actions[ACTION_REMOVE]:
            pmmf.discard(f)
        for f, args, msg in actions[ACTION_DIR_RENAME_MOVE_LOCAL]:
            f2, flags = args
            pmmf.discard(f2)
            pmmf.add(f)
        for f, args, msg in actions[ACTION_LOCAL_DIR_RENAME_GET]:
            pmmf.add(f)
        for f, args, msg in actions[ACTION_MERGE]:
            f1, f2, fa, move, anc = args
            if move:
                pmmf.discard(f1)
            pmmf.add(f)

    # check case-folding collision in provisional merged manifest
    foldmap = {}
    for f in pmmf:
        fold = util.normcase(f)
        if fold in foldmap:
            raise error.Abort(
                _(b"case-folding collision between %s and %s")
                % (f, foldmap[fold])
            )
        foldmap[fold] = f

    # check case-folding of directories
    foldprefix = unfoldprefix = lastfull = b''
    for fold, f in sorted(foldmap.items()):
        if fold.startswith(foldprefix) and not f.startswith(unfoldprefix):
            # the folded prefix matches but actual casing is different
            raise error.Abort(
                _(b"case-folding collision between %s and directory of %s")
                % (lastfull, f)
            )
        foldprefix = fold + b'/'
        unfoldprefix = f + b'/'
        lastfull = f


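# Example of the collision this guards against: util.normcase() folds case
# (lower-casing on most platforms), so a provisional manifest containing both
# b'README' and b'readme' maps them to the same foldmap key and hits the
# first abort above. The directory pass catches the analogous prefix case,
# e.g. a file b'a' alongside b'A/x': the folded prefix b'a/' matches while
# the actual casing differs, so it aborts with the "directory of" message.

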
def driverpreprocess(repo, ms, wctx, labels=None):
    """run the preprocess step of the merge driver, if any

    This is currently not implemented -- it's an extension point."""
    return True


def driverconclude(repo, ms, wctx, labels=None):
    """run the conclude step of the merge driver, if any

    This is currently not implemented -- it's an extension point."""
    return True


def _filesindirs(repo, manifest, dirs):
    """
    Generator that yields pairs of all the files in the manifest that are found
    inside the directories listed in dirs, and which directory they are found
    in.
    """
    for f in manifest:
        for p in pathutil.finddirs(f):
            if p in dirs:
                yield f, p
                break


def checkpathconflicts(repo, wctx, mctx, actions):
    """
    Check if any actions introduce path conflicts in the repository, updating
    actions to record or handle the path conflict accordingly.
    """
    mf = wctx.manifest()

    # The set of local files that conflict with a remote directory.
    localconflicts = set()

    # The set of directories that conflict with a remote file, and so may cause
    # conflicts if they still contain any files after the merge.
    remoteconflicts = set()

    # The set of directories that appear as both a file and a directory in the
    # remote manifest. These indicate an invalid remote manifest, which
    # can't be updated to cleanly.
    invalidconflicts = set()

    # The set of directories that contain files that are being created.
    createdfiledirs = set()

    # The set of files deleted by all the actions.
    deletedfiles = set()

    for f, (m, args, msg) in actions.items():
        if m in (
            ACTION_CREATED,
            ACTION_DELETED_CHANGED,
            ACTION_MERGE,
            ACTION_CREATED_MERGE,
        ):
            # This action may create a new local file.
            createdfiledirs.update(pathutil.finddirs(f))
            if mf.hasdir(f):
                # The file aliases a local directory. This might be ok if all
                # the files in the local directory are being deleted. This
                # will be checked once we know what all the deleted files are.
                remoteconflicts.add(f)
        # Track the names of all deleted files.
        if m == ACTION_REMOVE:
            deletedfiles.add(f)
        if m == ACTION_MERGE:
            f1, f2, fa, move, anc = args
            if move:
                deletedfiles.add(f1)
        if m == ACTION_DIR_RENAME_MOVE_LOCAL:
            f2, flags = args
            deletedfiles.add(f2)

    # Check all directories that contain created files for path conflicts.
    for p in createdfiledirs:
        if p in mf:
            if p in mctx:
                # A file is in a directory which aliases both a local
                # and a remote file. This is an internal inconsistency
                # within the remote manifest.
                invalidconflicts.add(p)
            else:
                # A file is in a directory which aliases a local file.
                # We will need to rename the local file.
                localconflicts.add(p)
        if p in actions and actions[p][0] in (
            ACTION_CREATED,
            ACTION_DELETED_CHANGED,
            ACTION_MERGE,
            ACTION_CREATED_MERGE,
        ):
            # The file is in a directory which aliases a remote file.
            # This is an internal inconsistency within the remote
            # manifest.
            invalidconflicts.add(p)

    # Rename all local conflicting files that have not been deleted.
    for p in localconflicts:
        if p not in deletedfiles:
            ctxname = bytes(wctx).rstrip(b'+')
            pnew = util.safename(p, ctxname, wctx, set(actions.keys()))
            actions[pnew] = (
                ACTION_PATH_CONFLICT_RESOLVE,
                (p,),
                b'local path conflict',
            )
            actions[p] = (ACTION_PATH_CONFLICT, (pnew, b'l'), b'path conflict')

    if remoteconflicts:
        # Check if all files in the conflicting directories have been removed.
        ctxname = bytes(mctx).rstrip(b'+')
        for f, p in _filesindirs(repo, mf, remoteconflicts):
            if f not in deletedfiles:
                m, args, msg = actions[p]
                pnew = util.safename(p, ctxname, wctx, set(actions.keys()))
                if m in (ACTION_DELETED_CHANGED, ACTION_MERGE):
                    # Action was merge, just update target.
                    actions[pnew] = (m, args, msg)
                else:
                    # Action was create, change to renamed get action.
                    fl = args[0]
                    actions[pnew] = (
                        ACTION_LOCAL_DIR_RENAME_GET,
                        (p, fl),
                        b'remote path conflict',
                    )
                actions[p] = (
                    ACTION_PATH_CONFLICT,
                    (pnew, ACTION_REMOVE),
                    b'path conflict',
                )
                remoteconflicts.remove(p)
                break

    if invalidconflicts:
        for p in invalidconflicts:
            repo.ui.warn(_(b"%s: is both a file and a directory\n") % p)
        raise error.Abort(_(b"destination manifest contains path conflicts"))


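# Worked example (sketch): if the local side tracks a file b'a' and the
# remote side creates b'a/b', then b'a' lands in localconflicts and is
# renamed via util.safename() to a derived name (something like
# b'a~<ctxname>...'; the exact scheme is up to util.safename), recorded as an
# ACTION_PATH_CONFLICT_RESOLVE / ACTION_PATH_CONFLICT pair above. The
# mirror-image case -- a remote file aliasing a still-populated local
# directory -- goes through remoteconflicts instead.

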
def _filternarrowactions(narrowmatch, branchmerge, actions):
    """
    Filters out actions that can be ignored because the repo is narrowed.

    Raise an exception if the merge cannot be completed because the repo is
    narrowed.
    """
    nooptypes = {b'k'}  # TODO: handle with nonconflicttypes
    nonconflicttypes = set(b'a am c cm f g r e'.split())
    # We mutate the items in the dict during iteration, so iterate
    # over a copy.
    for f, action in list(actions.items()):
        if narrowmatch(f):
            pass
        elif not branchmerge:
            del actions[f]  # just updating, ignore changes outside clone
        elif action[0] in nooptypes:
            del actions[f]  # merge does not affect file
        elif action[0] in nonconflicttypes:
            raise error.Abort(
                _(
                    b'merge affects file \'%s\' outside narrow, '
                    b'which is not yet supported'
                )
                % f,
                hint=_(b'merging in the other direction may work'),
            )
        else:
            raise error.Abort(
                _(b'conflict in file \'%s\' is outside narrow clone') % f
            )


def manifestmerge(
    repo,
    wctx,
    p2,
    pa,
    branchmerge,
    force,
    matcher,
    acceptremote,
    followcopies,
    forcefulldiff=False,
):
    """
    Merge wctx and p2 with ancestor pa and generate merge action list

    branchmerge and force are as passed in to update
    matcher = matcher to filter file lists
    acceptremote = accept the incoming changes without prompting
    """
    if matcher is not None and matcher.always():
        matcher = None

    # manifests fetched in order are going to be faster, so prime the caches
    [
        x.manifest()
        for x in sorted(wctx.parents() + [p2, pa], key=scmutil.intrev)
    ]

    branch_copies1 = copies.branch_copies()
    branch_copies2 = copies.branch_copies()
    diverge = {}
    if followcopies:
        branch_copies1, branch_copies2, diverge = copies.mergecopies(
            repo, wctx, p2, pa
        )

    boolbm = pycompat.bytestr(bool(branchmerge))
    boolf = pycompat.bytestr(bool(force))
    boolm = pycompat.bytestr(bool(matcher))
    repo.ui.note(_(b"resolving manifests\n"))
    repo.ui.debug(
        b" branchmerge: %s, force: %s, partial: %s\n" % (boolbm, boolf, boolm)
    )
    repo.ui.debug(b" ancestor: %s, local: %s, remote: %s\n" % (pa, wctx, p2))

    m1, m2, ma = wctx.manifest(), p2.manifest(), pa.manifest()
    copied1 = set(branch_copies1.copy.values())
    copied1.update(branch_copies1.movewithdir.values())
    copied2 = set(branch_copies2.copy.values())
    copied2.update(branch_copies2.movewithdir.values())

    if b'.hgsubstate' in m1 and wctx.rev() is None:
        # Check whether sub state is modified, and overwrite the manifest
        # to flag the change. If wctx is a committed revision, we shouldn't
        # care for the dirty state of the working directory.
        if any(wctx.sub(s).dirty() for s in wctx.substate):
            m1[b'.hgsubstate'] = modifiednodeid

    # Don't use m2-vs-ma optimization if:
    # - ma is the same as m1 or m2, which we're just going to diff again later
    # - The caller specifically asks for a full diff, which is useful during bid
    #   merge.
    if pa not in ([wctx, p2] + wctx.parents()) and not forcefulldiff:
        # Identify which files are relevant to the merge, so we can limit the
        # total m1-vs-m2 diff to just those files. This has significant
        # performance benefits in large repositories.
        relevantfiles = set(ma.diff(m2).keys())

        # For copied and moved files, we need to add the source file too.
        for copykey, copyvalue in pycompat.iteritems(branch_copies1.copy):
            if copyvalue in relevantfiles:
                relevantfiles.add(copykey)
        for movedirkey in branch_copies1.movewithdir:
            relevantfiles.add(movedirkey)
        filesmatcher = scmutil.matchfiles(repo, relevantfiles)
        matcher = matchmod.intersectmatchers(matcher, filesmatcher)

    diff = m1.diff(m2, match=matcher)

    actions = {}
    for f, ((n1, fl1), (n2, fl2)) in pycompat.iteritems(diff):
        if n1 and n2:  # file exists on both local and remote side
            if f not in ma:
                # TODO: what if they're renamed from different sources?
                fa = branch_copies1.copy.get(
                    f, None
                ) or branch_copies2.copy.get(f, None)
                if fa is not None:
                    actions[f] = (
                        ACTION_MERGE,
                        (f, f, fa, False, pa.node()),
                        b'both renamed from %s' % fa,
                    )
                else:
                    actions[f] = (
                        ACTION_MERGE,
                        (f, f, None, False, pa.node()),
                        b'both created',
                    )
            else:
                a = ma[f]
                fla = ma.flags(f)
                nol = b'l' not in fl1 + fl2 + fla
                if n2 == a and fl2 == fla:
                    actions[f] = (ACTION_KEEP, (), b'remote unchanged')
                elif n1 == a and fl1 == fla:  # local unchanged - use remote
                    if n1 == n2:  # optimization: keep local content
                        actions[f] = (
                            ACTION_EXEC,
                            (fl2,),
                            b'update permissions',
                        )
                    else:
                        actions[f] = (
                            ACTION_GET,
                            (fl2, False),
                            b'remote is newer',
                        )
                elif nol and n2 == a:  # remote only changed 'x'
                    actions[f] = (ACTION_EXEC, (fl2,), b'update permissions')
                elif nol and n1 == a:  # local only changed 'x'
                    actions[f] = (ACTION_GET, (fl1, False), b'remote is newer')
                else:  # both changed something
                    actions[f] = (
                        ACTION_MERGE,
                        (f, f, f, False, pa.node()),
                        b'versions differ',
                    )
        elif n1:  # file exists only on local side
            if f in copied2:
                pass  # we'll deal with it on m2 side
            elif (
                f in branch_copies1.movewithdir
            ):  # directory rename, move local
                f2 = branch_copies1.movewithdir[f]
                if f2 in m2:
                    actions[f2] = (
                        ACTION_MERGE,
                        (f, f2, None, True, pa.node()),
                        b'remote directory rename, both created',
                    )
                else:
                    actions[f2] = (
                        ACTION_DIR_RENAME_MOVE_LOCAL,
                        (f, fl1),
                        b'remote directory rename - move from %s' % f,
                    )
            elif f in branch_copies1.copy:
                f2 = branch_copies1.copy[f]
                actions[f] = (
                    ACTION_MERGE,
                    (f, f2, f2, False, pa.node()),
                    b'local copied/moved from %s' % f2,
                )
            elif f in ma:  # clean, a different, no remote
                if n1 != ma[f]:
                    if acceptremote:
                        actions[f] = (ACTION_REMOVE, None, b'remote delete')
                    else:
                        actions[f] = (
                            ACTION_CHANGED_DELETED,
                            (f, None, f, False, pa.node()),
                            b'prompt changed/deleted',
                        )
                elif n1 == addednodeid:
                    # This extra 'a' is added by working copy manifest to mark
                    # the file as locally added. We should forget it instead of
                    # deleting it.
                    actions[f] = (ACTION_FORGET, None, b'remote deleted')
                else:
                    actions[f] = (ACTION_REMOVE, None, b'other deleted')
        elif n2:  # file exists only on remote side
            if f in copied1:
                pass  # we'll deal with it on m1 side
            elif f in branch_copies2.movewithdir:
                f2 = branch_copies2.movewithdir[f]
                if f2 in m1:
                    actions[f2] = (
                        ACTION_MERGE,
                        (f2, f, None, False, pa.node()),
                        b'local directory rename, both created',
                    )
                else:
                    actions[f2] = (
                        ACTION_LOCAL_DIR_RENAME_GET,
                        (f, fl2),
                        b'local directory rename - get from %s' % f,
                    )
            elif f in branch_copies2.copy:
                f2 = branch_copies2.copy[f]
                if f2 in m2:
                    actions[f] = (
                        ACTION_MERGE,
                        (f2, f, f2, False, pa.node()),
                        b'remote copied from %s' % f2,
                    )
                else:
                    actions[f] = (
                        ACTION_MERGE,
                        (f2, f, f2, True, pa.node()),
                        b'remote moved from %s' % f2,
                    )
            elif f not in ma:
                # local unknown, remote created: the logic is described by the
1441 # following table:
1441 # following table:
1442 #
1442 #
1443 # force branchmerge different | action
1443 # force branchmerge different | action
1444 # n * * | create
1444 # n * * | create
1445 # y n * | create
1445 # y n * | create
1446 # y y n | create
1446 # y y n | create
1447 # y y y | merge
1447 # y y y | merge
1448 #
1448 #
1449 # Checking whether the files are different is expensive, so we
1449 # Checking whether the files are different is expensive, so we
1450 # don't do that when we can avoid it.
1450 # don't do that when we can avoid it.
1451 if not force:
1451 if not force:
1452 actions[f] = (ACTION_CREATED, (fl2,), b'remote created')
1452 actions[f] = (ACTION_CREATED, (fl2,), b'remote created')
1453 elif not branchmerge:
1453 elif not branchmerge:
1454 actions[f] = (ACTION_CREATED, (fl2,), b'remote created')
1454 actions[f] = (ACTION_CREATED, (fl2,), b'remote created')
1455 else:
1455 else:
1456 actions[f] = (
1456 actions[f] = (
1457 ACTION_CREATED_MERGE,
1457 ACTION_CREATED_MERGE,
1458 (fl2, pa.node()),
1458 (fl2, pa.node()),
1459 b'remote created, get or merge',
1459 b'remote created, get or merge',
1460 )
1460 )
1461 elif n2 != ma[f]:
1461 elif n2 != ma[f]:
1462 df = None
1462 df = None
1463 for d in branch_copies1.dirmove:
1463 for d in branch_copies1.dirmove:
1464 if f.startswith(d):
1464 if f.startswith(d):
1465 # new file added in a directory that was moved
1465 # new file added in a directory that was moved
1466 df = branch_copies1.dirmove[d] + f[len(d) :]
1466 df = branch_copies1.dirmove[d] + f[len(d) :]
1467 break
1467 break
1468 if df is not None and df in m1:
1468 if df is not None and df in m1:
1469 actions[df] = (
1469 actions[df] = (
1470 ACTION_MERGE,
1470 ACTION_MERGE,
1471 (df, f, f, False, pa.node()),
1471 (df, f, f, False, pa.node()),
1472 b'local directory rename - respect move '
1472 b'local directory rename - respect move '
1473 b'from %s' % f,
1473 b'from %s' % f,
1474 )
1474 )
1475 elif acceptremote:
1475 elif acceptremote:
1476 actions[f] = (ACTION_CREATED, (fl2,), b'remote recreating')
1476 actions[f] = (ACTION_CREATED, (fl2,), b'remote recreating')
1477 else:
1477 else:
1478 actions[f] = (
1478 actions[f] = (
1479 ACTION_DELETED_CHANGED,
1479 ACTION_DELETED_CHANGED,
1480 (None, f, f, False, pa.node()),
1480 (None, f, f, False, pa.node()),
1481 b'prompt deleted/changed',
1481 b'prompt deleted/changed',
1482 )
1482 )
1483
1483
1484 if repo.ui.configbool(b'experimental', b'merge.checkpathconflicts'):
1484 if repo.ui.configbool(b'experimental', b'merge.checkpathconflicts'):
1485 # If we are merging, look for path conflicts.
1485 # If we are merging, look for path conflicts.
1486 checkpathconflicts(repo, wctx, p2, actions)
1486 checkpathconflicts(repo, wctx, p2, actions)
1487
1487
1488 narrowmatch = repo.narrowmatch()
1488 narrowmatch = repo.narrowmatch()
1489 if not narrowmatch.always():
1489 if not narrowmatch.always():
1490 # Updates "actions" in place
1490 # Updates "actions" in place
1491 _filternarrowactions(narrowmatch, branchmerge, actions)
1491 _filternarrowactions(narrowmatch, branchmerge, actions)
1492
1492
1493 renamedelete = branch_copies1.renamedelete
1493 renamedelete = branch_copies1.renamedelete
1494 renamedelete.update(branch_copies2.renamedelete)
1494 renamedelete.update(branch_copies2.renamedelete)
1495
1495
1496 return actions, diverge, renamedelete
1496 return actions, diverge, renamedelete
1497
1497
1498
1498
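# An illustrative sketch (not part of the module's API) of the shape of the
# ``actions`` dict that manifestmerge() returns: file name -> (action, args,
# message). The file names and the all-zero ancestor node id below are
# hypothetical, purely for illustration.
def _example_manifestmerge_actions():
    fake_ancestor_node = b'\x00' * 20  # hypothetical ancestor node id
    return {
        # both sides changed README: merge with f1 == f2 == fa, no move
        b'README': (
            ACTION_MERGE,
            (b'README', b'README', b'README', False, fake_ancestor_node),
            b'versions differ',
        ),
        # only the remote side added this file: create it, empty flags
        b'newfile.py': (ACTION_CREATED, (b'',), b'remote created'),
    }
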
def _resolvetrivial(repo, wctx, mctx, ancestor, actions):
    """Resolves false conflicts where the nodeid changed but the content
    remained the same."""
    # We force a copy of actions.items() because we're going to mutate
    # actions as we resolve trivial conflicts.
    for f, (m, args, msg) in list(actions.items()):
        if (
            m == ACTION_CHANGED_DELETED
            and f in ancestor
            and not wctx[f].cmp(ancestor[f])
        ):
            # local did change but ended up with same content
            actions[f] = ACTION_REMOVE, None, b'prompt same'
        elif (
            m == ACTION_DELETED_CHANGED
            and f in ancestor
            and not mctx[f].cmp(ancestor[f])
        ):
            # remote did change but ended up with same content
            del actions[f]  # don't get = keep local deleted

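# An illustrative sketch of the idea behind _resolvetrivial(), using plain
# dicts of file contents in place of contexts (``local``, ``remote`` and
# ``ancestor`` here are hypothetical stand-ins): a side that "changed" a file
# back to the ancestor's content is not a real conflict.
def _example_resolvetrivial_idea(local, remote, ancestor, actions):
    for f, (m, args, msg) in list(actions.items()):
        if m == ACTION_CHANGED_DELETED and local.get(f) == ancestor.get(f):
            # local content matches the ancestor: demote to a plain remove
            actions[f] = (ACTION_REMOVE, None, b'prompt same')
        elif m == ACTION_DELETED_CHANGED and remote.get(f) == ancestor.get(f):
            # remote content matches the ancestor: keep the local deletion
            del actions[f]
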
def calculateupdates(
    repo,
    wctx,
    mctx,
    ancestors,
    branchmerge,
    force,
    acceptremote,
    followcopies,
    matcher=None,
    mergeforce=False,
):
    """Calculate the actions needed to merge mctx into wctx using ancestors"""
    # Avoid cycle.
    from . import sparse

    if len(ancestors) == 1:  # default
        actions, diverge, renamedelete = manifestmerge(
            repo,
            wctx,
            mctx,
            ancestors[0],
            branchmerge,
            force,
            matcher,
            acceptremote,
            followcopies,
        )
        _checkunknownfiles(repo, wctx, mctx, force, actions, mergeforce)

    else:  # only when merge.preferancestor=* - the default
        repo.ui.note(
            _(b"note: merging %s and %s using bids from ancestors %s\n")
            % (
                wctx,
                mctx,
                _(b' and ').join(pycompat.bytestr(anc) for anc in ancestors),
            )
        )

        # Call for bids
        fbids = (
            {}
        )  # mapping filename to bids (action method to list of actions)
        diverge, renamedelete = None, None
        for ancestor in ancestors:
            repo.ui.note(_(b'\ncalculating bids for ancestor %s\n') % ancestor)
            actions, diverge1, renamedelete1 = manifestmerge(
                repo,
                wctx,
                mctx,
                ancestor,
                branchmerge,
                force,
                matcher,
                acceptremote,
                followcopies,
                forcefulldiff=True,
            )
            _checkunknownfiles(repo, wctx, mctx, force, actions, mergeforce)

            # Track the shortest set of warnings on the theory that bid
            # merge will correctly incorporate more information
            if diverge is None or len(diverge1) < len(diverge):
                diverge = diverge1
            if renamedelete is None or len(renamedelete) < len(renamedelete1):
                renamedelete = renamedelete1

            for f, a in sorted(pycompat.iteritems(actions)):
                m, args, msg = a
                repo.ui.debug(b' %s: %s -> %s\n' % (f, msg, m))
                if f in fbids:
                    d = fbids[f]
                    if m in d:
                        d[m].append(a)
                    else:
                        d[m] = [a]
                else:
                    fbids[f] = {m: [a]}

        # Pick the best bid for each file
        repo.ui.note(_(b'\nauction for merging merge bids\n'))
        actions = {}
        for f, bids in sorted(fbids.items()):
            # bids is a mapping from action method to list of actions
            # Consensus?
            if len(bids) == 1:  # all bids are the same kind of method
                m, l = list(bids.items())[0]
                if all(a == l[0] for a in l[1:]):  # len(bids) is > 1
                    repo.ui.note(_(b" %s: consensus for %s\n") % (f, m))
                    actions[f] = l[0]
                    continue
            # If keep is an option, just do it.
            if ACTION_KEEP in bids:
                repo.ui.note(_(b" %s: picking 'keep' action\n") % f)
                actions[f] = bids[ACTION_KEEP][0]
                continue
            # If there are gets and they all agree [how could they not?], do it.
            if ACTION_GET in bids:
                ga0 = bids[ACTION_GET][0]
                if all(a == ga0 for a in bids[ACTION_GET][1:]):
                    repo.ui.note(_(b" %s: picking 'get' action\n") % f)
                    actions[f] = ga0
                    continue
            # TODO: Consider other simple actions such as mode changes
            # Handle inefficient democrazy.
            repo.ui.note(_(b' %s: multiple bids for merge action:\n') % f)
            for m, l in sorted(bids.items()):
                for _f, args, msg in l:
                    repo.ui.note(b' %s -> %s\n' % (msg, m))
            # Pick random action. TODO: Instead, prompt user when resolving
            m, l = list(bids.items())[0]
            repo.ui.warn(
                _(b' %s: ambiguous merge - picked %s action\n') % (f, m)
            )
            actions[f] = l[0]
            continue
        repo.ui.note(_(b'end of auction\n\n'))

    if wctx.rev() is None:
        fractions = _forgetremoved(wctx, mctx, branchmerge)
        actions.update(fractions)

    prunedactions = sparse.filterupdatesactions(
        repo, wctx, mctx, branchmerge, actions
    )
    _resolvetrivial(repo, wctx, mctx, ancestors[0], actions)

    return prunedactions, diverge, renamedelete

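# A self-contained sketch of the bid bookkeeping above: ``fbids`` maps file
# name -> {action: [action tuples]}, consensus means a single action kind
# whose tuples all agree, and ACTION_KEEP always wins when offered. This is a
# simplified model (it omits the ACTION_GET special case); the input is
# hypothetical.
def _example_bid_auction(fbids):
    chosen = {}
    for f, bids in sorted(fbids.items()):
        if len(bids) == 1:  # a single kind of action was bid for this file
            m, l = list(bids.items())[0]
            if all(a == l[0] for a in l[1:]):  # and every bid is identical
                chosen[f] = l[0]
                continue
        if ACTION_KEEP in bids:  # keeping the local file is always safe
            chosen[f] = bids[ACTION_KEEP][0]
            continue
        # otherwise fall back to an arbitrary bid, as the auction above does
        m, l = list(bids.items())[0]
        chosen[f] = l[0]
    return chosen
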
def _getcwd():
    try:
        return encoding.getcwd()
    except OSError as err:
        if err.errno == errno.ENOENT:
            return None
        raise


def batchremove(repo, wctx, actions):
    """apply removes to the working directory

    yields tuples for progress updates
    """
    verbose = repo.ui.verbose
    cwd = _getcwd()
    i = 0
    for f, args, msg in actions:
        repo.ui.debug(b" %s: %s -> r\n" % (f, msg))
        if verbose:
            repo.ui.note(_(b"removing %s\n") % f)
        wctx[f].audit()
        try:
            wctx[f].remove(ignoremissing=True)
        except OSError as inst:
            repo.ui.warn(
                _(b"update failed to remove %s: %s!\n") % (f, inst.strerror)
            )
        if i == 100:
            yield i, f
            i = 0
        i += 1
    if i > 0:
        yield i, f

    if cwd and not _getcwd():
        # cwd was removed in the course of removing files; print a helpful
        # warning.
        repo.ui.warn(
            _(
                b"current directory was removed\n"
                b"(consider changing to repo root: %s)\n"
            )
            % repo.root
        )

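# The progress protocol used by batchremove() above, sketched standalone:
# yield (count, last_item) roughly every 100 items so the consumer can
# advance a progress bar in coarse steps instead of once per file. The
# ``items`` argument is a hypothetical iterable.
def _example_batched_progress(items):
    i = 0
    for item in items:
        # ... per-item work would happen here ...
        if i == 100:
            yield i, item
            i = 0
        i += 1
    if i > 0:
        yield i, item
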
def batchget(repo, mctx, wctx, wantfiledata, actions):
    """apply gets to the working directory

    mctx is the context to get from

    Yields arbitrarily many (False, tuple) for progress updates, followed by
    exactly one (True, filedata). When wantfiledata is false, filedata is an
    empty dict. When wantfiledata is true, filedata[f] is a triple (mode, size,
    mtime) of the file f written for each action.
    """
    filedata = {}
    verbose = repo.ui.verbose
    fctx = mctx.filectx
    ui = repo.ui
    i = 0
    with repo.wvfs.backgroundclosing(ui, expectedcount=len(actions)):
        for f, (flags, backup), msg in actions:
            repo.ui.debug(b" %s: %s -> g\n" % (f, msg))
            if verbose:
                repo.ui.note(_(b"getting %s\n") % f)

            if backup:
                # If a file or directory exists with the same name, back that
                # up. Otherwise, look to see if there is a file that conflicts
                # with a directory this file is in, and if so, back that up.
                conflicting = f
                if not repo.wvfs.lexists(f):
                    for p in pathutil.finddirs(f):
                        if repo.wvfs.isfileorlink(p):
                            conflicting = p
                            break
                if repo.wvfs.lexists(conflicting):
                    orig = scmutil.backuppath(ui, repo, conflicting)
                    util.rename(repo.wjoin(conflicting), orig)
            wfctx = wctx[f]
            wfctx.clearunknown()
            atomictemp = ui.configbool(b"experimental", b"update.atomic-file")
            size = wfctx.write(
                fctx(f).data(),
                flags,
                backgroundclose=True,
                atomictemp=atomictemp,
            )
            if wantfiledata:
                s = wfctx.lstat()
                mode = s.st_mode
                mtime = s[stat.ST_MTIME]
                filedata[f] = (mode, size, mtime)  # for dirstate.normal
            if i == 100:
                yield False, (i, f)
                i = 0
            i += 1
    if i > 0:
        yield False, (i, f)
    yield True, filedata

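# A sketch of how a consumer separates batchget()'s progress updates from its
# final return value, mirroring the ``hasretval`` handling in applyupdates()
# below. The ``gen`` argument stands in for a batchget() generator.
def _example_consume_batchget(gen):
    filedata = {}
    for final, res in gen:
        if final:
            # exactly one (True, filedata) pair arrives, at the very end
            filedata = res
        else:
            # any number of (False, (count, filename)) progress pairs
            count, filename = res
    return filedata
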
def _prefetchfiles(repo, ctx, actions):
    """Invoke ``scmutil.prefetchfiles()`` for the files relevant to the dict
    of merge actions. ``ctx`` is the context being merged in."""

    # Skipping 'a', 'am', 'f', 'r', 'dm', 'e', 'k', 'p' and 'pr', because they
    # don't touch the context to be merged in. 'cd' is skipped, because
    # changed/deleted never resolves to something from the remote side.
    oplist = [
        actions[a]
        for a in (
            ACTION_GET,
            ACTION_DELETED_CHANGED,
            ACTION_LOCAL_DIR_RENAME_GET,
            ACTION_MERGE,
        )
    ]
    prefetch = scmutil.prefetchfiles
    matchfiles = scmutil.matchfiles
    prefetch(
        repo,
        [ctx.rev()],
        matchfiles(repo, [f for sublist in oplist for f, args, msg in sublist]),
    )


@attr.s(frozen=True)
class updateresult(object):
    updatedcount = attr.ib()
    mergedcount = attr.ib()
    removedcount = attr.ib()
    unresolvedcount = attr.ib()

    def isempty(self):
        return not (
            self.updatedcount
            or self.mergedcount
            or self.removedcount
            or self.unresolvedcount
        )

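# How callers typically read an updateresult: it is a frozen attrs class, so
# the four counts are passed positionally and read as attributes. The counts
# below are hypothetical.
def _example_updateresult_usage():
    stats = updateresult(3, 1, 2, 0)  # updated, merged, removed, unresolved
    assert not stats.isempty()  # at least one count is non-zero
    assert stats.unresolvedcount == 0  # i.e. a conflict-free update
    return stats
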
def emptyactions():
    """create an actions dict, to be populated and passed to applyupdates()"""
    return dict(
        (m, [])
        for m in (
            ACTION_ADD,
            ACTION_ADD_MODIFIED,
            ACTION_FORGET,
            ACTION_GET,
            ACTION_CHANGED_DELETED,
            ACTION_DELETED_CHANGED,
            ACTION_REMOVE,
            ACTION_DIR_RENAME_MOVE_LOCAL,
            ACTION_LOCAL_DIR_RENAME_GET,
            ACTION_MERGE,
            ACTION_EXEC,
            ACTION_KEEP,
            ACTION_PATH_CONFLICT,
            ACTION_PATH_CONFLICT_RESOLVE,
        )
    )

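# A minimal sketch of populating the dict from emptyactions() before handing
# it to applyupdates(): each list holds (filename, args, message) tuples,
# where the args format depends on the action (for ACTION_GET it is
# (flags, backup)). The file name here is hypothetical.
def _example_populate_actions():
    actions = emptyactions()
    # fetch one file from the other side: no flags, no backup needed
    actions[ACTION_GET].append((b'a.txt', (b'', False), b'remote is newer'))
    return actions
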
def applyupdates(
    repo, actions, wctx, mctx, overwrite, wantfiledata, labels=None
):
    """apply the merge action list to the working directory

    wctx is the working copy context
    mctx is the context to be merged into the working copy

    Return a tuple of (counts, filedata), where counts is a tuple
    (updated, merged, removed, unresolved) that describes how many
    files were affected by the update, and filedata is as described in
    batchget.
    """

    _prefetchfiles(repo, mctx, actions)

    updated, merged, removed = 0, 0, 0
    ms = mergestate.clean(repo, wctx.p1().node(), mctx.node(), labels)
    moves = []
    for m, l in actions.items():
        l.sort()

    # 'cd' and 'dc' actions are treated like other merge conflicts
    mergeactions = sorted(actions[ACTION_CHANGED_DELETED])
    mergeactions.extend(sorted(actions[ACTION_DELETED_CHANGED]))
    mergeactions.extend(actions[ACTION_MERGE])
    for f, args, msg in mergeactions:
        f1, f2, fa, move, anc = args
        if f == b'.hgsubstate':  # merged internally
            continue
        if f1 is None:
            fcl = filemerge.absentfilectx(wctx, fa)
        else:
            repo.ui.debug(b" preserving %s for resolve of %s\n" % (f1, f))
            fcl = wctx[f1]
        if f2 is None:
            fco = filemerge.absentfilectx(mctx, fa)
        else:
            fco = mctx[f2]
        actx = repo[anc]
        if fa in actx:
            fca = actx[fa]
        else:
            # TODO: move to absentfilectx
            fca = repo.filectx(f1, fileid=nullrev)
        ms.add(fcl, fco, fca, f)
        if f1 != f and move:
            moves.append(f1)

    # remove renamed files after safely stored
    for f in moves:
        if wctx[f].lexists():
            repo.ui.debug(b"removing %s\n" % f)
            wctx[f].audit()
            wctx[f].remove()

    numupdates = sum(len(l) for m, l in actions.items() if m != ACTION_KEEP)
    progress = repo.ui.makeprogress(
        _(b'updating'), unit=_(b'files'), total=numupdates
    )

    if [a for a in actions[ACTION_REMOVE] if a[0] == b'.hgsubstate']:
        subrepoutil.submerge(repo, wctx, mctx, wctx, overwrite, labels)

    # record path conflicts
    for f, args, msg in actions[ACTION_PATH_CONFLICT]:
        f1, fo = args
        s = repo.ui.status
        s(
            _(
                b"%s: path conflict - a file or link has the same name as a "
                b"directory\n"
            )
            % f
        )
        if fo == b'l':
            s(_(b"the local file has been renamed to %s\n") % f1)
        else:
            s(_(b"the remote file has been renamed to %s\n") % f1)
        s(_(b"resolve manually then use 'hg resolve --mark %s'\n") % f)
        ms.addpath(f, f1, fo)
        progress.increment(item=f)

    # When merging in-memory, we can't support worker processes, so set the
    # per-item cost at 0 in that case.
    cost = 0 if wctx.isinmemory() else 0.001

    # remove in parallel (must come before resolving path conflicts and getting)
    prog = worker.worker(
        repo.ui, cost, batchremove, (repo, wctx), actions[ACTION_REMOVE]
    )
    for i, item in prog:
        progress.increment(step=i, item=item)
    removed = len(actions[ACTION_REMOVE])

    # resolve path conflicts (must come before getting)
    for f, args, msg in actions[ACTION_PATH_CONFLICT_RESOLVE]:
        repo.ui.debug(b" %s: %s -> pr\n" % (f, msg))
        (f0,) = args
        if wctx[f0].lexists():
            repo.ui.note(_(b"moving %s to %s\n") % (f0, f))
            wctx[f].audit()
            wctx[f].write(wctx.filectx(f0).data(), wctx.filectx(f0).flags())
            wctx[f0].remove()
        progress.increment(item=f)

    # get in parallel.
    threadsafe = repo.ui.configbool(
        b'experimental', b'worker.wdir-get-thread-safe'
    )
    prog = worker.worker(
        repo.ui,
        cost,
        batchget,
        (repo, mctx, wctx, wantfiledata),
        actions[ACTION_GET],
        threadsafe=threadsafe,
        hasretval=True,
    )
    getfiledata = {}
    for final, res in prog:
        if final:
            getfiledata = res
        else:
            i, item = res
            progress.increment(step=i, item=item)
    updated = len(actions[ACTION_GET])

    if [a for a in actions[ACTION_GET] if a[0] == b'.hgsubstate']:
        subrepoutil.submerge(repo, wctx, mctx, wctx, overwrite, labels)

    # forget (manifest only, just log it) (must come first)
    for f, args, msg in actions[ACTION_FORGET]:
        repo.ui.debug(b" %s: %s -> f\n" % (f, msg))
        progress.increment(item=f)

    # re-add (manifest only, just log it)
    for f, args, msg in actions[ACTION_ADD]:
        repo.ui.debug(b" %s: %s -> a\n" % (f, msg))
        progress.increment(item=f)

    # re-add/mark as modified (manifest only, just log it)
    for f, args, msg in actions[ACTION_ADD_MODIFIED]:
        repo.ui.debug(b" %s: %s -> am\n" % (f, msg))
        progress.increment(item=f)

    # keep (noop, just log it)
    for f, args, msg in actions[ACTION_KEEP]:
        repo.ui.debug(b" %s: %s -> k\n" % (f, msg))
        # no progress

    # directory rename, move local
    for f, args, msg in actions[ACTION_DIR_RENAME_MOVE_LOCAL]:
        repo.ui.debug(b" %s: %s -> dm\n" % (f, msg))
        progress.increment(item=f)
        f0, flags = args
        repo.ui.note(_(b"moving %s to %s\n") % (f0, f))
        wctx[f].audit()
        wctx[f].write(wctx.filectx(f0).data(), flags)
        wctx[f0].remove()
        updated += 1

    # local directory rename, get
    for f, args, msg in actions[ACTION_LOCAL_DIR_RENAME_GET]:
        repo.ui.debug(b" %s: %s -> dg\n" % (f, msg))
        progress.increment(item=f)
        f0, flags = args
        repo.ui.note(_(b"getting %s to %s\n") % (f0, f))
        wctx[f].write(mctx.filectx(f0).data(), flags)
        updated += 1

    # exec
    for f, args, msg in actions[ACTION_EXEC]:
        repo.ui.debug(b" %s: %s -> e\n" % (f, msg))
        progress.increment(item=f)
        (flags,) = args
        wctx[f].audit()
        wctx[f].setflags(b'l' in flags, b'x' in flags)
        updated += 1

    # the ordering is important here -- ms.mergedriver will raise if the merge
    # driver has changed, and we want to be able to bypass it when overwrite is
    # True
    usemergedriver = not overwrite and mergeactions and ms.mergedriver

    if usemergedriver:
        if wctx.isinmemory():
            raise error.InMemoryMergeConflictsError(
                b"in-memory merge does not support mergedriver"
            )
        ms.commit()
        proceed = driverpreprocess(repo, ms, wctx, labels=labels)
        # the driver might leave some files unresolved
        unresolvedf = set(ms.unresolved())
        if not proceed:
            # XXX setting unresolved to at least 1 is a hack to make sure we
            # error out
            return updateresult(
                updated, merged, removed, max(len(unresolvedf), 1)
            )
        newactions = []
        for f, args, msg in mergeactions:
            if f in unresolvedf:
                newactions.append((f, args, msg))
        mergeactions = newactions

    try:
        # premerge
        tocomplete = []
        for f, args, msg in mergeactions:
            repo.ui.debug(b" %s: %s -> m (premerge)\n" % (f, msg))
            progress.increment(item=f)
            if f == b'.hgsubstate':  # subrepo states need updating
                subrepoutil.submerge(
                    repo, wctx, mctx, wctx.ancestor(mctx), overwrite, labels
                )
                continue
            wctx[f].audit()
            complete, r = ms.preresolve(f, wctx)
            if not complete:
                numupdates += 1
                tocomplete.append((f, args, msg))

        # merge
        for f, args, msg in tocomplete:
            repo.ui.debug(b" %s: %s -> m (merge)\n" % (f, msg))
            progress.increment(item=f, total=numupdates)
            ms.resolve(f, wctx)

    finally:
        ms.commit()

    unresolved = ms.unresolvedcount()

    if (
        usemergedriver
        and not unresolved
        and ms.mdstate() != MERGE_DRIVER_STATE_SUCCESS
    ):
        if not driverconclude(repo, ms, wctx, labels=labels):
            # XXX setting unresolved to at least 1 is a hack to make sure we
            # error out
            unresolved = max(unresolved, 1)

        ms.commit()

    msupdated, msmerged, msremoved = ms.counts()
    updated += msupdated
    merged += msmerged
    removed += msremoved

    extraactions = ms.actions()
    if extraactions:
        mfiles = set(a[0] for a in actions[ACTION_MERGE])
        for k, acts in pycompat.iteritems(extraactions):
            actions[k].extend(acts)
            if k == ACTION_GET and wantfiledata:
                # no filedata until mergestate is updated to provide it
                for a in acts:
                    getfiledata[a[0]] = None
            # Remove these files from actions[ACTION_MERGE] as well. This is
            # important because in recordupdates, files in actions[ACTION_MERGE]
            # are processed after files in other actions, and the merge driver
            # might add files to those actions via extraactions above. This can
            # lead to a file being recorded twice, with poor results. This is
            # especially problematic for actions[ACTION_REMOVE] (currently only
            # possible with the merge driver in the initial merge process;
            # interrupted merges don't go through this flow).
            #
            # The real fix here is to have indexes by both file and action so
            # that when the action for a file is changed it is automatically
            # reflected in the other action lists. But that involves a more
            # complex data structure, so this will do for now.
            #
            # We don't need to do the same operation for 'dc' and 'cd' because
            # those lists aren't consulted again.
            mfiles.difference_update(a[0] for a in acts)

        actions[ACTION_MERGE] = [
            a for a in actions[ACTION_MERGE] if a[0] in mfiles
        ]

    progress.complete()
    assert len(getfiledata) == (len(actions[ACTION_GET]) if wantfiledata else 0)
    return updateresult(updated, merged, removed, unresolved), getfiledata

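# A simplified sketch of how the pieces above fit together for a plain,
# non-branch-merge update, assuming ``repo``, ``wctx``, ``mctx`` and a
# populated ``actions`` dict are already in hand: apply the actions to the
# working copy, then record them in the dirstate. Real callers go through
# update() below, which also takes the wlock and performs the safety checks.
def _example_apply_and_record(repo, wctx, mctx, actions):
    stats, getfiledata = applyupdates(
        repo, actions, wctx, mctx, overwrite=False, wantfiledata=True
    )
    if not stats.unresolvedcount:
        recordupdates(
            repo, actions, branchmerge=False, getfiledata=getfiledata
        )
    return stats
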
def recordupdates(repo, actions, branchmerge, getfiledata):
    """record merge actions to the dirstate"""
    # remove (must come first)
    for f, args, msg in actions.get(ACTION_REMOVE, []):
        if branchmerge:
            repo.dirstate.remove(f)
        else:
            repo.dirstate.drop(f)

    # forget (must come first)
    for f, args, msg in actions.get(ACTION_FORGET, []):
        repo.dirstate.drop(f)

    # resolve path conflicts
    for f, args, msg in actions.get(ACTION_PATH_CONFLICT_RESOLVE, []):
        (f0,) = args
        origf0 = repo.dirstate.copied(f0) or f0
        repo.dirstate.add(f)
        repo.dirstate.copy(origf0, f)
        if f0 == origf0:
            repo.dirstate.remove(f0)
        else:
            repo.dirstate.drop(f0)

    # re-add
    for f, args, msg in actions.get(ACTION_ADD, []):
        repo.dirstate.add(f)

    # re-add/mark as modified
    for f, args, msg in actions.get(ACTION_ADD_MODIFIED, []):
        if branchmerge:
            repo.dirstate.normallookup(f)
        else:
            repo.dirstate.add(f)

    # exec change
    for f, args, msg in actions.get(ACTION_EXEC, []):
        repo.dirstate.normallookup(f)

    # keep
    for f, args, msg in actions.get(ACTION_KEEP, []):
        pass

    # get
    for f, args, msg in actions.get(ACTION_GET, []):
        if branchmerge:
            repo.dirstate.otherparent(f)
        else:
            parentfiledata = getfiledata[f] if getfiledata else None
            repo.dirstate.normal(f, parentfiledata=parentfiledata)

    # merge
    for f, args, msg in actions.get(ACTION_MERGE, []):
        f1, f2, fa, move, anc = args
        if branchmerge:
            # We've done a branch merge, mark this file as merged
            # so that we properly record the merger later
            repo.dirstate.merge(f)
            if f1 != f2:  # copy/rename
                if move:
                    repo.dirstate.remove(f1)
                if f1 != f:
                    repo.dirstate.copy(f1, f)
                else:
                    repo.dirstate.copy(f2, f)
        else:
            # We've update-merged a locally modified file, so
            # we set the dirstate to emulate a normal checkout
            # of that file some time in the past. Thus our
            # merge will appear as a normal local file
            # modification.
            if f2 == f:  # file not locally copied/moved
                repo.dirstate.normallookup(f)
            if move:
                repo.dirstate.drop(f1)

    # directory rename, move local
    for f, args, msg in actions.get(ACTION_DIR_RENAME_MOVE_LOCAL, []):
        f0, flag = args
        if branchmerge:
            repo.dirstate.add(f)
            repo.dirstate.remove(f0)
            repo.dirstate.copy(f0, f)
        else:
            repo.dirstate.normal(f)
            repo.dirstate.drop(f0)

    # directory rename, get
    for f, args, msg in actions.get(ACTION_LOCAL_DIR_RENAME_GET, []):
        f0, flag = args
        if branchmerge:
            repo.dirstate.add(f)
            repo.dirstate.copy(f0, f)
        else:
            repo.dirstate.normal(f)


UPDATECHECK_ABORT = b'abort'  # handled at higher layers
UPDATECHECK_NONE = b'none'
UPDATECHECK_LINEAR = b'linear'
UPDATECHECK_NO_CONFLICT = b'noconflict'

2210 def update(
2210 def update(
2211 repo,
2211 repo,
2212 node,
2212 node,
2213 branchmerge,
2213 branchmerge,
2214 force,
2214 force,
2215 ancestor=None,
2215 ancestor=None,
2216 mergeancestor=False,
2216 mergeancestor=False,
2217 labels=None,
2217 labels=None,
2218 matcher=None,
2218 matcher=None,
2219 mergeforce=False,
2219 mergeforce=False,
2220 updatedirstate=True,
2220 updatedirstate=True,
2221 updatecheck=None,
2221 updatecheck=None,
2222 wc=None,
2222 wc=None,
2223 ):
2223 ):
2224 """
2224 """
2225 Perform a merge between the working directory and the given node
2225 Perform a merge between the working directory and the given node
2226
2226
2227 node = the node to update to
2227 node = the node to update to
2228 branchmerge = whether to merge between branches
2228 branchmerge = whether to merge between branches
2229 force = whether to force branch merging or file overwriting
2229 force = whether to force branch merging or file overwriting
2230 matcher = a matcher to filter file lists (dirstate not updated)
2230 matcher = a matcher to filter file lists (dirstate not updated)
2231 mergeancestor = whether it is merging with an ancestor. If true,
2231 mergeancestor = whether it is merging with an ancestor. If true,
2232 we should accept the incoming changes for any prompts that occur.
2232 we should accept the incoming changes for any prompts that occur.
2233 If false, merging with an ancestor (fast-forward) is only allowed
2233 If false, merging with an ancestor (fast-forward) is only allowed
2234 between different named branches. This flag is used by rebase extension
2234 between different named branches. This flag is used by rebase extension
2235 as a temporary fix and should be avoided in general.
2235 as a temporary fix and should be avoided in general.
2236 labels = labels to use for base, local and other
2236 labels = labels to use for base, local and other
2237 mergeforce = whether the merge was run with 'merge --force' (deprecated): if
2237 mergeforce = whether the merge was run with 'merge --force' (deprecated): if
2238 this is True, then 'force' should be True as well.
2238 this is True, then 'force' should be True as well.
2239
2239
2240 The table below shows all the behaviors of the update command given the
2240 The table below shows all the behaviors of the update command given the
2241 -c/--check and -C/--clean or no options, whether the working directory is
2241 -c/--check and -C/--clean or no options, whether the working directory is
2242 dirty, whether a revision is specified, and the relationship of the parent
2242 dirty, whether a revision is specified, and the relationship of the parent
2243 rev to the target rev (linear or not). Match from top first. The -n
2243 rev to the target rev (linear or not). Match from top first. The -n
2244 option doesn't exist on the command line, but represents the
2244 option doesn't exist on the command line, but represents the
2245 experimental.updatecheck=noconflict option.
2245 experimental.updatecheck=noconflict option.
2246
2246
2247 This logic is tested by test-update-branches.t.
2247 This logic is tested by test-update-branches.t.
2248
2248
2249 -c -C -n -m dirty rev linear | result
2249 -c -C -n -m dirty rev linear | result
2250 y y * * * * * | (1)
2250 y y * * * * * | (1)
2251 y * y * * * * | (1)
2251 y * y * * * * | (1)
2252 y * * y * * * | (1)
2252 y * * y * * * | (1)
2253 * y y * * * * | (1)
2253 * y y * * * * | (1)
2254 * y * y * * * | (1)
2254 * y * y * * * | (1)
2255 * * y y * * * | (1)
2255 * * y y * * * | (1)
2256 * * * * * n n | x
2256 * * * * * n n | x
2257 * * * * n * * | ok
2257 * * * * n * * | ok
2258 n n n n y * y | merge
2258 n n n n y * y | merge
2259 n n n n y y n | (2)
2259 n n n n y y n | (2)
2260 n n n y y * * | merge
2260 n n n y y * * | merge
2261 n n y n y * * | merge if no conflict
2261 n n y n y * * | merge if no conflict
2262 n y n n y * * | discard
2262 n y n n y * * | discard
2263 y n n n y * * | (3)
2263 y n n n y * * | (3)
2264
2264
2265 x = can't happen
2265 x = can't happen
2266 * = don't-care
2266 * = don't-care
2267 1 = incompatible options (checked in commands.py)
2267 1 = incompatible options (checked in commands.py)
2268 2 = abort: uncommitted changes (commit or update --clean to discard changes)
2268 2 = abort: uncommitted changes (commit or update --clean to discard changes)
2269 3 = abort: uncommitted changes (checked in commands.py)
2269 3 = abort: uncommitted changes (checked in commands.py)
2270
2270
2271 The merge is performed inside ``wc``, a workingctx-like objects. It defaults
2271 The merge is performed inside ``wc``, a workingctx-like objects. It defaults
2272 to repo[None] if None is passed.
2272 to repo[None] if None is passed.
2273
2273
2274 Return the same tuple as applyupdates().
2274 Return the same tuple as applyupdates().
2275 """
2275 """
    # Avoid cycle.
    from . import sparse

    # This function used to find the default destination if node was None, but
    # that's now in destutil.py.
    assert node is not None
    if not branchmerge and not force:
        # TODO: remove the default once all callers that pass branchmerge=False
        # and force=False pass a value for updatecheck. We may want to allow
        # updatecheck='abort' to better support some of these callers.
        if updatecheck is None:
            updatecheck = UPDATECHECK_LINEAR
        if updatecheck not in (
            UPDATECHECK_NONE,
            UPDATECHECK_LINEAR,
            UPDATECHECK_NO_CONFLICT,
        ):
            raise ValueError(
                r'Invalid updatecheck %r (can accept %r)'
                % (
                    updatecheck,
                    (
                        UPDATECHECK_NONE,
                        UPDATECHECK_LINEAR,
                        UPDATECHECK_NO_CONFLICT,
                    ),
                )
            )
    with repo.wlock():
        if wc is None:
            wc = repo[None]
        pl = wc.parents()
        p1 = pl[0]
        p2 = repo[node]
        if ancestor is not None:
            pas = [repo[ancestor]]
        else:
            if repo.ui.configlist(b'merge', b'preferancestor') == [b'*']:
                cahs = repo.changelog.commonancestorsheads(p1.node(), p2.node())
                pas = [repo[anc] for anc in (sorted(cahs) or [nullid])]
            else:
                pas = [p1.ancestor(p2, warn=branchmerge)]

        fp1, fp2, xp1, xp2 = p1.node(), p2.node(), bytes(p1), bytes(p2)

        overwrite = force and not branchmerge
        ### check phase
        if not overwrite:
            if len(pl) > 1:
                raise error.Abort(_(b"outstanding uncommitted merge"))
            ms = mergestate.read(repo)
            if list(ms.unresolved()):
                raise error.Abort(
                    _(b"outstanding merge conflicts"),
                    hint=_(b"use 'hg resolve' to resolve"),
                )
        if branchmerge:
            if pas == [p2]:
                raise error.Abort(
                    _(
                        b"merging with a working directory ancestor"
                        b" has no effect"
                    )
                )
            elif pas == [p1]:
                if not mergeancestor and wc.branch() == p2.branch():
                    raise error.Abort(
                        _(b"nothing to merge"),
                        hint=_(b"use 'hg update' or check 'hg heads'"),
                    )
            if not force and (wc.files() or wc.deleted()):
                raise error.Abort(
                    _(b"uncommitted changes"),
                    hint=_(b"use 'hg status' to list changes"),
                )
            if not wc.isinmemory():
                for s in sorted(wc.substate):
                    wc.sub(s).bailifchanged()

        elif not overwrite:
            if p1 == p2:  # no-op update
                # call the hooks and exit early
                repo.hook(b'preupdate', throw=True, parent1=xp2, parent2=b'')
                repo.hook(b'update', parent1=xp2, parent2=b'', error=0)
                return updateresult(0, 0, 0, 0)

        if updatecheck == UPDATECHECK_LINEAR and pas not in (
            [p1],
            [p2],
        ):  # nonlinear
            dirty = wc.dirty(missing=True)
            if dirty:
                # Branching is a bit strange to ensure we do the minimal
                # number of calls to obsutil.foreground.
                foreground = obsutil.foreground(repo, [p1.node()])
                # note: the <node> variable contains a random identifier
                if repo[node].node() in foreground:
                    pass  # allow updating to successors
                else:
                    msg = _(b"uncommitted changes")
                    hint = _(b"commit or update --clean to discard changes")
                    raise error.UpdateAbort(msg, hint=hint)
            else:
                # Allow jumping branches if clean and specific rev given
                pass

        if overwrite:
            pas = [wc]
        elif not branchmerge:
            pas = [p1]

        # deprecated config: merge.followcopies
        followcopies = repo.ui.configbool(b'merge', b'followcopies')
        if overwrite:
            followcopies = False
        elif not pas[0]:
            followcopies = False
        if not branchmerge and not wc.dirty(missing=True):
            followcopies = False

        ### calculate phase
        actionbyfile, diverge, renamedelete = calculateupdates(
            repo,
            wc,
            p2,
            pas,
            branchmerge,
            force,
            mergeancestor,
            followcopies,
            matcher=matcher,
            mergeforce=mergeforce,
        )

        if updatecheck == UPDATECHECK_NO_CONFLICT:
            for f, (m, args, msg) in pycompat.iteritems(actionbyfile):
                if m not in (
                    ACTION_GET,
                    ACTION_KEEP,
                    ACTION_EXEC,
                    ACTION_REMOVE,
                    ACTION_PATH_CONFLICT_RESOLVE,
                ):
                    msg = _(b"conflicting changes")
                    hint = _(b"commit or update --clean to discard changes")
                    raise error.Abort(msg, hint=hint)

        # Prompt and create actions. Most of this is in the resolve phase
        # already, but we can't handle .hgsubstate in filemerge or
        # subrepoutil.submerge yet so we have to keep prompting for it.
        if b'.hgsubstate' in actionbyfile:
            f = b'.hgsubstate'
            m, args, msg = actionbyfile[f]
            prompts = filemerge.partextras(labels)
            prompts[b'f'] = f
            if m == ACTION_CHANGED_DELETED:
                if repo.ui.promptchoice(
                    _(
                        b"local%(l)s changed %(f)s which other%(o)s deleted\n"
                        b"use (c)hanged version or (d)elete?"
                        b"$$ &Changed $$ &Delete"
                    )
                    % prompts,
                    0,
                ):
                    actionbyfile[f] = (ACTION_REMOVE, None, b'prompt delete')
                elif f in p1:
                    actionbyfile[f] = (
                        ACTION_ADD_MODIFIED,
                        None,
                        b'prompt keep',
                    )
                else:
                    actionbyfile[f] = (ACTION_ADD, None, b'prompt keep')
            elif m == ACTION_DELETED_CHANGED:
                f1, f2, fa, move, anc = args
                flags = p2[f2].flags()
                if (
                    repo.ui.promptchoice(
                        _(
                            b"other%(o)s changed %(f)s which local%(l)s deleted\n"
                            b"use (c)hanged version or leave (d)eleted?"
                            b"$$ &Changed $$ &Deleted"
                        )
                        % prompts,
                        0,
                    )
                    == 0
                ):
                    actionbyfile[f] = (
                        ACTION_GET,
                        (flags, False),
                        b'prompt recreating',
                    )
                else:
                    del actionbyfile[f]

        # Convert to dictionary-of-lists format
        actions = emptyactions()
        for f, (m, args, msg) in pycompat.iteritems(actionbyfile):
            if m not in actions:
                actions[m] = []
            actions[m].append((f, args, msg))

        if not util.fscasesensitive(repo.path):
            # check collision between files only in p2 for clean update
            if not branchmerge and (
                force or not wc.dirty(missing=True, branch=False)
            ):
                _checkcollision(repo, p2.manifest(), None)
            else:
                _checkcollision(repo, wc.manifest(), actions)

        # divergent renames
        for f, fl in sorted(pycompat.iteritems(diverge)):
            repo.ui.warn(
                _(
                    b"note: possible conflict - %s was renamed "
                    b"multiple times to:\n"
                )
                % f
            )
            for nf in sorted(fl):
                repo.ui.warn(b" %s\n" % nf)

        # rename and delete
        for f, fl in sorted(pycompat.iteritems(renamedelete)):
            repo.ui.warn(
                _(
                    b"note: possible conflict - %s was deleted "
                    b"and renamed to:\n"
                )
                % f
            )
            for nf in sorted(fl):
                repo.ui.warn(b" %s\n" % nf)

        ### apply phase
        if not branchmerge:  # just jump to the new rev
            fp1, fp2, xp1, xp2 = fp2, nullid, xp2, b''
        # If we're doing a partial update, we need to skip updating
        # the dirstate.
        always = matcher is None or matcher.always()
        updatedirstate = updatedirstate and always and not wc.isinmemory()
        if updatedirstate:
            repo.hook(b'preupdate', throw=True, parent1=xp1, parent2=xp2)
            # note that we're in the middle of an update
            repo.vfs.write(b'updatestate', p2.hex())

        # Advertise fsmonitor when its presence could be useful.
        #
        # We only advertise when performing an update from an empty working
        # directory. This typically only occurs during initial clone.
        #
        # We give users a mechanism to disable the warning in case it is
        # annoying.
        #
        # We only allow on Linux and MacOS because that's where fsmonitor is
        # considered stable.
        fsmonitorwarning = repo.ui.configbool(b'fsmonitor', b'warn_when_unused')
        fsmonitorthreshold = repo.ui.configint(
            b'fsmonitor', b'warn_update_file_count'
        )
        try:
            # avoid cycle: extensions -> cmdutil -> merge
            from . import extensions

            extensions.find(b'fsmonitor')
            fsmonitorenabled = repo.ui.config(b'fsmonitor', b'mode') != b'off'
            # We intentionally don't look at whether fsmonitor has disabled
            # itself because a) fsmonitor may have already printed a warning
            # b) we only care about the config state here.
        except KeyError:
            fsmonitorenabled = False

        if (
            fsmonitorwarning
            and not fsmonitorenabled
            and p1.node() == nullid
            and len(actions[ACTION_GET]) >= fsmonitorthreshold
            and pycompat.sysplatform.startswith((b'linux', b'darwin'))
        ):
            repo.ui.warn(
                _(
                    b'(warning: large working directory being used without '
                    b'fsmonitor enabled; enable fsmonitor to improve performance; '
                    b'see "hg help -e fsmonitor")\n'
                )
            )
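
        # Illustrative aside, not from the original file: the warning above is
        # driven by the hgrc settings read via configbool()/configint() just
        # before it; the example values below are assumptions.
        #
        #   [fsmonitor]
        #   warn_when_unused = false        # silence the advertisement
        #   warn_update_file_count = 50000  # raise the size threshold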

        wantfiledata = updatedirstate and not branchmerge
        stats, getfiledata = applyupdates(
            repo, actions, wc, p2, overwrite, wantfiledata, labels=labels
        )

        if updatedirstate:
            with repo.dirstate.parentchange():
                repo.setparents(fp1, fp2)
                recordupdates(repo, actions, branchmerge, getfiledata)
            # update completed, clear state
            util.unlink(repo.vfs.join(b'updatestate'))

            if not branchmerge:
                repo.dirstate.setbranch(p2.branch())

    # If we're updating to a location, clean up any stale temporary includes
    # (ex: this happens during hg rebase --abort).
    if not branchmerge:
        sparse.prunetemporaryincludes(repo)

    if updatedirstate:
        repo.hook(
            b'update', parent1=xp1, parent2=xp2, error=stats.unresolvedcount
        )
    return stats


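# A minimal usage sketch of update() with the 'noconflict' check, the
# programmatic equivalent of experimental.updatecheck=noconflict. This is an
# illustration, not part of merge.py; `repo` and `node` are assumed to be
# supplied by the caller.
#
#     from mercurial import merge as mergemod
#
#     def update_noconflict(repo, node):
#         """Update to `node`, aborting rather than creating conflicts."""
#         return mergemod.update(
#             repo,
#             node,
#             branchmerge=False,
#             force=False,
#             updatecheck=mergemod.UPDATECHECK_NO_CONFLICT,
#         )
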
def merge(ctx, labels=None, force=False, wc=None):
    """Merge another topological branch into the working copy.

    force = whether the merge was run with 'merge --force' (deprecated)
    """

    return update(
        ctx.repo(),
        ctx.rev(),
        labels=labels,
        branchmerge=True,
        force=force,
        mergeforce=force,
        wc=wc,
    )


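# A usage sketch for the new merge() wrapper: since it sets branchmerge=True
# internally, a caller only needs a changectx for the other head. This is an
# illustration; the revision symbol b'default' and the labels are assumptions.
#
#     from mercurial import merge as mergemod
#
#     def merge_other_head(repo):
#         other = repo[b'default']  # head to merge into the working copy
#         stats = mergemod.merge(other, labels=[b'working copy', b'merge rev'])
#         return stats.unresolvedcount  # 0 means no conflicts remain
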
def clean_update(ctx, wc=None):
    """Do a clean update to the given commit.

    This involves updating to the commit and discarding any changes in the
    working copy.
    """
    return update(ctx.repo(), ctx.rev(), branchmerge=False, force=True, wc=wc)


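# A usage sketch for clean_update(), the programmatic analogue of
# 'hg update --clean REV'. Illustrative only; the b'tip' revision is an
# assumption.
#
#     from mercurial import merge as mergemod
#
#     def discard_and_update(repo):
#         # Discards any working-copy changes, then updates to tip.
#         mergemod.clean_update(repo[b'tip'])
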
def revert_to(ctx, matcher=None, wc=None):
    """Revert the working copy to the given commit.

    The working copy will keep its current parent(s) but its content will
    be the same as in the given commit.
    """

    return update(
        ctx.repo(),
        ctx.rev(),
        branchmerge=False,
        force=True,
        updatedirstate=False,
        matcher=matcher,
        wc=wc,
    )


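# A usage sketch for revert_to() with a matcher, limiting the revert to a
# subset of files, similar in spirit to 'hg revert -r REV PATH'. Illustrative
# only; the path pattern and helper name are assumptions.
#
#     from mercurial import merge as mergemod, scmutil
#
#     def revert_subdir(repo, node, pattern=b'path:subdir'):
#         ctx = repo[node]
#         m = scmutil.match(ctx, pats=[pattern])
#         mergemod.revert_to(ctx, matcher=m, wc=repo[None])
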
def graft(
    repo,
    ctx,
    base=None,
    labels=None,
    keepparent=False,
    keepconflictparent=False,
    wctx=None,
):
    """Do a graft-like merge.

    This is a merge where the merge ancestor is chosen such that one
    or more changesets are grafted onto the current changeset. In
    addition to the merge, this fixes up the dirstate to include only
    a single parent (if keepparent is False) and tries to duplicate any
    renames/copies appropriately.

    ctx - changeset to rebase
    base - merge base, or ctx.p1() if not specified
    labels - merge labels, e.g. ['local', 'graft']
    keepparent - keep second parent if any
    keepconflictparent - if unresolved, keep parent used for the merge

    """
    # If we're grafting a descendant onto an ancestor, be sure to pass
    # mergeancestor=True to update. This does two things: 1) allows the merge if
    # the destination is the same as the parent of the ctx (so we can use graft
    # to copy commits), and 2) informs update that the incoming changes are
    # newer than the destination so it doesn't prompt about "remote changed foo
    # which local deleted".
    # We also pass mergeancestor=True when base is the same revision as p1. 2)
    # doesn't matter as there can't possibly be conflicts, but 1) is necessary.
    wctx = wctx or repo[None]
    pctx = wctx.p1()
    base = base or ctx.p1()
    mergeancestor = (
        repo.changelog.isancestor(pctx.node(), ctx.node())
        or pctx.rev() == base.rev()
    )

    stats = update(
        repo,
        ctx.node(),
        True,
        True,
        base.node(),
        mergeancestor=mergeancestor,
        labels=labels,
        wc=wctx,
    )

    if keepconflictparent and stats.unresolvedcount:
        pother = ctx.node()
    else:
        pother = nullid
        parents = ctx.parents()
        if keepparent and len(parents) == 2 and base in parents:
            parents.remove(base)
            pother = parents[0].node()
    # Never set both parents equal to each other
    if pother == pctx.node():
        pother = nullid

    if wctx.isinmemory():
        wctx.setparents(pctx.node(), pother)
        # fix up dirstate for copies and renames
        copies.graftcopies(wctx, ctx, base)
    else:
        with repo.dirstate.parentchange():
            repo.setparents(pctx.node(), pother)
            repo.dirstate.write(repo.currenttransaction())
            # fix up dirstate for copies and renames
            copies.graftcopies(wctx, ctx, base)
    return stats


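# A usage sketch for graft(): grafting a single changeset onto the working
# copy parent, roughly what 'hg graft REV' does before committing. This is an
# illustration; `repo` and `rev` are assumed to come from the caller.
#
#     from mercurial import merge as mergemod
#
#     def graft_one(repo, rev):
#         ctx = repo[rev]
#         stats = mergemod.graft(repo, ctx, labels=[b'local', b'graft'])
#         if stats.unresolvedcount:
#             raise RuntimeError('unresolved conflicts; run hg resolve')
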
def purge(
    repo,
    matcher,
    unknown=True,
    ignored=False,
    removeemptydirs=True,
    removefiles=True,
    abortonerror=False,
    noop=False,
):
    """Purge the working directory of untracked files.

    ``matcher`` is a matcher configured to scan the working directory -
    potentially a subset.

    ``unknown`` controls whether unknown files should be purged.

    ``ignored`` controls whether ignored files should be purged.

    ``removeemptydirs`` controls whether empty directories should be removed.

    ``removefiles`` controls whether files are removed.

    ``abortonerror`` causes an exception to be raised if an error occurs
    deleting a file or directory.

    ``noop`` controls whether to actually remove files. If True, nothing is
    deleted; the paths that would have been removed are still reported.

    Returns an iterable of relative paths in the working directory that were
    or would be removed.
    """

    def remove(removefn, path):
        try:
            removefn(path)
        except OSError:
            m = _(b'%s cannot be removed') % path
            if abortonerror:
                raise error.Abort(m)
            else:
                repo.ui.warn(_(b'warning: %s\n') % m)

    # There's no API to copy a matcher. So mutate the passed matcher and
    # restore it when we're done.
    oldtraversedir = matcher.traversedir

    res = []

    try:
        if removeemptydirs:
            directories = []
            matcher.traversedir = directories.append

        status = repo.status(match=matcher, ignored=ignored, unknown=unknown)

        if removefiles:
            for f in sorted(status.unknown + status.ignored):
                if not noop:
                    repo.ui.note(_(b'removing file %s\n') % f)
                    remove(repo.wvfs.unlink, f)
                res.append(f)

        if removeemptydirs:
            for f in sorted(directories, reverse=True):
                if matcher(f) and not repo.wvfs.listdir(f):
                    if not noop:
                        repo.ui.note(_(b'removing directory %s\n') % f)
                        remove(repo.wvfs.rmdir, f)
                    res.append(f)

        return res

    finally:
        matcher.traversedir = oldtraversedir
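

# A usage sketch for purge(): a dry run over the whole working directory,
# listing what 'hg purge --all' would delete. Illustrative only; using
# scmutil.matchall() for an always-matching matcher is an assumption.
#
#     from mercurial import merge as mergemod, scmutil
#
#     def preview_purge(repo):
#         m = scmutil.matchall(repo)
#         # noop=True reports paths without deleting anything
#         return list(mergemod.purge(repo, m, ignored=True, noop=True))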