##// END OF EJS Templates
clone: update to active bookmark, if set...
Dan Villiom Podlaski Christiansen -
r46814:72007a9a default
parent child Browse files
Show More
@@ -1,1502 +1,1506
1 # hg.py - repository classes for mercurial
1 # hg.py - repository classes for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 from __future__ import absolute_import
9 from __future__ import absolute_import
10
10
11 import errno
11 import errno
12 import os
12 import os
13 import shutil
13 import shutil
14 import stat
14 import stat
15
15
16 from .i18n import _
16 from .i18n import _
17 from .node import (
17 from .node import (
18 hex,
18 hex,
19 nullhex,
19 nullhex,
20 nullid,
20 nullid,
21 short,
21 short,
22 )
22 )
23 from .pycompat import getattr
23 from .pycompat import getattr
24
24
25 from . import (
25 from . import (
26 bookmarks,
26 bookmarks,
27 bundlerepo,
27 bundlerepo,
28 cacheutil,
28 cacheutil,
29 cmdutil,
29 cmdutil,
30 destutil,
30 destutil,
31 discovery,
31 discovery,
32 error,
32 error,
33 exchange,
33 exchange,
34 extensions,
34 extensions,
35 httppeer,
35 httppeer,
36 localrepo,
36 localrepo,
37 lock,
37 lock,
38 logcmdutil,
38 logcmdutil,
39 logexchange,
39 logexchange,
40 merge as mergemod,
40 merge as mergemod,
41 mergestate as mergestatemod,
41 mergestate as mergestatemod,
42 narrowspec,
42 narrowspec,
43 phases,
43 phases,
44 pycompat,
44 pycompat,
45 requirements,
45 requirements,
46 scmutil,
46 scmutil,
47 sshpeer,
47 sshpeer,
48 statichttprepo,
48 statichttprepo,
49 ui as uimod,
49 ui as uimod,
50 unionrepo,
50 unionrepo,
51 url,
51 url,
52 util,
52 util,
53 verify as verifymod,
53 verify as verifymod,
54 vfs as vfsmod,
54 vfs as vfsmod,
55 )
55 )
56 from .utils import hashutil
56 from .utils import hashutil
57
57
# Convenience alias so callers can release lock objects without importing
# the `lock` module themselves.
release = lock.release

# Names of features a shared repository may share with its source.
sharedbookmarks = b'bookmarks'
62
62
63
63
def _local(path):
    """Return the repository class/module appropriate for a local *path*.

    Returns ``bundlerepo`` when *path* names a regular file (a bundle),
    and ``localrepo`` otherwise (directory or nonexistent path).

    Raises ``error.Abort`` when *path* is syntactically invalid for the
    platform's filesystem APIs.
    """
    path = util.expandpath(util.urllocalpath(path))

    try:
        # we use os.stat() directly here instead of os.path.isfile()
        # because the latter started returning `False` on invalid path
        # exceptions starting in 3.8 and we care about handling
        # invalid paths specially here.
        st = os.stat(path)
        isfile = stat.S_ISREG(st.st_mode)
    # Python 2 raises TypeError, Python 3 ValueError.
    except (TypeError, ValueError) as e:
        raise error.Abort(
            _(b'invalid path %s: %s') % (path, pycompat.bytestr(e))
        )
    except OSError:
        # Path doesn't exist (or isn't statable): treat as a local repo dir.
        isfile = False

    # Conditional expression instead of the legacy `and/or` idiom.
    return bundlerepo if isfile else localrepo
83
83
84
84
def addbranchrevs(lrepo, other, branches, revs):
    """Resolve branch names from *branches* against *other* and merge the
    resulting heads into *revs*.

    *branches* is a ``(hashbranch, branches)`` pair as produced by
    ``parseurl()``. Returns ``(revs, checkout)`` where *checkout* is the
    first revision, or ``None`` when nothing was requested.
    """
    remote = other.peer()  # a courtesy to callers using a localrepo for other
    hashbranch, branchnames = branches
    if not hashbranch and not branchnames:
        # No branch selection requested; pass revisions through unchanged.
        return (revs or None), (revs[0] if revs else None)

    revs = list(revs) if revs else []

    if not remote.capable(b'branchmap'):
        if branchnames:
            raise error.Abort(_(b"remote branch lookup not supported"))
        revs.append(hashbranch)
        return revs, revs[0]

    with remote.commandexecutor() as executor:
        branchmap = executor.callcommand(b'branchmap', {}).result()

    def expand(branch):
        # Map b'.' to the local dirstate branch, then append that branch's
        # heads (tip-most first). Returns False when the branch is unknown.
        if branch == b'.':
            if not lrepo:
                raise error.Abort(_(b"dirstate branch not accessible"))
            branch = lrepo.dirstate.branch()
        if branch not in branchmap:
            return False
        revs.extend(hex(node) for node in reversed(branchmap[branch]))
        return True

    for branch in branchnames:
        if not expand(branch):
            raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
    # An unresolvable fragment may still be a raw hash/revision identifier.
    if hashbranch and not expand(hashbranch):
        revs.append(hashbranch)
    return revs, revs[0]
127
127
128
128
def parseurl(path, branches=None):
    '''parse url#branch, returning (url, (branch, branches))'''

    parsed = util.url(path)
    # A `#branch` fragment selects a branch; strip it from the URL proper.
    branch = None
    if parsed.fragment:
        branch = parsed.fragment
        parsed.fragment = None
    return bytes(parsed), (branch, branches or [])
138
138
139
139
# Maps a URL scheme to the module (or callable) used to open repositories of
# that kind. Unknown schemes fall back to b'file' (see _peerlookup).
schemes = {
    b'bundle': bundlerepo,
    b'union': unionrepo,
    b'file': _local,
    b'http': httppeer,
    b'https': httppeer,
    b'ssh': sshpeer,
    b'static-http': statichttprepo,
}
149
149
150
150
def _peerlookup(path):
    """Return the handler (module or repo object) for *path*'s URL scheme."""
    scheme = util.url(path).scheme or b'file'
    handler = schemes.get(scheme) or schemes[b'file']
    try:
        return handler(path)
    except TypeError:
        # we can't test callable(handler) because 'handler' can be an
        # unloaded module that implements __call__
        if not util.safehasattr(handler, b'instance'):
            raise
        return handler
163
163
164
164
def islocal(repo):
    '''return true if repo (or path pointing to repo) is local'''
    if not isinstance(repo, bytes):
        return repo.local()
    # A bytes path: ask the scheme handler, which may not support the query.
    try:
        return _peerlookup(repo).islocal(repo)
    except AttributeError:
        return False
173
173
174
174
def openpath(ui, path, sendaccept=True):
    '''open path with open if local, url.open if remote'''
    pathurl = util.url(path, parsequery=False, parsefragment=False)
    if not pathurl.islocal():
        return url.open(ui, path, sendaccept=sendaccept)
    return util.posixfile(pathurl.localpath(), b'rb')
182
182
183
183
# a list of (ui, repo) functions called for wire peer initialization
wirepeersetupfuncs = []
186
186
187
187
def _peerorrepo(
    ui, path, create=False, presetupfuncs=None, intents=None, createopts=None
):
    """return a repository object for the specified path"""
    obj = _peerlookup(path).instance(
        ui, path, create, intents=intents, createopts=createopts
    )
    # Prefer the instantiated object's own ui when it has one.
    ui = getattr(obj, "ui", ui)
    for setupfunc in presetupfuncs or []:
        setupfunc(ui, obj)
    ui.log(b'extension', b'- executing reposetup hooks\n')
    with util.timedcm('all reposetup') as allreposetupstats:
        for name, module in extensions.extensions(ui):
            ui.log(b'extension', b' - running reposetup for %s\n', name)
            hook = getattr(module, 'reposetup', None)
            if not hook:
                continue
            with util.timedcm('reposetup %r', name) as stats:
                hook(ui, obj)
            ui.log(
                b'extension', b' > reposetup for %s took %s\n', name, stats
            )
    ui.log(b'extension', b'> all reposetup took %s\n', allreposetupstats)
    if not obj.local():
        # Wire peers get extra initialization from registered hooks.
        for setupfunc in wirepeersetupfuncs:
            setupfunc(ui, obj)
    return obj
214
214
215
215
def repository(
    ui,
    path=b'',
    create=False,
    presetupfuncs=None,
    intents=None,
    createopts=None,
):
    """return a repository object for the specified path"""
    obj = _peerorrepo(
        ui,
        path,
        create,
        presetupfuncs=presetupfuncs,
        intents=intents,
        createopts=createopts,
    )
    repo = obj.local()
    if not repo:
        # Callers of repository() require a local repo, not a remote peer.
        raise error.Abort(
            _(b"repository '%s' is not local") % (path or obj.url())
        )
    return repo.filtered(b'visible')
239
239
240
240
def peer(uiorrepo, opts, path, create=False, intents=None, createopts=None):
    '''return a repository peer for the specified path'''
    remui = remoteui(uiorrepo, opts)
    obj = _peerorrepo(remui, path, create, intents=intents, createopts=createopts)
    return obj.peer()
247
247
248
248
def defaultdest(source):
    """return default destination of clone if none is given

    >>> defaultdest(b'foo')
    'foo'
    >>> defaultdest(b'/foo/bar')
    'bar'
    >>> defaultdest(b'/')
    ''
    >>> defaultdest(b'')
    ''
    >>> defaultdest(b'http://example.org/')
    ''
    >>> defaultdest(b'http://example.org/foo/')
    'foo'
    """
    path = util.url(source).path
    # No path component (bare host / empty source): no sensible default.
    return os.path.basename(os.path.normpath(path)) if path else b''
269
269
270
270
def sharedreposource(repo):
    """Returns repository object for source repository of a shared repo.

    If repo is not a shared repository, returns None.
    """
    if repo.sharedpath == repo.path:
        return None

    # Return the cached source repo when one was resolved earlier.
    if util.safehasattr(repo, b'srcrepo') and repo.srcrepo:
        return repo.srcrepo

    # the sharedpath always ends in the .hg; we want the path to the repo
    sourcepath = repo.vfs.split(repo.sharedpath)[0]
    srcurl, branches = parseurl(sourcepath)
    srcrepo = repository(repo.ui, srcurl)
    repo.srcrepo = srcrepo  # cache for subsequent calls
    return srcrepo
288
288
289
289
def share(
    ui,
    source,
    dest=None,
    update=True,
    bookmarks=True,
    defaultpath=None,
    relative=False,
):
    '''create a shared repository'''

    if not islocal(source):
        raise error.Abort(_(b'can only share local repositories'))

    dest = defaultdest(source) if not dest else ui.expandpath(dest)

    if isinstance(source, bytes):
        origsource = ui.expandpath(source)
        source, branches = parseurl(origsource)
        srcrepo = repository(ui, source)
        rev, checkout = addbranchrevs(srcrepo, srcrepo, branches, None)
    else:
        srcrepo = source.local()
        checkout = None

    shareditems = {sharedbookmarks} if bookmarks else set()

    r = repository(
        ui,
        dest,
        create=True,
        createopts={
            b'sharedrepo': srcrepo,
            b'sharedrelative': relative,
            b'shareditems': shareditems,
        },
    )

    postshare(srcrepo, r, defaultpath=defaultpath)
    # Re-open the destination so it picks up the post-share configuration.
    r = repository(ui, dest)
    _postshareupdate(r, update, checkout=checkout)
    return r
337
337
338
338
def _prependsourcehgrc(repo):
    """Copy the share source's config and prepend it to this repo's .hg/hgrc.

    Only done on unshare when the share was created with the share-safe
    method, where the source's config was shared with this repo.
    """
    sharedvfs = vfsmod.vfs(repo.sharedpath)
    localvfs = vfsmod.vfs(repo.path)

    if not sharedvfs.exists(b'hgrc'):
        return

    existing = localvfs.read(b'hgrc') if localvfs.exists(b'hgrc') else b''
    inherited = sharedvfs.read(b'hgrc')

    with localvfs(b'hgrc', b'wb') as out:
        out.write(b"# Config copied from shared source\n")
        out.write(inherited)
        out.write(b'\n')
        out.write(existing)
359
359
360
360
def unshare(ui, repo):
    """convert a shared repository to a normal one

    Copy the store data to the repo and remove the sharedpath data.

    Returns a new repository object representing the unshared repository.

    The passed repository object is not usable after this function is
    called.
    """

    with repo.lock():
        # we use locks here because if we race with commit, we
        # can end up with extra data in the cloned revlogs that's
        # not pointed to by changesets, thus causing verify to
        # fail
        storelock = copystore(ui, repo, repo.path)
        with storelock or util.nullcontextmanager():
            if requirements.SHARESAFE_REQUIREMENT in repo.requirements:
                # we were sharing .hg/hgrc of the share source with the current
                # repo. We need to copy that while unsharing otherwise it can
                # disable hooks and other checks
                _prependsourcehgrc(repo)

            # Retire the pointer to the share source.
            sharedpathfile = repo.vfs.join(b'sharedpath')
            util.rename(sharedpathfile, sharedpathfile + b'.old')

            repo.requirements.discard(requirements.SHARED_REQUIREMENT)
            repo.requirements.discard(requirements.RELATIVE_SHARED_REQUIREMENT)
            scmutil.writereporequirements(repo)

    # Removing share changes some fundamental properties of the repo instance.
    # So we instantiate a new repo object and operate on it rather than
    # try to keep the existing repo usable.
    newrepo = repository(repo.baseui, repo.root, create=False)

    # TODO: figure out how to access subrepos that exist, but were previously
    # removed from .hgsub
    wctx = newrepo[b'.']
    for subpath in sorted(wctx.substate):
        wctx.sub(subpath).unshare()

    # Make any further use of the old repo object fail loudly.
    localrepo.poisonrepository(repo)

    return newrepo
407
407
408
408
def postshare(sourcerepo, destrepo, defaultpath=None):
    """Called after a new shared repo is created.

    The new repo only has a requirements file and pointer to the source.
    This function configures additional shared data.

    Extensions can wrap this function and write additional entries to
    destrepo/.hg/shared to indicate additional pieces of data to be shared.
    """
    default = defaultpath or sourcerepo.ui.config(b'paths', b'default')
    if default:
        hgrc = util.tonativeeol(b'[paths]\ndefault = %s\n' % default)
        destrepo.vfs.write(b'hgrc', hgrc)
    if requirements.NARROW_REQUIREMENT in sourcerepo.requirements:
        with destrepo.wlock():
            narrowspec.copytoworkingcopy(destrepo)
425
425
426
426
def _postshareupdate(repo, update, checkout=None):
    """Maybe perform a working directory update after a shared repo is created.

    ``update`` can be a boolean or a revision to update to.
    """
    if not update:
        return

    repo.ui.status(_(b"updating working directory\n"))
    if update is not True:
        checkout = update
    # Try the explicit checkout first, then fall back to default/tip.
    for candidate in (checkout, b'default', b'tip'):
        if candidate is None:
            continue
        try:
            uprev = repo.lookup(candidate)
            break
        except error.RepoLookupError:
            continue
    _update(repo, uprev)
447
447
448
448
def copystore(ui, srcrepo, destpath):
    """copy files from store of srcrepo in destpath

    returns destlock
    """
    destlock = None
    try:
        hardlink = None
        # hardlink starts as None, so the first topic shown is 'copying';
        # util.copyfiles updates the flag as it goes.
        topic = _(b'linking') if hardlink else _(b'copying')
        with ui.makeprogress(topic, unit=_(b'files')) as progress:
            total = 0
            srcpublishing = srcrepo.publishing()
            srcvfs = vfsmod.vfs(srcrepo.sharedpath)
            dstvfs = vfsmod.vfs(destpath)
            for relpath in srcrepo.store.copylist():
                if srcpublishing and relpath.endswith(b'phaseroots'):
                    continue
                dstbase = os.path.dirname(relpath)
                if dstbase and not dstvfs.exists(dstbase):
                    dstvfs.mkdir(dstbase)
                if not srcvfs.exists(relpath):
                    continue
                if relpath.endswith(b'data'):
                    # 'dstbase' may be empty (e.g. revlog format 0)
                    lockfile = os.path.join(dstbase, b"lock")
                    # lock to avoid premature writing to the target
                    destlock = lock.lock(dstvfs, lockfile)
                hardlink, copied = util.copyfiles(
                    srcvfs.join(relpath), dstvfs.join(relpath), hardlink, progress
                )
                total += copied
        if hardlink:
            ui.debug(b"linked %d files\n" % total)
        else:
            ui.debug(b"copied %d files\n" % total)
        return destlock
    except:  # re-raises
        release(destlock)
        raise
487
487
488
488
def clonewithshare(
    ui,
    peeropts,
    sharepath,
    source,
    srcpeer,
    dest,
    pull=False,
    rev=None,
    update=True,
    stream=False,
):
    """Perform a clone using a shared repo.

    The store for the repository will be located at <sharepath>/.hg. The
    specified revisions will be cloned or pulled from "source". A shared repo
    will be created at "dest" and a working copy will be created if "update" is
    True.
    """
    revs = None
    if rev:
        if not srcpeer.capable(b'lookup'):
            raise error.Abort(
                _(
                    b"src repository does not support "
                    b"revision lookup and so doesn't "
                    b"support clone by revision"
                )
            )

        # TODO this is batchable.
        resolved = []
        for spec in rev:
            with srcpeer.commandexecutor() as executor:
                resolved.append(
                    executor.callcommand(
                        b'lookup',
                        {
                            b'key': spec,
                        },
                    ).result()
                )
        revs = resolved

    # Obtain a lock before checking for or cloning the pooled repo otherwise
    # 2 clients may race creating or populating it.
    pooldir = os.path.dirname(sharepath)
    # lock class requires the directory to exist.
    try:
        util.makedir(pooldir, False)
    except OSError as e:
        if e.errno != errno.EEXIST:
            raise

    poolvfs = vfsmod.vfs(pooldir)
    basename = os.path.basename(sharepath)

    with lock.lock(poolvfs, b'%s.lock' % basename):
        if os.path.exists(sharepath):
            ui.status(
                _(b'(sharing from existing pooled repository %s)\n') % basename
            )
        else:
            ui.status(
                _(b'(sharing from new pooled repository %s)\n') % basename
            )
            # Always use pull mode because hardlinks in share mode don't work
            # well. Never update because working copies aren't necessary in
            # share mode.
            clone(
                ui,
                peeropts,
                source,
                dest=sharepath,
                pull=True,
                revs=rev,
                update=False,
                stream=stream,
            )

    # Resolve the value to put in [paths] section for the source.
    if islocal(source):
        defaultpath = os.path.abspath(util.urllocalpath(source))
    else:
        defaultpath = source

    sharerepo = repository(ui, path=sharepath)
    destrepo = share(
        ui,
        sharerepo,
        dest=dest,
        update=False,
        bookmarks=False,
        defaultpath=defaultpath,
    )

    # We need to perform a pull against the dest repo to fetch bookmarks
    # and other non-store data that isn't shared by default. In the case of
    # non-existing shared repo, this means we pull from the remote twice. This
    # is a bit weird. But at the time it was implemented, there wasn't an easy
    # way to pull just non-changegroup data.
    exchange.pull(destrepo, srcpeer, heads=revs)

    _postshareupdate(destrepo, update)

    return srcpeer, peer(ui, peeropts, dest)
595
595
596
596
597 # Recomputing caches is often slow on big repos, so copy them.
597 # Recomputing caches is often slow on big repos, so copy them.
598 def _copycache(srcrepo, dstcachedir, fname):
598 def _copycache(srcrepo, dstcachedir, fname):
599 """copy a cache from srcrepo to destcachedir (if it exists)"""
599 """copy a cache from srcrepo to destcachedir (if it exists)"""
600 srcfname = srcrepo.cachevfs.join(fname)
600 srcfname = srcrepo.cachevfs.join(fname)
601 dstfname = os.path.join(dstcachedir, fname)
601 dstfname = os.path.join(dstcachedir, fname)
602 if os.path.exists(srcfname):
602 if os.path.exists(srcfname):
603 if not os.path.exists(dstcachedir):
603 if not os.path.exists(dstcachedir):
604 os.mkdir(dstcachedir)
604 os.mkdir(dstcachedir)
605 util.copyfile(srcfname, dstfname)
605 util.copyfile(srcfname, dstfname)
606
606
607
607
608 def clone(
608 def clone(
609 ui,
609 ui,
610 peeropts,
610 peeropts,
611 source,
611 source,
612 dest=None,
612 dest=None,
613 pull=False,
613 pull=False,
614 revs=None,
614 revs=None,
615 update=True,
615 update=True,
616 stream=False,
616 stream=False,
617 branch=None,
617 branch=None,
618 shareopts=None,
618 shareopts=None,
619 storeincludepats=None,
619 storeincludepats=None,
620 storeexcludepats=None,
620 storeexcludepats=None,
621 depth=None,
621 depth=None,
622 ):
622 ):
623 """Make a copy of an existing repository.
623 """Make a copy of an existing repository.
624
624
625 Create a copy of an existing repository in a new directory. The
625 Create a copy of an existing repository in a new directory. The
626 source and destination are URLs, as passed to the repository
626 source and destination are URLs, as passed to the repository
627 function. Returns a pair of repository peers, the source and
627 function. Returns a pair of repository peers, the source and
628 newly created destination.
628 newly created destination.
629
629
630 The location of the source is added to the new repository's
630 The location of the source is added to the new repository's
631 .hg/hgrc file, as the default to be used for future pulls and
631 .hg/hgrc file, as the default to be used for future pulls and
632 pushes.
632 pushes.
633
633
634 If an exception is raised, the partly cloned/updated destination
634 If an exception is raised, the partly cloned/updated destination
635 repository will be deleted.
635 repository will be deleted.
636
636
637 Arguments:
637 Arguments:
638
638
639 source: repository object or URL
639 source: repository object or URL
640
640
641 dest: URL of destination repository to create (defaults to base
641 dest: URL of destination repository to create (defaults to base
642 name of source repository)
642 name of source repository)
643
643
644 pull: always pull from source repository, even in local case or if the
644 pull: always pull from source repository, even in local case or if the
645 server prefers streaming
645 server prefers streaming
646
646
647 stream: stream raw data uncompressed from repository (fast over
647 stream: stream raw data uncompressed from repository (fast over
648 LAN, slow over WAN)
648 LAN, slow over WAN)
649
649
650 revs: revision to clone up to (implies pull=True)
650 revs: revision to clone up to (implies pull=True)
651
651
652 update: update working directory after clone completes, if
652 update: update working directory after clone completes, if
653 destination is local repository (True means update to default rev,
653 destination is local repository (True means update to default rev,
654 anything else is treated as a revision)
654 anything else is treated as a revision)
655
655
656 branch: branches to clone
656 branch: branches to clone
657
657
658 shareopts: dict of options to control auto sharing behavior. The "pool" key
658 shareopts: dict of options to control auto sharing behavior. The "pool" key
659 activates auto sharing mode and defines the directory for stores. The
659 activates auto sharing mode and defines the directory for stores. The
660 "mode" key determines how to construct the directory name of the shared
660 "mode" key determines how to construct the directory name of the shared
661 repository. "identity" means the name is derived from the node of the first
661 repository. "identity" means the name is derived from the node of the first
662 changeset in the repository. "remote" means the name is derived from the
662 changeset in the repository. "remote" means the name is derived from the
663 remote's path/URL. Defaults to "identity."
663 remote's path/URL. Defaults to "identity."
664
664
665 storeincludepats and storeexcludepats: sets of file patterns to include and
665 storeincludepats and storeexcludepats: sets of file patterns to include and
666 exclude in the repository copy, respectively. If not defined, all files
666 exclude in the repository copy, respectively. If not defined, all files
667 will be included (a "full" clone). Otherwise a "narrow" clone containing
667 will be included (a "full" clone). Otherwise a "narrow" clone containing
668 only the requested files will be performed. If ``storeincludepats`` is not
668 only the requested files will be performed. If ``storeincludepats`` is not
669 defined but ``storeexcludepats`` is, ``storeincludepats`` is assumed to be
669 defined but ``storeexcludepats`` is, ``storeincludepats`` is assumed to be
670 ``path:.``. If both are empty sets, no files will be cloned.
670 ``path:.``. If both are empty sets, no files will be cloned.
671 """
671 """
672
672
673 if isinstance(source, bytes):
673 if isinstance(source, bytes):
674 origsource = ui.expandpath(source)
674 origsource = ui.expandpath(source)
675 source, branches = parseurl(origsource, branch)
675 source, branches = parseurl(origsource, branch)
676 srcpeer = peer(ui, peeropts, source)
676 srcpeer = peer(ui, peeropts, source)
677 else:
677 else:
678 srcpeer = source.peer() # in case we were called with a localrepo
678 srcpeer = source.peer() # in case we were called with a localrepo
679 branches = (None, branch or [])
679 branches = (None, branch or [])
680 origsource = source = srcpeer.url()
680 origsource = source = srcpeer.url()
681 revs, checkout = addbranchrevs(srcpeer, srcpeer, branches, revs)
681 revs, checkout = addbranchrevs(srcpeer, srcpeer, branches, revs)
682
682
683 if dest is None:
683 if dest is None:
684 dest = defaultdest(source)
684 dest = defaultdest(source)
685 if dest:
685 if dest:
686 ui.status(_(b"destination directory: %s\n") % dest)
686 ui.status(_(b"destination directory: %s\n") % dest)
687 else:
687 else:
688 dest = ui.expandpath(dest)
688 dest = ui.expandpath(dest)
689
689
690 dest = util.urllocalpath(dest)
690 dest = util.urllocalpath(dest)
691 source = util.urllocalpath(source)
691 source = util.urllocalpath(source)
692
692
693 if not dest:
693 if not dest:
694 raise error.InputError(_(b"empty destination path is not valid"))
694 raise error.InputError(_(b"empty destination path is not valid"))
695
695
696 destvfs = vfsmod.vfs(dest, expandpath=True)
696 destvfs = vfsmod.vfs(dest, expandpath=True)
697 if destvfs.lexists():
697 if destvfs.lexists():
698 if not destvfs.isdir():
698 if not destvfs.isdir():
699 raise error.InputError(_(b"destination '%s' already exists") % dest)
699 raise error.InputError(_(b"destination '%s' already exists") % dest)
700 elif destvfs.listdir():
700 elif destvfs.listdir():
701 raise error.InputError(_(b"destination '%s' is not empty") % dest)
701 raise error.InputError(_(b"destination '%s' is not empty") % dest)
702
702
703 createopts = {}
703 createopts = {}
704 narrow = False
704 narrow = False
705
705
706 if storeincludepats is not None:
706 if storeincludepats is not None:
707 narrowspec.validatepatterns(storeincludepats)
707 narrowspec.validatepatterns(storeincludepats)
708 narrow = True
708 narrow = True
709
709
710 if storeexcludepats is not None:
710 if storeexcludepats is not None:
711 narrowspec.validatepatterns(storeexcludepats)
711 narrowspec.validatepatterns(storeexcludepats)
712 narrow = True
712 narrow = True
713
713
714 if narrow:
714 if narrow:
715 # Include everything by default if only exclusion patterns defined.
715 # Include everything by default if only exclusion patterns defined.
716 if storeexcludepats and not storeincludepats:
716 if storeexcludepats and not storeincludepats:
717 storeincludepats = {b'path:.'}
717 storeincludepats = {b'path:.'}
718
718
719 createopts[b'narrowfiles'] = True
719 createopts[b'narrowfiles'] = True
720
720
721 if depth:
721 if depth:
722 createopts[b'shallowfilestore'] = True
722 createopts[b'shallowfilestore'] = True
723
723
724 if srcpeer.capable(b'lfs-serve'):
724 if srcpeer.capable(b'lfs-serve'):
725 # Repository creation honors the config if it disabled the extension, so
725 # Repository creation honors the config if it disabled the extension, so
726 # we can't just announce that lfs will be enabled. This check avoids
726 # we can't just announce that lfs will be enabled. This check avoids
727 # saying that lfs will be enabled, and then saying it's an unknown
727 # saying that lfs will be enabled, and then saying it's an unknown
728 # feature. The lfs creation option is set in either case so that a
728 # feature. The lfs creation option is set in either case so that a
729 # requirement is added. If the extension is explicitly disabled but the
729 # requirement is added. If the extension is explicitly disabled but the
730 # requirement is set, the clone aborts early, before transferring any
730 # requirement is set, the clone aborts early, before transferring any
731 # data.
731 # data.
732 createopts[b'lfs'] = True
732 createopts[b'lfs'] = True
733
733
734 if extensions.disabled_help(b'lfs'):
734 if extensions.disabled_help(b'lfs'):
735 ui.status(
735 ui.status(
736 _(
736 _(
737 b'(remote is using large file support (lfs), but it is '
737 b'(remote is using large file support (lfs), but it is '
738 b'explicitly disabled in the local configuration)\n'
738 b'explicitly disabled in the local configuration)\n'
739 )
739 )
740 )
740 )
741 else:
741 else:
742 ui.status(
742 ui.status(
743 _(
743 _(
744 b'(remote is using large file support (lfs); lfs will '
744 b'(remote is using large file support (lfs); lfs will '
745 b'be enabled for this repository)\n'
745 b'be enabled for this repository)\n'
746 )
746 )
747 )
747 )
748
748
749 shareopts = shareopts or {}
749 shareopts = shareopts or {}
750 sharepool = shareopts.get(b'pool')
750 sharepool = shareopts.get(b'pool')
751 sharenamemode = shareopts.get(b'mode')
751 sharenamemode = shareopts.get(b'mode')
752 if sharepool and islocal(dest):
752 if sharepool and islocal(dest):
753 sharepath = None
753 sharepath = None
754 if sharenamemode == b'identity':
754 if sharenamemode == b'identity':
755 # Resolve the name from the initial changeset in the remote
755 # Resolve the name from the initial changeset in the remote
756 # repository. This returns nullid when the remote is empty. It
756 # repository. This returns nullid when the remote is empty. It
757 # raises RepoLookupError if revision 0 is filtered or otherwise
757 # raises RepoLookupError if revision 0 is filtered or otherwise
758 # not available. If we fail to resolve, sharing is not enabled.
758 # not available. If we fail to resolve, sharing is not enabled.
759 try:
759 try:
760 with srcpeer.commandexecutor() as e:
760 with srcpeer.commandexecutor() as e:
761 rootnode = e.callcommand(
761 rootnode = e.callcommand(
762 b'lookup',
762 b'lookup',
763 {
763 {
764 b'key': b'0',
764 b'key': b'0',
765 },
765 },
766 ).result()
766 ).result()
767
767
768 if rootnode != nullid:
768 if rootnode != nullid:
769 sharepath = os.path.join(sharepool, hex(rootnode))
769 sharepath = os.path.join(sharepool, hex(rootnode))
770 else:
770 else:
771 ui.status(
771 ui.status(
772 _(
772 _(
773 b'(not using pooled storage: '
773 b'(not using pooled storage: '
774 b'remote appears to be empty)\n'
774 b'remote appears to be empty)\n'
775 )
775 )
776 )
776 )
777 except error.RepoLookupError:
777 except error.RepoLookupError:
778 ui.status(
778 ui.status(
779 _(
779 _(
780 b'(not using pooled storage: '
780 b'(not using pooled storage: '
781 b'unable to resolve identity of remote)\n'
781 b'unable to resolve identity of remote)\n'
782 )
782 )
783 )
783 )
784 elif sharenamemode == b'remote':
784 elif sharenamemode == b'remote':
785 sharepath = os.path.join(
785 sharepath = os.path.join(
786 sharepool, hex(hashutil.sha1(source).digest())
786 sharepool, hex(hashutil.sha1(source).digest())
787 )
787 )
788 else:
788 else:
789 raise error.Abort(
789 raise error.Abort(
790 _(b'unknown share naming mode: %s') % sharenamemode
790 _(b'unknown share naming mode: %s') % sharenamemode
791 )
791 )
792
792
793 # TODO this is a somewhat arbitrary restriction.
793 # TODO this is a somewhat arbitrary restriction.
794 if narrow:
794 if narrow:
795 ui.status(_(b'(pooled storage not supported for narrow clones)\n'))
795 ui.status(_(b'(pooled storage not supported for narrow clones)\n'))
796 sharepath = None
796 sharepath = None
797
797
798 if sharepath:
798 if sharepath:
799 return clonewithshare(
799 return clonewithshare(
800 ui,
800 ui,
801 peeropts,
801 peeropts,
802 sharepath,
802 sharepath,
803 source,
803 source,
804 srcpeer,
804 srcpeer,
805 dest,
805 dest,
806 pull=pull,
806 pull=pull,
807 rev=revs,
807 rev=revs,
808 update=update,
808 update=update,
809 stream=stream,
809 stream=stream,
810 )
810 )
811
811
812 srclock = destlock = cleandir = None
812 srclock = destlock = cleandir = None
813 srcrepo = srcpeer.local()
813 srcrepo = srcpeer.local()
814 try:
814 try:
815 abspath = origsource
815 abspath = origsource
816 if islocal(origsource):
816 if islocal(origsource):
817 abspath = os.path.abspath(util.urllocalpath(origsource))
817 abspath = os.path.abspath(util.urllocalpath(origsource))
818
818
819 if islocal(dest):
819 if islocal(dest):
820 cleandir = dest
820 cleandir = dest
821
821
822 copy = False
822 copy = False
823 if (
823 if (
824 srcrepo
824 srcrepo
825 and srcrepo.cancopy()
825 and srcrepo.cancopy()
826 and islocal(dest)
826 and islocal(dest)
827 and not phases.hassecret(srcrepo)
827 and not phases.hassecret(srcrepo)
828 ):
828 ):
829 copy = not pull and not revs
829 copy = not pull and not revs
830
830
831 # TODO this is a somewhat arbitrary restriction.
831 # TODO this is a somewhat arbitrary restriction.
832 if narrow:
832 if narrow:
833 copy = False
833 copy = False
834
834
835 if copy:
835 if copy:
836 try:
836 try:
837 # we use a lock here because if we race with commit, we
837 # we use a lock here because if we race with commit, we
838 # can end up with extra data in the cloned revlogs that's
838 # can end up with extra data in the cloned revlogs that's
839 # not pointed to by changesets, thus causing verify to
839 # not pointed to by changesets, thus causing verify to
840 # fail
840 # fail
841 srclock = srcrepo.lock(wait=False)
841 srclock = srcrepo.lock(wait=False)
842 except error.LockError:
842 except error.LockError:
843 copy = False
843 copy = False
844
844
845 if copy:
845 if copy:
846 srcrepo.hook(b'preoutgoing', throw=True, source=b'clone')
846 srcrepo.hook(b'preoutgoing', throw=True, source=b'clone')
847 hgdir = os.path.realpath(os.path.join(dest, b".hg"))
847 hgdir = os.path.realpath(os.path.join(dest, b".hg"))
848 if not os.path.exists(dest):
848 if not os.path.exists(dest):
849 util.makedirs(dest)
849 util.makedirs(dest)
850 else:
850 else:
851 # only clean up directories we create ourselves
851 # only clean up directories we create ourselves
852 cleandir = hgdir
852 cleandir = hgdir
853 try:
853 try:
854 destpath = hgdir
854 destpath = hgdir
855 util.makedir(destpath, notindexed=True)
855 util.makedir(destpath, notindexed=True)
856 except OSError as inst:
856 except OSError as inst:
857 if inst.errno == errno.EEXIST:
857 if inst.errno == errno.EEXIST:
858 cleandir = None
858 cleandir = None
859 raise error.Abort(
859 raise error.Abort(
860 _(b"destination '%s' already exists") % dest
860 _(b"destination '%s' already exists") % dest
861 )
861 )
862 raise
862 raise
863
863
864 destlock = copystore(ui, srcrepo, destpath)
864 destlock = copystore(ui, srcrepo, destpath)
865 # copy bookmarks over
865 # copy bookmarks over
866 srcbookmarks = srcrepo.vfs.join(b'bookmarks')
866 srcbookmarks = srcrepo.vfs.join(b'bookmarks')
867 dstbookmarks = os.path.join(destpath, b'bookmarks')
867 dstbookmarks = os.path.join(destpath, b'bookmarks')
868 if os.path.exists(srcbookmarks):
868 if os.path.exists(srcbookmarks):
869 util.copyfile(srcbookmarks, dstbookmarks)
869 util.copyfile(srcbookmarks, dstbookmarks)
870
870
871 dstcachedir = os.path.join(destpath, b'cache')
871 dstcachedir = os.path.join(destpath, b'cache')
872 for cache in cacheutil.cachetocopy(srcrepo):
872 for cache in cacheutil.cachetocopy(srcrepo):
873 _copycache(srcrepo, dstcachedir, cache)
873 _copycache(srcrepo, dstcachedir, cache)
874
874
875 # we need to re-init the repo after manually copying the data
875 # we need to re-init the repo after manually copying the data
876 # into it
876 # into it
877 destpeer = peer(srcrepo, peeropts, dest)
877 destpeer = peer(srcrepo, peeropts, dest)
878 srcrepo.hook(b'outgoing', source=b'clone', node=nullhex)
878 srcrepo.hook(b'outgoing', source=b'clone', node=nullhex)
879 else:
879 else:
880 try:
880 try:
881 # only pass ui when no srcrepo
881 # only pass ui when no srcrepo
882 destpeer = peer(
882 destpeer = peer(
883 srcrepo or ui,
883 srcrepo or ui,
884 peeropts,
884 peeropts,
885 dest,
885 dest,
886 create=True,
886 create=True,
887 createopts=createopts,
887 createopts=createopts,
888 )
888 )
889 except OSError as inst:
889 except OSError as inst:
890 if inst.errno == errno.EEXIST:
890 if inst.errno == errno.EEXIST:
891 cleandir = None
891 cleandir = None
892 raise error.Abort(
892 raise error.Abort(
893 _(b"destination '%s' already exists") % dest
893 _(b"destination '%s' already exists") % dest
894 )
894 )
895 raise
895 raise
896
896
897 if revs:
897 if revs:
898 if not srcpeer.capable(b'lookup'):
898 if not srcpeer.capable(b'lookup'):
899 raise error.Abort(
899 raise error.Abort(
900 _(
900 _(
901 b"src repository does not support "
901 b"src repository does not support "
902 b"revision lookup and so doesn't "
902 b"revision lookup and so doesn't "
903 b"support clone by revision"
903 b"support clone by revision"
904 )
904 )
905 )
905 )
906
906
907 # TODO this is batchable.
907 # TODO this is batchable.
908 remoterevs = []
908 remoterevs = []
909 for rev in revs:
909 for rev in revs:
910 with srcpeer.commandexecutor() as e:
910 with srcpeer.commandexecutor() as e:
911 remoterevs.append(
911 remoterevs.append(
912 e.callcommand(
912 e.callcommand(
913 b'lookup',
913 b'lookup',
914 {
914 {
915 b'key': rev,
915 b'key': rev,
916 },
916 },
917 ).result()
917 ).result()
918 )
918 )
919 revs = remoterevs
919 revs = remoterevs
920
920
921 checkout = revs[0]
921 checkout = revs[0]
922 else:
922 else:
923 revs = None
923 revs = None
924 local = destpeer.local()
924 local = destpeer.local()
925 if local:
925 if local:
926 if narrow:
926 if narrow:
927 with local.wlock(), local.lock():
927 with local.wlock(), local.lock():
928 local.setnarrowpats(storeincludepats, storeexcludepats)
928 local.setnarrowpats(storeincludepats, storeexcludepats)
929 narrowspec.copytoworkingcopy(local)
929 narrowspec.copytoworkingcopy(local)
930
930
931 u = util.url(abspath)
931 u = util.url(abspath)
932 defaulturl = bytes(u)
932 defaulturl = bytes(u)
933 local.ui.setconfig(b'paths', b'default', defaulturl, b'clone')
933 local.ui.setconfig(b'paths', b'default', defaulturl, b'clone')
934 if not stream:
934 if not stream:
935 if pull:
935 if pull:
936 stream = False
936 stream = False
937 else:
937 else:
938 stream = None
938 stream = None
939 # internal config: ui.quietbookmarkmove
939 # internal config: ui.quietbookmarkmove
940 overrides = {(b'ui', b'quietbookmarkmove'): True}
940 overrides = {(b'ui', b'quietbookmarkmove'): True}
941 with local.ui.configoverride(overrides, b'clone'):
941 with local.ui.configoverride(overrides, b'clone'):
942 exchange.pull(
942 exchange.pull(
943 local,
943 local,
944 srcpeer,
944 srcpeer,
945 revs,
945 revs,
946 streamclonerequested=stream,
946 streamclonerequested=stream,
947 includepats=storeincludepats,
947 includepats=storeincludepats,
948 excludepats=storeexcludepats,
948 excludepats=storeexcludepats,
949 depth=depth,
949 depth=depth,
950 )
950 )
951 elif srcrepo:
951 elif srcrepo:
952 # TODO lift restriction once exchange.push() accepts narrow
952 # TODO lift restriction once exchange.push() accepts narrow
953 # push.
953 # push.
954 if narrow:
954 if narrow:
955 raise error.Abort(
955 raise error.Abort(
956 _(
956 _(
957 b'narrow clone not available for '
957 b'narrow clone not available for '
958 b'remote destinations'
958 b'remote destinations'
959 )
959 )
960 )
960 )
961
961
962 exchange.push(
962 exchange.push(
963 srcrepo,
963 srcrepo,
964 destpeer,
964 destpeer,
965 revs=revs,
965 revs=revs,
966 bookmarks=srcrepo._bookmarks.keys(),
966 bookmarks=srcrepo._bookmarks.keys(),
967 )
967 )
968 else:
968 else:
969 raise error.Abort(
969 raise error.Abort(
970 _(b"clone from remote to remote not supported")
970 _(b"clone from remote to remote not supported")
971 )
971 )
972
972
973 cleandir = None
973 cleandir = None
974
974
975 destrepo = destpeer.local()
975 destrepo = destpeer.local()
976 if destrepo:
976 if destrepo:
977 template = uimod.samplehgrcs[b'cloned']
977 template = uimod.samplehgrcs[b'cloned']
978 u = util.url(abspath)
978 u = util.url(abspath)
979 u.passwd = None
979 u.passwd = None
980 defaulturl = bytes(u)
980 defaulturl = bytes(u)
981 destrepo.vfs.write(b'hgrc', util.tonativeeol(template % defaulturl))
981 destrepo.vfs.write(b'hgrc', util.tonativeeol(template % defaulturl))
982 destrepo.ui.setconfig(b'paths', b'default', defaulturl, b'clone')
982 destrepo.ui.setconfig(b'paths', b'default', defaulturl, b'clone')
983
983
984 if ui.configbool(b'experimental', b'remotenames'):
984 if ui.configbool(b'experimental', b'remotenames'):
985 logexchange.pullremotenames(destrepo, srcpeer)
985 logexchange.pullremotenames(destrepo, srcpeer)
986
986
987 if update:
987 if update:
988 if update is not True:
988 if update is not True:
989 with srcpeer.commandexecutor() as e:
989 with srcpeer.commandexecutor() as e:
990 checkout = e.callcommand(
990 checkout = e.callcommand(
991 b'lookup',
991 b'lookup',
992 {
992 {
993 b'key': update,
993 b'key': update,
994 },
994 },
995 ).result()
995 ).result()
996
996
997 uprev = None
997 uprev = None
998 status = None
998 status = None
999 if checkout is not None:
999 if checkout is not None:
1000 # Some extensions (at least hg-git and hg-subversion) have
1000 # Some extensions (at least hg-git and hg-subversion) have
1001 # a peer.lookup() implementation that returns a name instead
1001 # a peer.lookup() implementation that returns a name instead
1002 # of a nodeid. We work around it here until we've figured
1002 # of a nodeid. We work around it here until we've figured
1003 # out a better solution.
1003 # out a better solution.
1004 if len(checkout) == 20 and checkout in destrepo:
1004 if len(checkout) == 20 and checkout in destrepo:
1005 uprev = checkout
1005 uprev = checkout
1006 elif scmutil.isrevsymbol(destrepo, checkout):
1006 elif scmutil.isrevsymbol(destrepo, checkout):
1007 uprev = scmutil.revsymbol(destrepo, checkout).node()
1007 uprev = scmutil.revsymbol(destrepo, checkout).node()
1008 else:
1008 else:
1009 if update is not True:
1009 if update is not True:
1010 try:
1010 try:
1011 uprev = destrepo.lookup(update)
1011 uprev = destrepo.lookup(update)
1012 except error.RepoLookupError:
1012 except error.RepoLookupError:
1013 pass
1013 pass
1014 if uprev is None:
1014 if uprev is None:
1015 try:
1015 try:
1016 if destrepo._activebookmark:
1017 uprev = destrepo.lookup(destrepo._activebookmark)
1018 update = destrepo._activebookmark
1019 else:
1016 uprev = destrepo._bookmarks[b'@']
1020 uprev = destrepo._bookmarks[b'@']
1017 update = b'@'
1021 update = b'@'
1018 bn = destrepo[uprev].branch()
1022 bn = destrepo[uprev].branch()
1019 if bn == b'default':
1023 if bn == b'default':
1020 status = _(b"updating to bookmark @\n")
1024 status = _(b"updating to bookmark %s\n" % update)
1021 else:
1025 else:
1022 status = (
1026 status = (
1023 _(b"updating to bookmark @ on branch %s\n") % bn
1027 _(b"updating to bookmark %s on branch %s\n")
1024 )
1028 ) % (update, bn)
1025 except KeyError:
1029 except KeyError:
1026 try:
1030 try:
1027 uprev = destrepo.branchtip(b'default')
1031 uprev = destrepo.branchtip(b'default')
1028 except error.RepoLookupError:
1032 except error.RepoLookupError:
1029 uprev = destrepo.lookup(b'tip')
1033 uprev = destrepo.lookup(b'tip')
1030 if not status:
1034 if not status:
1031 bn = destrepo[uprev].branch()
1035 bn = destrepo[uprev].branch()
1032 status = _(b"updating to branch %s\n") % bn
1036 status = _(b"updating to branch %s\n") % bn
1033 destrepo.ui.status(status)
1037 destrepo.ui.status(status)
1034 _update(destrepo, uprev)
1038 _update(destrepo, uprev)
1035 if update in destrepo._bookmarks:
1039 if update in destrepo._bookmarks:
1036 bookmarks.activate(destrepo, update)
1040 bookmarks.activate(destrepo, update)
1037 finally:
1041 finally:
1038 release(srclock, destlock)
1042 release(srclock, destlock)
1039 if cleandir is not None:
1043 if cleandir is not None:
1040 shutil.rmtree(cleandir, True)
1044 shutil.rmtree(cleandir, True)
1041 if srcpeer is not None:
1045 if srcpeer is not None:
1042 srcpeer.close()
1046 srcpeer.close()
1043 return srcpeer, destpeer
1047 return srcpeer, destpeer
1044
1048
1045
1049
1046 def _showstats(repo, stats, quietempty=False):
1050 def _showstats(repo, stats, quietempty=False):
1047 if quietempty and stats.isempty():
1051 if quietempty and stats.isempty():
1048 return
1052 return
1049 repo.ui.status(
1053 repo.ui.status(
1050 _(
1054 _(
1051 b"%d files updated, %d files merged, "
1055 b"%d files updated, %d files merged, "
1052 b"%d files removed, %d files unresolved\n"
1056 b"%d files removed, %d files unresolved\n"
1053 )
1057 )
1054 % (
1058 % (
1055 stats.updatedcount,
1059 stats.updatedcount,
1056 stats.mergedcount,
1060 stats.mergedcount,
1057 stats.removedcount,
1061 stats.removedcount,
1058 stats.unresolvedcount,
1062 stats.unresolvedcount,
1059 )
1063 )
1060 )
1064 )
1061
1065
1062
1066
1063 def updaterepo(repo, node, overwrite, updatecheck=None):
1067 def updaterepo(repo, node, overwrite, updatecheck=None):
1064 """Update the working directory to node.
1068 """Update the working directory to node.
1065
1069
1066 When overwrite is set, changes are clobbered, merged else
1070 When overwrite is set, changes are clobbered, merged else
1067
1071
1068 returns stats (see pydoc mercurial.merge.applyupdates)"""
1072 returns stats (see pydoc mercurial.merge.applyupdates)"""
1069 repo.ui.deprecwarn(
1073 repo.ui.deprecwarn(
1070 b'prefer merge.update() or merge.clean_update() over hg.updaterepo()',
1074 b'prefer merge.update() or merge.clean_update() over hg.updaterepo()',
1071 b'5.7',
1075 b'5.7',
1072 )
1076 )
1073 return mergemod._update(
1077 return mergemod._update(
1074 repo,
1078 repo,
1075 node,
1079 node,
1076 branchmerge=False,
1080 branchmerge=False,
1077 force=overwrite,
1081 force=overwrite,
1078 labels=[b'working copy', b'destination'],
1082 labels=[b'working copy', b'destination'],
1079 updatecheck=updatecheck,
1083 updatecheck=updatecheck,
1080 )
1084 )
1081
1085
1082
1086
1083 def update(repo, node, quietempty=False, updatecheck=None):
1087 def update(repo, node, quietempty=False, updatecheck=None):
1084 """update the working directory to node"""
1088 """update the working directory to node"""
1085 stats = mergemod.update(repo[node], updatecheck=updatecheck)
1089 stats = mergemod.update(repo[node], updatecheck=updatecheck)
1086 _showstats(repo, stats, quietempty)
1090 _showstats(repo, stats, quietempty)
1087 if stats.unresolvedcount:
1091 if stats.unresolvedcount:
1088 repo.ui.status(_(b"use 'hg resolve' to retry unresolved file merges\n"))
1092 repo.ui.status(_(b"use 'hg resolve' to retry unresolved file merges\n"))
1089 return stats.unresolvedcount > 0
1093 return stats.unresolvedcount > 0
1090
1094
1091
1095
1092 # naming conflict in clone()
1096 # naming conflict in clone()
1093 _update = update
1097 _update = update
1094
1098
1095
1099
1096 def clean(repo, node, show_stats=True, quietempty=False):
1100 def clean(repo, node, show_stats=True, quietempty=False):
1097 """forcibly switch the working directory to node, clobbering changes"""
1101 """forcibly switch the working directory to node, clobbering changes"""
1098 stats = mergemod.clean_update(repo[node])
1102 stats = mergemod.clean_update(repo[node])
1099 assert stats.unresolvedcount == 0
1103 assert stats.unresolvedcount == 0
1100 if show_stats:
1104 if show_stats:
1101 _showstats(repo, stats, quietempty)
1105 _showstats(repo, stats, quietempty)
1102
1106
1103
1107
1104 # naming conflict in updatetotally()
1108 # naming conflict in updatetotally()
1105 _clean = clean
1109 _clean = clean
1106
1110
1107 _VALID_UPDATECHECKS = {
1111 _VALID_UPDATECHECKS = {
1108 mergemod.UPDATECHECK_ABORT,
1112 mergemod.UPDATECHECK_ABORT,
1109 mergemod.UPDATECHECK_NONE,
1113 mergemod.UPDATECHECK_NONE,
1110 mergemod.UPDATECHECK_LINEAR,
1114 mergemod.UPDATECHECK_LINEAR,
1111 mergemod.UPDATECHECK_NO_CONFLICT,
1115 mergemod.UPDATECHECK_NO_CONFLICT,
1112 }
1116 }
1113
1117
1114
1118
1115 def updatetotally(ui, repo, checkout, brev, clean=False, updatecheck=None):
1119 def updatetotally(ui, repo, checkout, brev, clean=False, updatecheck=None):
1116 """Update the working directory with extra care for non-file components
1120 """Update the working directory with extra care for non-file components
1117
1121
1118 This takes care of non-file components below:
1122 This takes care of non-file components below:
1119
1123
1120 :bookmark: might be advanced or (in)activated
1124 :bookmark: might be advanced or (in)activated
1121
1125
1122 This takes arguments below:
1126 This takes arguments below:
1123
1127
1124 :checkout: to which revision the working directory is updated
1128 :checkout: to which revision the working directory is updated
1125 :brev: a name, which might be a bookmark to be activated after updating
1129 :brev: a name, which might be a bookmark to be activated after updating
1126 :clean: whether changes in the working directory can be discarded
1130 :clean: whether changes in the working directory can be discarded
1127 :updatecheck: how to deal with a dirty working directory
1131 :updatecheck: how to deal with a dirty working directory
1128
1132
1129 Valid values for updatecheck are the UPDATECHECK_* constants
1133 Valid values for updatecheck are the UPDATECHECK_* constants
1130 defined in the merge module. Passing `None` will result in using the
1134 defined in the merge module. Passing `None` will result in using the
1131 configured default.
1135 configured default.
1132
1136
1133 * ABORT: abort if the working directory is dirty
1137 * ABORT: abort if the working directory is dirty
1134 * NONE: don't check (merge working directory changes into destination)
1138 * NONE: don't check (merge working directory changes into destination)
1135 * LINEAR: check that update is linear before merging working directory
1139 * LINEAR: check that update is linear before merging working directory
1136 changes into destination
1140 changes into destination
1137 * NO_CONFLICT: check that the update does not result in file merges
1141 * NO_CONFLICT: check that the update does not result in file merges
1138
1142
1139 This returns whether conflict is detected at updating or not.
1143 This returns whether conflict is detected at updating or not.
1140 """
1144 """
1141 if updatecheck is None:
1145 if updatecheck is None:
1142 updatecheck = ui.config(b'commands', b'update.check')
1146 updatecheck = ui.config(b'commands', b'update.check')
1143 if updatecheck not in _VALID_UPDATECHECKS:
1147 if updatecheck not in _VALID_UPDATECHECKS:
1144 # If not configured, or invalid value configured
1148 # If not configured, or invalid value configured
1145 updatecheck = mergemod.UPDATECHECK_LINEAR
1149 updatecheck = mergemod.UPDATECHECK_LINEAR
1146 if updatecheck not in _VALID_UPDATECHECKS:
1150 if updatecheck not in _VALID_UPDATECHECKS:
1147 raise ValueError(
1151 raise ValueError(
1148 r'Invalid updatecheck value %r (can accept %r)'
1152 r'Invalid updatecheck value %r (can accept %r)'
1149 % (updatecheck, _VALID_UPDATECHECKS)
1153 % (updatecheck, _VALID_UPDATECHECKS)
1150 )
1154 )
1151 with repo.wlock():
1155 with repo.wlock():
1152 movemarkfrom = None
1156 movemarkfrom = None
1153 warndest = False
1157 warndest = False
1154 if checkout is None:
1158 if checkout is None:
1155 updata = destutil.destupdate(repo, clean=clean)
1159 updata = destutil.destupdate(repo, clean=clean)
1156 checkout, movemarkfrom, brev = updata
1160 checkout, movemarkfrom, brev = updata
1157 warndest = True
1161 warndest = True
1158
1162
1159 if clean:
1163 if clean:
1160 ret = _clean(repo, checkout)
1164 ret = _clean(repo, checkout)
1161 else:
1165 else:
1162 if updatecheck == mergemod.UPDATECHECK_ABORT:
1166 if updatecheck == mergemod.UPDATECHECK_ABORT:
1163 cmdutil.bailifchanged(repo, merge=False)
1167 cmdutil.bailifchanged(repo, merge=False)
1164 updatecheck = mergemod.UPDATECHECK_NONE
1168 updatecheck = mergemod.UPDATECHECK_NONE
1165 ret = _update(repo, checkout, updatecheck=updatecheck)
1169 ret = _update(repo, checkout, updatecheck=updatecheck)
1166
1170
1167 if not ret and movemarkfrom:
1171 if not ret and movemarkfrom:
1168 if movemarkfrom == repo[b'.'].node():
1172 if movemarkfrom == repo[b'.'].node():
1169 pass # no-op update
1173 pass # no-op update
1170 elif bookmarks.update(repo, [movemarkfrom], repo[b'.'].node()):
1174 elif bookmarks.update(repo, [movemarkfrom], repo[b'.'].node()):
1171 b = ui.label(repo._activebookmark, b'bookmarks.active')
1175 b = ui.label(repo._activebookmark, b'bookmarks.active')
1172 ui.status(_(b"updating bookmark %s\n") % b)
1176 ui.status(_(b"updating bookmark %s\n") % b)
1173 else:
1177 else:
1174 # this can happen with a non-linear update
1178 # this can happen with a non-linear update
1175 b = ui.label(repo._activebookmark, b'bookmarks')
1179 b = ui.label(repo._activebookmark, b'bookmarks')
1176 ui.status(_(b"(leaving bookmark %s)\n") % b)
1180 ui.status(_(b"(leaving bookmark %s)\n") % b)
1177 bookmarks.deactivate(repo)
1181 bookmarks.deactivate(repo)
1178 elif brev in repo._bookmarks:
1182 elif brev in repo._bookmarks:
1179 if brev != repo._activebookmark:
1183 if brev != repo._activebookmark:
1180 b = ui.label(brev, b'bookmarks.active')
1184 b = ui.label(brev, b'bookmarks.active')
1181 ui.status(_(b"(activating bookmark %s)\n") % b)
1185 ui.status(_(b"(activating bookmark %s)\n") % b)
1182 bookmarks.activate(repo, brev)
1186 bookmarks.activate(repo, brev)
1183 elif brev:
1187 elif brev:
1184 if repo._activebookmark:
1188 if repo._activebookmark:
1185 b = ui.label(repo._activebookmark, b'bookmarks')
1189 b = ui.label(repo._activebookmark, b'bookmarks')
1186 ui.status(_(b"(leaving bookmark %s)\n") % b)
1190 ui.status(_(b"(leaving bookmark %s)\n") % b)
1187 bookmarks.deactivate(repo)
1191 bookmarks.deactivate(repo)
1188
1192
1189 if warndest:
1193 if warndest:
1190 destutil.statusotherdests(ui, repo)
1194 destutil.statusotherdests(ui, repo)
1191
1195
1192 return ret
1196 return ret
1193
1197
1194
1198
def merge(
    ctx,
    force=False,
    remind=True,
    labels=None,
):
    """Branch merge with node, resolving changes. Return true if any
    unresolved conflicts.

    :ctx: the changeset context to merge into the working directory
    :force: allow merging with outstanding changes
    :remind: print a "don't forget to commit" reminder on success
    :labels: optional conflict-marker labels passed through to the merge
    """
    repo = ctx.repo()
    stats = mergemod.merge(ctx, force=force, labels=labels)
    _showstats(repo, stats)
    if stats.unresolvedcount:
        # leave the user pointing at the two ways out of a conflicted merge
        repo.ui.status(
            _(
                b"use 'hg resolve' to retry unresolved file merges "
                b"or 'hg merge --abort' to abandon\n"
            )
        )
    elif remind:
        repo.ui.status(_(b"(branch merge, don't forget to commit)\n"))
    return stats.unresolvedcount > 0
1216
1220
1217
1221
def abortmerge(ui, repo):
    """Abort an in-progress merge, updating back to the first parent.

    Picks the pre-merge revision from the stored merge state when there
    were conflicts, otherwise falls back to the working directory's
    current parent.
    """
    ms = mergestatemod.mergestate.read(repo)
    if ms.active():
        # there were conflicts
        node = ms.localctx.hex()
    else:
        # there were no conflicts, mergestate was not stored
        node = repo[b'.'].hex()

    repo.ui.status(_(b"aborting the merge, updating back to %s\n") % node[:12])
    stats = mergemod.clean_update(repo[node])
    # a clean (discarding) update can never leave unresolved files behind
    assert stats.unresolvedcount == 0
    _showstats(repo, stats)
1231
1235
1232
1236
def _incoming(
    displaychlist, subreporecurse, ui, repo, source, opts, buffered=False
):
    """
    Helper for incoming / gincoming.
    displaychlist gets called with
    (remoterepo, incomingchangesetlist, displayer) parameters,
    and is supposed to contain only code that can't be unified.

    Returns 0 when incoming changes were found, otherwise the result of
    subreporecurse().
    """
    source, branches = parseurl(ui.expandpath(source), opts.get(b'branch'))
    other = peer(repo, opts, source)
    ui.status(_(b'comparing with %s\n') % util.hidepassword(source))
    revs, checkout = addbranchrevs(repo, other, branches, opts.get(b'rev'))

    if revs:
        revs = [other.lookup(rev) for rev in revs]
    other, chlist, cleanupfn = bundlerepo.getremotechanges(
        ui, repo, other, revs, opts[b"bundle"], opts[b"force"]
    )
    try:
        if not chlist:
            ui.status(_(b"no changes found\n"))
            return subreporecurse()
        ui.pager(b'incoming')
        displayer = logcmdutil.changesetdisplayer(
            ui, other, opts, buffered=buffered
        )
        displaychlist(other, chlist, displayer)
        displayer.close()
    finally:
        # always tear down the (possibly bundle-backed) remote view
        cleanupfn()
    subreporecurse()
    return 0  # exit code is zero since we found incoming changes
1266
1270
1267
1271
def incoming(ui, repo, source, opts):
    """Show changesets in *source* not present in the local repo.

    Delegates the heavy lifting to _incoming(); the two closures supply
    the behavior that cannot be shared with gincoming: subrepo recursion
    and changeset display (honoring --limit, --newest-first, --no-merges).
    """

    def subreporecurse():
        ret = 1
        if opts.get(b'subrepos'):
            ctx = repo[None]
            for subpath in sorted(ctx.substate):
                sub = ctx.sub(subpath)
                ret = min(ret, sub.incoming(ui, source, opts))
        return ret

    def display(other, chlist, displayer):
        limit = logcmdutil.getlimit(opts)
        if opts.get(b'newest_first'):
            chlist.reverse()
        count = 0
        for n in chlist:
            if limit is not None and count >= limit:
                break
            parents = [p for p in other.changelog.parents(n) if p != nullid]
            if opts.get(b'no_merges') and len(parents) == 2:
                continue
            count += 1
            displayer.show(other[n])

    return _incoming(display, subreporecurse, ui, repo, source, opts)
1293
1297
1294
1298
def _outgoing(ui, repo, dest, opts):
    """Compute changesets missing from *dest*.

    Returns a (missing, other) pair where *missing* is the list of
    outgoing changeset nodes and *other* is the remote peer. Raises
    Abort when no destination path is configured.
    """
    path = ui.paths.getpath(dest, default=(b'default-push', b'default'))
    if not path:
        raise error.Abort(
            _(b'default repository not configured!'),
            hint=_(b"see 'hg help config.paths'"),
        )
    # prefer the push-specific location when one is configured
    dest = path.pushloc or path.loc
    branches = path.branch, opts.get(b'branch') or []

    ui.status(_(b'comparing with %s\n') % util.hidepassword(dest))
    revs, checkout = addbranchrevs(repo, repo, branches, opts.get(b'rev'))
    if revs:
        revs = [repo[rev].node() for rev in scmutil.revrange(repo, revs)]

    other = peer(repo, opts, dest)
    outgoing = discovery.findcommonoutgoing(
        repo, other, revs, force=opts.get(b'force')
    )
    o = outgoing.missing
    if not o:
        scmutil.nochangesfound(repo.ui, repo, outgoing.excluded)
    return o, other
1318
1322
1319
1323
def outgoing(ui, repo, dest, opts):
    """Show changesets in the local repo not present in *dest*.

    Displays outgoing changesets (honoring --limit, --newest-first,
    --no-merges), runs outgoing hooks, and recurses into subrepos when
    --subrepos is given. Returns 0 when outgoing changes were found,
    otherwise the recursion result.
    """

    def recurse():
        ret = 1
        if opts.get(b'subrepos'):
            ctx = repo[None]
            for subpath in sorted(ctx.substate):
                sub = ctx.sub(subpath)
                ret = min(ret, sub.outgoing(ui, dest, opts))
        return ret

    limit = logcmdutil.getlimit(opts)
    o, other = _outgoing(ui, repo, dest, opts)
    if not o:
        # hooks still run (with an empty set) even when nothing is outgoing
        cmdutil.outgoinghooks(ui, repo, other, opts, o)
        return recurse()

    if opts.get(b'newest_first'):
        o.reverse()
    ui.pager(b'outgoing')
    displayer = logcmdutil.changesetdisplayer(ui, repo, opts)
    count = 0
    for n in o:
        if limit is not None and count >= limit:
            break
        parents = [p for p in repo.changelog.parents(n) if p != nullid]
        if opts.get(b'no_merges') and len(parents) == 2:
            continue
        count += 1
        displayer.show(repo[n])
    displayer.close()
    cmdutil.outgoinghooks(ui, repo, other, opts, o)
    recurse()
    return 0  # exit code is zero since we found outgoing changes
1353
1357
1354
1358
def verify(repo, level=None):
    """verify the consistency of a repository

    After the core verification, also verifies any subrepositories
    referenced from .hgsubstate across visible revisions. Returns the
    combined (or-ed) verification status.
    """
    ret = verifymod.verify(repo, level=level)

    # Broken subrepo references in hidden csets don't seem worth worrying about,
    # since they can't be pushed/pulled, and --hidden can be used if they are a
    # concern.

    # pathto() is needed for -R case
    revs = repo.revs(
        b"filelog(%s)", util.pathto(repo.root, repo.getcwd(), b'.hgsubstate')
    )

    if revs:
        repo.ui.status(_(b'checking subrepo links\n'))
        for rev in revs:
            ctx = repo[rev]
            try:
                for subpath in ctx.substate:
                    try:
                        ret = (
                            ctx.sub(subpath, allowcreate=False).verify() or ret
                        )
                    except error.RepoError as e:
                        repo.ui.warn(b'%d: %s\n' % (rev, e))
            except Exception:
                # a malformed .hgsubstate should not abort the whole verify
                repo.ui.warn(
                    _(b'.hgsubstate is corrupt in revision %s\n')
                    % short(ctx.node())
                )

    return ret
1387
1391
1388
1392
def remoteui(src, opts):
    """build a remote ui from ui or repo and opts

    Copies only the settings that are safe and relevant for talking to a
    remote repository (ssh command, bundle root, auth/proxy/security
    sections, CA certificates), dropping repo-specific configuration.
    """
    if util.safehasattr(src, b'baseui'):  # looks like a repository
        dst = src.baseui.copy()  # drop repo-specific config
        src = src.ui  # copy target options from repo
    else:  # assume it's a global ui object
        dst = src.copy()  # keep all global options

    # copy ssh-specific options
    for o in b'ssh', b'remotecmd':
        v = opts.get(o) or src.config(b'ui', o)
        if v:
            dst.setconfig(b"ui", o, v, b'copied')

    # copy bundle-specific options
    r = src.config(b'bundle', b'mainreporoot')
    if r:
        dst.setconfig(b'bundle', b'mainreporoot', r, b'copied')

    # copy selected local settings to the remote ui
    for sect in (b'auth', b'hostfingerprints', b'hostsecurity', b'http_proxy'):
        for key, val in src.configitems(sect):
            dst.setconfig(sect, key, val, b'copied')
    v = src.config(b'web', b'cacerts')
    if v:
        dst.setconfig(b'web', b'cacerts', util.expandpath(v), b'copied')

    return dst
1417
1421
1418
1422
# Files of interest
# Used to check if the repository has changed looking at mtime and size of
# these files. Each entry is (repo attribute holding the directory, filename).
foi = [
    (b'spath', b'00changelog.i'),
    (b'spath', b'phaseroots'),  # ! phase can change content at the same size
    (b'spath', b'obsstore'),
    (b'path', b'bookmarks'),  # ! bookmark can change content at the same size
]
1428
1432
1429
1433
class cachedlocalrepo(object):
    """Holds a localrepository that can be cached and reused."""

    def __init__(self, repo):
        """Create a new cached repo from an existing repo.

        We assume the passed in repo was recently created. If the
        repo has changed between when it was created and when it was
        turned into a cache, it may not refresh properly.
        """
        assert isinstance(repo, localrepo.localrepository)
        self._repo = repo
        # snapshot of (mtime, size) for the files of interest, plus the
        # newest mtime seen, used by fetch() to detect staleness
        self._state, self.mtime = self._repostate()
        self._filtername = repo.filtername

    def fetch(self):
        """Refresh (if necessary) and return a repository.

        If the cached instance is out of date, it will be recreated
        automatically and returned.

        Returns a tuple of the repo and a boolean indicating whether a new
        repo instance was created.
        """
        # We compare the mtimes and sizes of some well-known files to
        # determine if the repo changed. This is not precise, as mtimes
        # are susceptible to clock skew and imprecise filesystems and
        # file content can change while maintaining the same size.

        state, mtime = self._repostate()
        if state == self._state:
            return self._repo, False

        repo = repository(self._repo.baseui, self._repo.url())
        if self._filtername:
            self._repo = repo.filtered(self._filtername)
        else:
            self._repo = repo.unfiltered()
        self._state = state
        self.mtime = mtime

        return self._repo, True

    def _repostate(self):
        """Return (state, maxmtime) for the files of interest.

        *state* is a tuple of (mtime, size) pairs, one per entry in the
        module-level ``foi`` list; *maxmtime* is the newest mtime seen.
        """
        state = []
        maxmtime = -1
        for attr, fname in foi:
            prefix = getattr(self._repo, attr)
            p = os.path.join(prefix, fname)
            try:
                st = os.stat(p)
            except OSError:
                # file may not exist yet; fall back to the directory itself
                st = os.stat(prefix)
            state.append((st[stat.ST_MTIME], st.st_size))
            maxmtime = max(maxmtime, st[stat.ST_MTIME])

        return tuple(state), maxmtime

    def copy(self):
        """Obtain a copy of this class instance.

        A new localrepository instance is obtained. The new instance should be
        completely independent of the original.
        """
        repo = repository(self._repo.baseui, self._repo.origroot)
        if self._filtername:
            repo = repo.filtered(self._filtername)
        else:
            repo = repo.unfiltered()
        c = cachedlocalrepo(repo)
        # share the freshness snapshot so the copy does not immediately
        # consider itself stale
        c._state = self._state
        c.mtime = self.mtime
        return c
General Comments 0
You need to be logged in to leave comments. Login now