##// END OF EJS Templates
outgoing: move filtering logic into its own function...
marmoute -
r47676:50b79f8b default
parent child Browse files
Show More
@@ -1,1538 +1,1551 b''
1 # hg.py - repository classes for mercurial
1 # hg.py - repository classes for mercurial
2 #
2 #
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 from __future__ import absolute_import
9 from __future__ import absolute_import
10
10
11 import errno
11 import errno
12 import os
12 import os
13 import shutil
13 import shutil
14 import stat
14 import stat
15
15
16 from .i18n import _
16 from .i18n import _
17 from .node import (
17 from .node import (
18 hex,
18 hex,
19 nullhex,
19 nullhex,
20 nullid,
20 nullid,
21 short,
21 short,
22 )
22 )
23 from .pycompat import getattr
23 from .pycompat import getattr
24
24
25 from . import (
25 from . import (
26 bookmarks,
26 bookmarks,
27 bundlerepo,
27 bundlerepo,
28 cacheutil,
28 cacheutil,
29 cmdutil,
29 cmdutil,
30 destutil,
30 destutil,
31 discovery,
31 discovery,
32 error,
32 error,
33 exchange,
33 exchange,
34 extensions,
34 extensions,
35 httppeer,
35 httppeer,
36 localrepo,
36 localrepo,
37 lock,
37 lock,
38 logcmdutil,
38 logcmdutil,
39 logexchange,
39 logexchange,
40 merge as mergemod,
40 merge as mergemod,
41 mergestate as mergestatemod,
41 mergestate as mergestatemod,
42 narrowspec,
42 narrowspec,
43 phases,
43 phases,
44 requirements,
44 requirements,
45 scmutil,
45 scmutil,
46 sshpeer,
46 sshpeer,
47 statichttprepo,
47 statichttprepo,
48 ui as uimod,
48 ui as uimod,
49 unionrepo,
49 unionrepo,
50 url,
50 url,
51 util,
51 util,
52 verify as verifymod,
52 verify as verifymod,
53 vfs as vfsmod,
53 vfs as vfsmod,
54 )
54 )
55 from .utils import (
55 from .utils import (
56 hashutil,
56 hashutil,
57 stringutil,
57 stringutil,
58 urlutil,
58 urlutil,
59 )
59 )
60
60
61
61
# Convenience alias so callers can use hg.release() for lock release.
release = lock.release

# shared features
# Keys that may appear in a share's .hg/shared file; only bookmarks
# sharing is defined here.
sharedbookmarks = b'bookmarks'
66
66
67
67
def _local(path):
    """Return the repository module for a local path.

    Gives ``bundlerepo`` when the path is a regular file (a bundle) and
    ``localrepo`` otherwise. Raises ``error.Abort`` on invalid paths.
    """
    path = util.expandpath(urlutil.urllocalpath(path))

    try:
        # we use os.stat() directly here instead of os.path.isfile()
        # because the latter started returning `False` on invalid path
        # exceptions starting in 3.8 and we care about handling
        # invalid paths specially here.
        mode = os.stat(path).st_mode
        isfile = stat.S_ISREG(mode)
    except (TypeError, ValueError) as e:
        # Python 2 raises TypeError, Python 3 ValueError.
        raise error.Abort(
            _(b'invalid path %s: %s') % (path, stringutil.forcebytestr(e))
        )
    except OSError:
        isfile = False

    return bundlerepo if isfile else localrepo
87
87
88
88
def addbranchrevs(lrepo, other, branches, revs):
    """Expand branch names from a ``#branch`` URL fragment into revisions.

    ``branches`` is a ``(hashbranch, branches)`` pair as produced by URL
    parsing.  Returns ``(revs, checkoutrev)`` where ``checkoutrev`` is the
    first resolved revision (or None when nothing was requested).
    """
    peer = other.peer()  # a courtesy to callers using a localrepo for other
    hashbranch, branches = branches
    if not hashbranch and not branches:
        # nothing branch-related requested: pass revs through unchanged
        x = revs or None
        if revs:
            y = revs[0]
        else:
            y = None
        return x, y
    if revs:
        revs = list(revs)
    else:
        revs = []

    if not peer.capable(b'branchmap'):
        # without branchmap support we can only treat the fragment as a
        # plain revision identifier
        if branches:
            raise error.Abort(_(b"remote branch lookup not supported"))
        revs.append(hashbranch)
        return revs, revs[0]

    with peer.commandexecutor() as e:
        branchmap = e.callcommand(b'branchmap', {}).result()

    def primary(branch):
        # Append all heads of ``branch`` (newest first) to ``revs``;
        # returns True iff the branch exists in the remote branchmap.
        if branch == b'.':
            if not lrepo:
                raise error.Abort(_(b"dirstate branch not accessible"))
            branch = lrepo.dirstate.branch()
        if branch in branchmap:
            revs.extend(hex(r) for r in reversed(branchmap[branch]))
            return True
        else:
            return False

    for branch in branches:
        if not primary(branch):
            raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
    if hashbranch:
        # an unknown hashbranch may still be a valid revision hash
        if not primary(hashbranch):
            revs.append(hashbranch)
    return revs, revs[0]
131
131
132
132
def parseurl(path, branches=None):
    '''parse url#branch, returning (url, (branch, branches))'''
    # Deprecated shim: warn once per call site, then forward.
    util.nouideprecwarn(
        b'parseurl(...) moved to mercurial.utils.urlutil',
        b'6.0',
        stacklevel=2,
    )
    return urlutil.parseurl(path, branches=branches)
138
138
139
139
# Map of URL scheme -> handler (module or repo-class factory).  Unknown
# schemes fall back to the b'file' entry inside _peerlookup().
schemes = {
    b'bundle': bundlerepo,
    b'union': unionrepo,
    b'file': _local,
    b'http': httppeer,
    b'https': httppeer,
    b'ssh': sshpeer,
    b'static-http': statichttprepo,
}
149
149
150
150
def _peerlookup(path):
    """Return the scheme handler for ``path``.

    The result is either the object returned by calling the registered
    handler, or the handler itself when it is a repo-like module exposing
    ``instance``.
    """
    scheme = urlutil.url(path).scheme or b'file'
    handler = schemes.get(scheme) or schemes[b'file']
    try:
        return handler(path)
    except TypeError:
        # we can't test callable(handler) because it can be an unloaded
        # module that implements __call__
        if util.safehasattr(handler, b'instance'):
            return handler
        raise
163
163
164
164
def islocal(repo):
    '''return true if repo (or path pointing to repo) is local'''
    if not isinstance(repo, bytes):
        return repo.local()
    try:
        return _peerlookup(repo).islocal(repo)
    except AttributeError:
        # handler has no islocal(): treat as remote
        return False
173
173
174
174
def openpath(ui, path, sendaccept=True):
    '''open path with open if local, url.open if remote'''
    pathurl = urlutil.url(path, parsequery=False, parsefragment=False)
    if not pathurl.islocal():
        return url.open(ui, path, sendaccept=sendaccept)
    return util.posixfile(pathurl.localpath(), b'rb')
182
182
183
183
# a list of (ui, repo) functions called for wire peer initialization;
# consumed by _peerorrepo() for non-local peers
wirepeersetupfuncs = []
186
186
187
187
def _peerorrepo(
    ui, path, create=False, presetupfuncs=None, intents=None, createopts=None
):
    """return a repository object for the specified path

    Runs ``presetupfuncs`` first, then each loaded extension's
    ``reposetup`` hook, and finally ``wirepeersetupfuncs`` for non-local
    peers.  Timing of the hooks is logged on the b'extension' channel.
    """
    obj = _peerlookup(path).instance(
        ui, path, create, intents=intents, createopts=createopts
    )
    # the instance may carry its own (repo-level) ui; prefer it
    ui = getattr(obj, "ui", ui)
    for f in presetupfuncs or []:
        f(ui, obj)
    ui.log(b'extension', b'- executing reposetup hooks\n')
    with util.timedcm('all reposetup') as allreposetupstats:
        for name, module in extensions.extensions(ui):
            ui.log(b'extension', b' - running reposetup for %s\n', name)
            hook = getattr(module, 'reposetup', None)
            if hook:
                with util.timedcm('reposetup %r', name) as stats:
                    hook(ui, obj)
                ui.log(
                    b'extension', b' > reposetup for %s took %s\n', name, stats
                )
    ui.log(b'extension', b'> all reposetup took %s\n', allreposetupstats)
    if not obj.local():
        for f in wirepeersetupfuncs:
            f(ui, obj)
    return obj
214
214
215
215
def repository(
    ui,
    path=b'',
    create=False,
    presetupfuncs=None,
    intents=None,
    createopts=None,
):
    """return a repository object for the specified path

    Aborts when the path resolves to a non-local peer.  The returned
    repository is filtered to b'visible'.
    """
    obj = _peerorrepo(
        ui,
        path,
        create,
        presetupfuncs=presetupfuncs,
        intents=intents,
        createopts=createopts,
    )
    repo = obj.local()
    if not repo:
        raise error.Abort(
            _(b"repository '%s' is not local") % (path or obj.url())
        )
    return repo.filtered(b'visible')
239
239
240
240
def peer(uiorrepo, opts, path, create=False, intents=None, createopts=None):
    '''return a repository peer for the specified path'''
    rui = remoteui(uiorrepo, opts)
    obj = _peerorrepo(
        rui, path, create, intents=intents, createopts=createopts
    )
    return obj.peer()
247
247
248
248
def defaultdest(source):
    """return default destination of clone if none is given

    >>> defaultdest(b'foo')
    'foo'
    >>> defaultdest(b'/foo/bar')
    'bar'
    >>> defaultdest(b'/')
    ''
    >>> defaultdest(b'')
    ''
    >>> defaultdest(b'http://example.org/')
    ''
    >>> defaultdest(b'http://example.org/foo/')
    'foo'
    """
    path = urlutil.url(source).path
    # last path component after normalization; empty path -> empty dest
    return os.path.basename(os.path.normpath(path)) if path else b''
269
269
270
270
def sharedreposource(repo):
    """Returns repository object for source repository of a shared repo.

    If repo is not a shared repository, returns None.  The resolved
    source repository is cached on ``repo.srcrepo`` for later calls.
    """
    if repo.sharedpath == repo.path:
        # not a share: store lives in the repo itself
        return None

    if util.safehasattr(repo, b'srcrepo') and repo.srcrepo:
        return repo.srcrepo

    # the sharedpath always ends in the .hg; we want the path to the repo
    source = repo.vfs.split(repo.sharedpath)[0]
    srcurl, branches = urlutil.parseurl(source)
    srcrepo = repository(repo.ui, srcurl)
    repo.srcrepo = srcrepo  # cache for subsequent lookups
    return srcrepo
288
288
289
289
def share(
    ui,
    source,
    dest=None,
    update=True,
    bookmarks=True,
    defaultpath=None,
    relative=False,
):
    '''create a shared repository

    ``source`` may be a path/URL (bytes) or an existing peer/repo object.
    Returns the newly created shared repository object.
    '''

    if not islocal(source):
        raise error.Abort(_(b'can only share local repositories'))

    if not dest:
        dest = defaultdest(source)
    else:
        dest = ui.expandpath(dest)

    if isinstance(source, bytes):
        origsource = ui.expandpath(source)
        source, branches = urlutil.parseurl(origsource)
        srcrepo = repository(ui, source)
        rev, checkout = addbranchrevs(srcrepo, srcrepo, branches, None)
    else:
        srcrepo = source.local()
        checkout = None

    shareditems = set()
    if bookmarks:
        shareditems.add(sharedbookmarks)

    r = repository(
        ui,
        dest,
        create=True,
        createopts={
            b'sharedrepo': srcrepo,
            b'sharedrelative': relative,
            b'shareditems': shareditems,
        },
    )

    postshare(srcrepo, r, defaultpath=defaultpath)
    # reopen the destination so post-share state is picked up fresh
    r = repository(ui, dest)
    _postshareupdate(r, update, checkout=checkout)
    return r
337
337
338
338
def _prependsourcehgrc(repo):
    """copies the source repo config and prepend it in current repo .hg/hgrc
    on unshare. This is only done if the share was perfomed using share safe
    method where we share config of source in shares"""
    srcvfs = vfsmod.vfs(repo.sharedpath)
    dstvfs = vfsmod.vfs(repo.path)

    if not srcvfs.exists(b'hgrc'):
        # source has no config to carry over
        return

    currentconfig = dstvfs.read(b'hgrc') if dstvfs.exists(b'hgrc') else b''

    with dstvfs(b'hgrc', b'wb') as fp:
        fp.write(b"# Config copied from shared source\n")
        fp.write(srcvfs.read(b'hgrc'))
        fp.write(b'\n')
        fp.write(currentconfig)
359
359
360
360
def unshare(ui, repo):
    """convert a shared repository to a normal one

    Copy the store data to the repo and remove the sharedpath data.

    Returns a new repository object representing the unshared repository.

    The passed repository object is not usable after this function is
    called.
    """

    with repo.lock():
        # we use locks here because if we race with commit, we
        # can end up with extra data in the cloned revlogs that's
        # not pointed to by changesets, thus causing verify to
        # fail
        destlock = copystore(ui, repo, repo.path)
        with destlock or util.nullcontextmanager():
            if requirements.SHARESAFE_REQUIREMENT in repo.requirements:
                # we were sharing .hg/hgrc of the share source with the current
                # repo. We need to copy that while unsharing otherwise it can
                # disable hooks and other checks
                _prependsourcehgrc(repo)

            # keep the old pointer around for inspection/debugging
            sharefile = repo.vfs.join(b'sharedpath')
            util.rename(sharefile, sharefile + b'.old')

            repo.requirements.discard(requirements.SHARED_REQUIREMENT)
            repo.requirements.discard(requirements.RELATIVE_SHARED_REQUIREMENT)
            scmutil.writereporequirements(repo)

    # Removing share changes some fundamental properties of the repo instance.
    # So we instantiate a new repo object and operate on it rather than
    # try to keep the existing repo usable.
    newrepo = repository(repo.baseui, repo.root, create=False)

    # TODO: figure out how to access subrepos that exist, but were previously
    # removed from .hgsub
    c = newrepo[b'.']
    subs = c.substate
    for s in sorted(subs):
        c.sub(s).unshare()

    # make any further use of the old object fail loudly
    localrepo.poisonrepository(repo)

    return newrepo
407
407
408
408
def postshare(sourcerepo, destrepo, defaultpath=None):
    """Called after a new shared repo is created.

    The new repo only has a requirements file and pointer to the source.
    This function configures additional shared data.

    Extensions can wrap this function and write additional entries to
    destrepo/.hg/shared to indicate additional pieces of data to be shared.
    """
    default = defaultpath or sourcerepo.ui.config(b'paths', b'default')
    if default:
        # record where the share came from as the default path
        destrepo.vfs.write(
            b'hgrc', util.tonativeeol(b'[paths]\ndefault = %s\n' % default)
        )
    if requirements.NARROW_REQUIREMENT in sourcerepo.requirements:
        with destrepo.wlock():
            narrowspec.copytoworkingcopy(destrepo)
425
425
426
426
def _postshareupdate(repo, update, checkout=None):
    """Maybe perform a working directory update after a shared repo is created.

    ``update`` can be a boolean or a revision to update to.
    """
    if not update:
        return

    repo.ui.status(_(b"updating working directory\n"))
    if update is not True:
        checkout = update
    # first candidate that resolves wins: explicit checkout, then
    # b'default', then b'tip'
    for candidate in (checkout, b'default', b'tip'):
        if candidate is None:
            continue
        try:
            uprev = repo.lookup(candidate)
        except error.RepoLookupError:
            continue
        break
    _update(repo, uprev)
447
447
448
448
def copystore(ui, srcrepo, destpath):
    """copy files from store of srcrepo in destpath

    returns destlock
    """
    destlock = None
    try:
        # hardlink starts unknown; util.copyfiles decides and reports back
        hardlink = None
        topic = _(b'linking') if hardlink else _(b'copying')
        with ui.makeprogress(topic, unit=_(b'files')) as progress:
            num = 0
            srcpublishing = srcrepo.publishing()
            srcvfs = vfsmod.vfs(srcrepo.sharedpath)
            dstvfs = vfsmod.vfs(destpath)
            for f in srcrepo.store.copylist():
                # a publishing repo's phaseroots would mark everything
                # public in the copy; skip it
                if srcpublishing and f.endswith(b'phaseroots'):
                    continue
                dstbase = os.path.dirname(f)
                if dstbase and not dstvfs.exists(dstbase):
                    dstvfs.mkdir(dstbase)
                if srcvfs.exists(f):
                    if f.endswith(b'data'):
                        # 'dstbase' may be empty (e.g. revlog format 0)
                        lockfile = os.path.join(dstbase, b"lock")
                        # lock to avoid premature writing to the target
                        destlock = lock.lock(dstvfs, lockfile)
                    hardlink, n = util.copyfiles(
                        srcvfs.join(f), dstvfs.join(f), hardlink, progress
                    )
                    num += n
            if hardlink:
                ui.debug(b"linked %d files\n" % num)
            else:
                ui.debug(b"copied %d files\n" % num)
        return destlock
    except:  # re-raises
        # never leak the destination lock on failure
        release(destlock)
        raise
487
487
488
488
def clonewithshare(
    ui,
    peeropts,
    sharepath,
    source,
    srcpeer,
    dest,
    pull=False,
    rev=None,
    update=True,
    stream=False,
):
    """Perform a clone using a shared repo.

    The store for the repository will be located at <sharepath>/.hg. The
    specified revisions will be cloned or pulled from "source". A shared repo
    will be created at "dest" and a working copy will be created if "update" is
    True.

    NOTE(review): the ``pull`` parameter is never read in this body (the
    inner clone always forces pull=True) — confirm whether callers rely
    on passing it.
    """
    revs = None
    if rev:
        if not srcpeer.capable(b'lookup'):
            raise error.Abort(
                _(
                    b"src repository does not support "
                    b"revision lookup and so doesn't "
                    b"support clone by revision"
                )
            )

        # TODO this is batchable.
        remoterevs = []
        for r in rev:
            with srcpeer.commandexecutor() as e:
                remoterevs.append(
                    e.callcommand(
                        b'lookup',
                        {
                            b'key': r,
                        },
                    ).result()
                )
        revs = remoterevs

    # Obtain a lock before checking for or cloning the pooled repo otherwise
    # 2 clients may race creating or populating it.
    pooldir = os.path.dirname(sharepath)
    # lock class requires the directory to exist.
    try:
        util.makedir(pooldir, False)
    except OSError as e:
        if e.errno != errno.EEXIST:
            raise

    poolvfs = vfsmod.vfs(pooldir)
    basename = os.path.basename(sharepath)

    with lock.lock(poolvfs, b'%s.lock' % basename):
        if os.path.exists(sharepath):
            ui.status(
                _(b'(sharing from existing pooled repository %s)\n') % basename
            )
        else:
            ui.status(
                _(b'(sharing from new pooled repository %s)\n') % basename
            )
            # Always use pull mode because hardlinks in share mode don't work
            # well. Never update because working copies aren't necessary in
            # share mode.
            clone(
                ui,
                peeropts,
                source,
                dest=sharepath,
                pull=True,
                revs=rev,
                update=False,
                stream=stream,
            )

    # Resolve the value to put in [paths] section for the source.
    if islocal(source):
        defaultpath = os.path.abspath(urlutil.urllocalpath(source))
    else:
        defaultpath = source

    sharerepo = repository(ui, path=sharepath)
    destrepo = share(
        ui,
        sharerepo,
        dest=dest,
        update=False,
        bookmarks=False,
        defaultpath=defaultpath,
    )

    # We need to perform a pull against the dest repo to fetch bookmarks
    # and other non-store data that isn't shared by default. In the case of
    # non-existing shared repo, this means we pull from the remote twice. This
    # is a bit weird. But at the time it was implemented, there wasn't an easy
    # way to pull just non-changegroup data.
    exchange.pull(destrepo, srcpeer, heads=revs)

    _postshareupdate(destrepo, update)

    return srcpeer, peer(ui, peeropts, dest)
595
595
596
596
# Recomputing caches is often slow on big repos, so copy them.
def _copycache(srcrepo, dstcachedir, fname):
    """copy a cache from srcrepo to destcachedir (if it exists)"""
    srcfname = srcrepo.cachevfs.join(fname)
    dstfname = os.path.join(dstcachedir, fname)
    # A missing source cache is not an error: the destination repository
    # will simply recompute it lazily, so we only copy what exists.
    if os.path.exists(srcfname):
        if not os.path.exists(dstcachedir):
            os.mkdir(dstcachedir)
        util.copyfile(srcfname, dstfname)
606
606
607
607
def clone(
    ui,
    peeropts,
    source,
    dest=None,
    pull=False,
    revs=None,
    update=True,
    stream=False,
    branch=None,
    shareopts=None,
    storeincludepats=None,
    storeexcludepats=None,
    depth=None,
):
    """Make a copy of an existing repository.

    Create a copy of an existing repository in a new directory. The
    source and destination are URLs, as passed to the repository
    function. Returns a pair of repository peers, the source and
    newly created destination.

    The location of the source is added to the new repository's
    .hg/hgrc file, as the default to be used for future pulls and
    pushes.

    If an exception is raised, the partly cloned/updated destination
    repository will be deleted.

    Arguments:

    source: repository object or URL

    dest: URL of destination repository to create (defaults to base
    name of source repository)

    pull: always pull from source repository, even in local case or if the
    server prefers streaming

    stream: stream raw data uncompressed from repository (fast over
    LAN, slow over WAN)

    revs: revision to clone up to (implies pull=True)

    update: update working directory after clone completes, if
    destination is local repository (True means update to default rev,
    anything else is treated as a revision)

    branch: branches to clone

    shareopts: dict of options to control auto sharing behavior. The "pool" key
    activates auto sharing mode and defines the directory for stores. The
    "mode" key determines how to construct the directory name of the shared
    repository. "identity" means the name is derived from the node of the first
    changeset in the repository. "remote" means the name is derived from the
    remote's path/URL. Defaults to "identity."

    storeincludepats and storeexcludepats: sets of file patterns to include and
    exclude in the repository copy, respectively. If not defined, all files
    will be included (a "full" clone). Otherwise a "narrow" clone containing
    only the requested files will be performed. If ``storeincludepats`` is not
    defined but ``storeexcludepats`` is, ``storeincludepats`` is assumed to be
    ``path:.``. If both are empty sets, no files will be cloned.
    """

    if isinstance(source, bytes):
        origsource = ui.expandpath(source)
        source, branches = urlutil.parseurl(origsource, branch)
        srcpeer = peer(ui, peeropts, source)
    else:
        srcpeer = source.peer()  # in case we were called with a localrepo
        branches = (None, branch or [])
        origsource = source = srcpeer.url()
    srclock = destlock = cleandir = None
    destpeer = None
    try:
        revs, checkout = addbranchrevs(srcpeer, srcpeer, branches, revs)

        if dest is None:
            dest = defaultdest(source)
            if dest:
                ui.status(_(b"destination directory: %s\n") % dest)
        else:
            dest = ui.expandpath(dest)

        dest = urlutil.urllocalpath(dest)
        source = urlutil.urllocalpath(source)

        if not dest:
            raise error.InputError(_(b"empty destination path is not valid"))

        destvfs = vfsmod.vfs(dest, expandpath=True)
        if destvfs.lexists():
            if not destvfs.isdir():
                raise error.InputError(
                    _(b"destination '%s' already exists") % dest
                )
            elif destvfs.listdir():
                raise error.InputError(
                    _(b"destination '%s' is not empty") % dest
                )

        createopts = {}
        narrow = False

        if storeincludepats is not None:
            narrowspec.validatepatterns(storeincludepats)
            narrow = True

        if storeexcludepats is not None:
            narrowspec.validatepatterns(storeexcludepats)
            narrow = True

        if narrow:
            # Include everything by default if only exclusion patterns defined.
            if storeexcludepats and not storeincludepats:
                storeincludepats = {b'path:.'}

            createopts[b'narrowfiles'] = True

        if depth:
            createopts[b'shallowfilestore'] = True

        if srcpeer.capable(b'lfs-serve'):
            # Repository creation honors the config if it disabled the
            # extension, so we can't just announce that lfs will be enabled.
            # This check avoids saying that lfs will be enabled, and then
            # saying it's an unknown feature. The lfs creation option is set
            # in either case so that a requirement is added. If the extension
            # is explicitly disabled but the requirement is set, the clone
            # aborts early, before transferring any data.
            createopts[b'lfs'] = True

            if extensions.disabled_help(b'lfs'):
                ui.status(
                    _(
                        b'(remote is using large file support (lfs), but it is '
                        b'explicitly disabled in the local configuration)\n'
                    )
                )
            else:
                ui.status(
                    _(
                        b'(remote is using large file support (lfs); lfs will '
                        b'be enabled for this repository)\n'
                    )
                )

        shareopts = shareopts or {}
        sharepool = shareopts.get(b'pool')
        sharenamemode = shareopts.get(b'mode')
        if sharepool and islocal(dest):
            sharepath = None
            if sharenamemode == b'identity':
                # Resolve the name from the initial changeset in the remote
                # repository. This returns nullid when the remote is empty. It
                # raises RepoLookupError if revision 0 is filtered or otherwise
                # not available. If we fail to resolve, sharing is not enabled.
                try:
                    with srcpeer.commandexecutor() as e:
                        rootnode = e.callcommand(
                            b'lookup',
                            {
                                b'key': b'0',
                            },
                        ).result()

                    if rootnode != nullid:
                        sharepath = os.path.join(sharepool, hex(rootnode))
                    else:
                        ui.status(
                            _(
                                b'(not using pooled storage: '
                                b'remote appears to be empty)\n'
                            )
                        )
                except error.RepoLookupError:
                    ui.status(
                        _(
                            b'(not using pooled storage: '
                            b'unable to resolve identity of remote)\n'
                        )
                    )
            elif sharenamemode == b'remote':
                sharepath = os.path.join(
                    sharepool, hex(hashutil.sha1(source).digest())
                )
            else:
                raise error.Abort(
                    _(b'unknown share naming mode: %s') % sharenamemode
                )

            # TODO this is a somewhat arbitrary restriction.
            if narrow:
                ui.status(
                    _(b'(pooled storage not supported for narrow clones)\n')
                )
                sharepath = None

            if sharepath:
                return clonewithshare(
                    ui,
                    peeropts,
                    sharepath,
                    source,
                    srcpeer,
                    dest,
                    pull=pull,
                    rev=revs,
                    update=update,
                    stream=stream,
                )

        srcrepo = srcpeer.local()

        abspath = origsource
        if islocal(origsource):
            abspath = os.path.abspath(urlutil.urllocalpath(origsource))

        if islocal(dest):
            cleandir = dest

        copy = False
        if (
            srcrepo
            and srcrepo.cancopy()
            and islocal(dest)
            and not phases.hassecret(srcrepo)
        ):
            copy = not pull and not revs

        # TODO this is a somewhat arbitrary restriction.
        if narrow:
            copy = False

        if copy:
            try:
                # we use a lock here because if we race with commit, we
                # can end up with extra data in the cloned revlogs that's
                # not pointed to by changesets, thus causing verify to
                # fail
                srclock = srcrepo.lock(wait=False)
            except error.LockError:
                copy = False

        if copy:
            srcrepo.hook(b'preoutgoing', throw=True, source=b'clone')
            hgdir = os.path.realpath(os.path.join(dest, b".hg"))
            if not os.path.exists(dest):
                util.makedirs(dest)
            else:
                # only clean up directories we create ourselves
                cleandir = hgdir
            try:
                destpath = hgdir
                util.makedir(destpath, notindexed=True)
            except OSError as inst:
                if inst.errno == errno.EEXIST:
                    cleandir = None
                    raise error.Abort(
                        _(b"destination '%s' already exists") % dest
                    )
                raise

            destlock = copystore(ui, srcrepo, destpath)
            # copy bookmarks over
            srcbookmarks = srcrepo.vfs.join(b'bookmarks')
            dstbookmarks = os.path.join(destpath, b'bookmarks')
            if os.path.exists(srcbookmarks):
                util.copyfile(srcbookmarks, dstbookmarks)

            dstcachedir = os.path.join(destpath, b'cache')
            for cache in cacheutil.cachetocopy(srcrepo):
                _copycache(srcrepo, dstcachedir, cache)

            # we need to re-init the repo after manually copying the data
            # into it
            destpeer = peer(srcrepo, peeropts, dest)
            srcrepo.hook(b'outgoing', source=b'clone', node=nullhex)
        else:
            try:
                # only pass ui when no srcrepo
                destpeer = peer(
                    srcrepo or ui,
                    peeropts,
                    dest,
                    create=True,
                    createopts=createopts,
                )
            except OSError as inst:
                if inst.errno == errno.EEXIST:
                    cleandir = None
                    raise error.Abort(
                        _(b"destination '%s' already exists") % dest
                    )
                raise

            if revs:
                if not srcpeer.capable(b'lookup'):
                    raise error.Abort(
                        _(
                            b"src repository does not support "
                            b"revision lookup and so doesn't "
                            b"support clone by revision"
                        )
                    )

                # TODO this is batchable.
                remoterevs = []
                for rev in revs:
                    with srcpeer.commandexecutor() as e:
                        remoterevs.append(
                            e.callcommand(
                                b'lookup',
                                {
                                    b'key': rev,
                                },
                            ).result()
                        )
                revs = remoterevs

                checkout = revs[0]
            else:
                revs = None
            local = destpeer.local()
            if local:
                if narrow:
                    with local.wlock(), local.lock():
                        local.setnarrowpats(storeincludepats, storeexcludepats)
                        narrowspec.copytoworkingcopy(local)

                u = urlutil.url(abspath)
                defaulturl = bytes(u)
                local.ui.setconfig(b'paths', b'default', defaulturl, b'clone')
                if not stream:
                    if pull:
                        stream = False
                    else:
                        stream = None
                # internal config: ui.quietbookmarkmove
                overrides = {(b'ui', b'quietbookmarkmove'): True}
                with local.ui.configoverride(overrides, b'clone'):
                    exchange.pull(
                        local,
                        srcpeer,
                        revs,
                        streamclonerequested=stream,
                        includepats=storeincludepats,
                        excludepats=storeexcludepats,
                        depth=depth,
                    )
            elif srcrepo:
                # TODO lift restriction once exchange.push() accepts narrow
                # push.
                if narrow:
                    raise error.Abort(
                        _(
                            b'narrow clone not available for '
                            b'remote destinations'
                        )
                    )

                exchange.push(
                    srcrepo,
                    destpeer,
                    revs=revs,
                    bookmarks=srcrepo._bookmarks.keys(),
                )
            else:
                raise error.Abort(
                    _(b"clone from remote to remote not supported")
                )

        cleandir = None

        destrepo = destpeer.local()
        if destrepo:
            template = uimod.samplehgrcs[b'cloned']
            u = urlutil.url(abspath)
            u.passwd = None
            defaulturl = bytes(u)
            destrepo.vfs.write(b'hgrc', util.tonativeeol(template % defaulturl))
            destrepo.ui.setconfig(b'paths', b'default', defaulturl, b'clone')

            if ui.configbool(b'experimental', b'remotenames'):
                logexchange.pullremotenames(destrepo, srcpeer)

            if update:
                if update is not True:
                    with srcpeer.commandexecutor() as e:
                        checkout = e.callcommand(
                            b'lookup',
                            {
                                b'key': update,
                            },
                        ).result()

                uprev = None
                status = None
                if checkout is not None:
                    # Some extensions (at least hg-git and hg-subversion) have
                    # a peer.lookup() implementation that returns a name instead
                    # of a nodeid. We work around it here until we've figured
                    # out a better solution.
                    if len(checkout) == 20 and checkout in destrepo:
                        uprev = checkout
                    elif scmutil.isrevsymbol(destrepo, checkout):
                        uprev = scmutil.revsymbol(destrepo, checkout).node()
                    else:
                        if update is not True:
                            try:
                                uprev = destrepo.lookup(update)
                            except error.RepoLookupError:
                                pass
                if uprev is None:
                    try:
                        if destrepo._activebookmark:
                            uprev = destrepo.lookup(destrepo._activebookmark)
                            update = destrepo._activebookmark
                        else:
                            uprev = destrepo._bookmarks[b'@']
                            update = b'@'
                        bn = destrepo[uprev].branch()
                        if bn == b'default':
                            # Translate the message template first, then
                            # interpolate (was: interpolation inside _(),
                            # which can never match a catalog entry; the
                            # branch below already does it correctly).
                            status = _(b"updating to bookmark %s\n") % update
                        else:
                            status = (
                                _(b"updating to bookmark %s on branch %s\n")
                            ) % (update, bn)
                    except KeyError:
                        try:
                            uprev = destrepo.branchtip(b'default')
                        except error.RepoLookupError:
                            uprev = destrepo.lookup(b'tip')
                if not status:
                    bn = destrepo[uprev].branch()
                    status = _(b"updating to branch %s\n") % bn
                destrepo.ui.status(status)
                _update(destrepo, uprev)
                if update in destrepo._bookmarks:
                    bookmarks.activate(destrepo, update)
            if destlock is not None:
                release(destlock)
            # here is a tiny windows were someone could end up writing the
            # repository before the cache are sure to be warm. This is "fine"
            # as the only "bad" outcome would be some slowness. That potential
            # slowness already affect reader.
            with destrepo.lock():
                destrepo.updatecaches(full=True)
    finally:
        release(srclock, destlock)
        if cleandir is not None:
            shutil.rmtree(cleandir, True)
        if srcpeer is not None:
            srcpeer.close()
        if destpeer and destpeer.local() is None:
            destpeer.close()
    return srcpeer, destpeer
1066
1066
1067
1067
def _showstats(repo, stats, quietempty=False):
    """Print an update/merge result summary to the repo's ui.

    ``stats`` carries the updated/merged/removed/unresolved counters; when
    ``quietempty`` is set and all counters are zero, nothing is printed.
    """
    if quietempty and stats.isempty():
        return
    repo.ui.status(
        _(
            b"%d files updated, %d files merged, "
            b"%d files removed, %d files unresolved\n"
        )
        % (
            stats.updatedcount,
            stats.mergedcount,
            stats.removedcount,
            stats.unresolvedcount,
        )
    )
1083
1083
1084
1084
def updaterepo(repo, node, overwrite, updatecheck=None):
    """Update the working directory to node.

    When overwrite is set, changes are clobbered, merged else

    returns stats (see pydoc mercurial.merge.applyupdates)"""
    # Deprecated shim kept for external callers; emits a warning and
    # delegates to merge._update() with the equivalent arguments.
    repo.ui.deprecwarn(
        b'prefer merge.update() or merge.clean_update() over hg.updaterepo()',
        b'5.7',
    )
    return mergemod._update(
        repo,
        node,
        branchmerge=False,
        force=overwrite,
        labels=[b'working copy', b'destination'],
        updatecheck=updatecheck,
    )
1103
1103
1104
1104
def update(repo, node, quietempty=False, updatecheck=None):
    """update the working directory to node

    Returns True when there are unresolved file merges left behind,
    False otherwise.
    """
    stats = mergemod.update(repo[node], updatecheck=updatecheck)
    _showstats(repo, stats, quietempty)
    if stats.unresolvedcount:
        repo.ui.status(_(b"use 'hg resolve' to retry unresolved file merges\n"))
    return stats.unresolvedcount > 0


# naming conflict in clone()
_update = update
1116
1116
1117
1117
def clean(repo, node, show_stats=True, quietempty=False):
    """forcibly switch the working directory to node, clobbering changes

    Always returns False (a clean update cannot leave unresolved files).
    """
    stats = mergemod.clean_update(repo[node])
    assert stats.unresolvedcount == 0
    if show_stats:
        _showstats(repo, stats, quietempty)
    return False


# naming conflict in updatetotally()
_clean = clean
1129
1129
# The set of updatecheck policies accepted by updatetotally(); anything
# else falls back to the LINEAR default there.
_VALID_UPDATECHECKS = {
    mergemod.UPDATECHECK_ABORT,
    mergemod.UPDATECHECK_NONE,
    mergemod.UPDATECHECK_LINEAR,
    mergemod.UPDATECHECK_NO_CONFLICT,
}
1136
1136
1137
1137
def updatetotally(ui, repo, checkout, brev, clean=False, updatecheck=None):
    """Update the working directory with extra care for non-file components

    This takes care of non-file components below:

    :bookmark: might be advanced or (in)activated

    This takes arguments below:

    :checkout: to which revision the working directory is updated
    :brev: a name, which might be a bookmark to be activated after updating
    :clean: whether changes in the working directory can be discarded
    :updatecheck: how to deal with a dirty working directory

    Valid values for updatecheck are the UPDATECHECK_* constants
    defined in the merge module. Passing `None` will result in using the
    configured default.

    * ABORT: abort if the working directory is dirty
    * NONE: don't check (merge working directory changes into destination)
    * LINEAR: check that update is linear before merging working directory
      changes into destination
    * NO_CONFLICT: check that the update does not result in file merges

    This returns whether conflict is detected at updating or not.
    """
    if updatecheck is None:
        updatecheck = ui.config(b'commands', b'update.check')
        if updatecheck not in _VALID_UPDATECHECKS:
            # If not configured, or invalid value configured
            updatecheck = mergemod.UPDATECHECK_LINEAR
    # an explicit argument, on the other hand, must be valid
    if updatecheck not in _VALID_UPDATECHECKS:
        raise ValueError(
            r'Invalid updatecheck value %r (can accept %r)'
            % (updatecheck, _VALID_UPDATECHECKS)
        )
    with repo.wlock():
        movemarkfrom = None
        warndest = False
        if checkout is None:
            # no explicit destination: let destutil pick one (and possibly
            # an active bookmark to move along)
            updata = destutil.destupdate(repo, clean=clean)
            checkout, movemarkfrom, brev = updata
            warndest = True

        if clean:
            ret = _clean(repo, checkout)
        else:
            if updatecheck == mergemod.UPDATECHECK_ABORT:
                # bail out now; past this point updating may merge changes
                cmdutil.bailifchanged(repo, merge=False)
                updatecheck = mergemod.UPDATECHECK_NONE
            ret = _update(repo, checkout, updatecheck=updatecheck)

        if not ret and movemarkfrom:
            if movemarkfrom == repo[b'.'].node():
                pass  # no-op update
            elif bookmarks.update(repo, [movemarkfrom], repo[b'.'].node()):
                b = ui.label(repo._activebookmark, b'bookmarks.active')
                ui.status(_(b"updating bookmark %s\n") % b)
            else:
                # this can happen with a non-linear update
                b = ui.label(repo._activebookmark, b'bookmarks')
                ui.status(_(b"(leaving bookmark %s)\n") % b)
                bookmarks.deactivate(repo)
        elif brev in repo._bookmarks:
            # the destination name is a bookmark: make it active
            if brev != repo._activebookmark:
                b = ui.label(brev, b'bookmarks.active')
                ui.status(_(b"(activating bookmark %s)\n") % b)
            bookmarks.activate(repo, brev)
        elif brev:
            # destination is a non-bookmark name: deactivate any active one
            if repo._activebookmark:
                b = ui.label(repo._activebookmark, b'bookmarks')
                ui.status(_(b"(leaving bookmark %s)\n") % b)
            bookmarks.deactivate(repo)

        if warndest:
            destutil.statusotherdests(ui, repo)

    return ret
1216
1216
1217
1217
def merge(
    ctx,
    force=False,
    remind=True,
    labels=None,
):
    """Branch merge with node, resolving changes. Return true if any
    unresolved conflicts."""
    repo = ctx.repo()
    stats = mergemod.merge(ctx, force=force, labels=labels)
    _showstats(repo, stats)
    unresolved = stats.unresolvedcount > 0
    if unresolved:
        # point the user at the two ways out of a conflicted merge
        msg = _(
            b"use 'hg resolve' to retry unresolved file merges "
            b"or 'hg merge --abort' to abandon\n"
        )
        repo.ui.status(msg)
    elif remind:
        repo.ui.status(_(b"(branch merge, don't forget to commit)\n"))
    return unresolved
1239
1239
1240
1240
def abortmerge(ui, repo):
    """abandon an in-progress merge, restoring the pre-merge checkout"""
    ms = mergestatemod.mergestate.read(repo)
    if ms.active():
        # conflicts occurred, so a merge state was stored; go back to the
        # local side recorded there
        node = ms.localctx.hex()
    else:
        # no conflicts occurred, hence no merge state was stored; the
        # working directory parent is the pre-merge checkout
        node = repo[b'.'].hex()

    repo.ui.status(_(b"aborting the merge, updating back to %s\n") % node[:12])
    stats = mergemod.clean_update(repo[node])
    # a clean update cannot leave conflicts behind
    assert stats.unresolvedcount == 0
    _showstats(repo, stats)
1254
1254
1255
1255
def _incoming(
    displaychlist, subreporecurse, ui, repo, source, opts, buffered=False
):
    """
    Helper for incoming / gincoming.
    displaychlist gets called with
    (remoterepo, incomingchangesetlist, displayer) parameters,
    and is supposed to contain only code that can't be unified.
    """
    source, branches = urlutil.parseurl(
        ui.expandpath(source), opts.get(b'branch')
    )
    other = peer(repo, opts, source)
    # default cleanup: just close the peer; getremotechanges() below may
    # replace this with a richer cleanup callback
    cleanupfn = other.close
    try:
        ui.status(_(b'comparing with %s\n') % urlutil.hidepassword(source))
        revs, checkout = addbranchrevs(repo, other, branches, opts.get(b'rev'))

        if revs:
            revs = [other.lookup(rev) for rev in revs]
        # note: this may replace ``other`` with a (local) bundle repository
        # containing the incoming changesets, along with the cleanup callback
        other, chlist, cleanupfn = bundlerepo.getremotechanges(
            ui, repo, other, revs, opts[b"bundle"], opts[b"force"]
        )

        if not chlist:
            ui.status(_(b"no changes found\n"))
            return subreporecurse()
        ui.pager(b'incoming')
        displayer = logcmdutil.changesetdisplayer(
            ui, other, opts, buffered=buffered
        )
        displaychlist(other, chlist, displayer)
        displayer.close()
    finally:
        cleanupfn()
    subreporecurse()
    return 0  # exit code is zero since we found incoming changes
1293
1293
1294
1294
def incoming(ui, repo, source, opts):
    """show changesets that would be pulled from ``source``"""

    def subreporecurse():
        # recurse into subrepositories when --subrepos was requested
        result = 1
        if opts.get(b'subrepos'):
            wctx = repo[None]
            for path in sorted(wctx.substate):
                subrepo = wctx.sub(path)
                result = min(result, subrepo.incoming(ui, source, opts))
        return result

    def display(other, chlist, displayer):
        # apply --limit / --newest-first / --no-merges while displaying
        limit = logcmdutil.getlimit(opts)
        if opts.get(b'newest_first'):
            chlist.reverse()
        skip_merges = opts.get(b'no_merges')
        shown = 0
        for node in chlist:
            if limit is not None and shown >= limit:
                break
            parents = [p for p in other.changelog.parents(node) if p != nullid]
            if skip_merges and len(parents) == 2:
                continue
            shown += 1
            displayer.show(other[node])

    return _incoming(display, subreporecurse, ui, repo, source, opts)
1320
1320
1321
1321
def _outgoing(ui, repo, dest, opts):
    """find changesets missing from ``dest``

    Returns a ``(missing_nodes, peer)`` pair; on success the caller becomes
    responsible for closing the returned peer.
    """
    path = ui.getpath(dest, default=(b'default-push', b'default'))
    if not path:
        raise error.Abort(
            _(b'default repository not configured!'),
            hint=_(b"see 'hg help config.paths'"),
        )
    dest = path.pushloc or path.loc
    branches = path.branch, opts.get(b'branch') or []

    ui.status(_(b'comparing with %s\n') % urlutil.hidepassword(dest))
    revs, checkout = addbranchrevs(repo, repo, branches, opts.get(b'rev'))
    if revs:
        revs = [repo[rev].node() for rev in scmutil.revrange(repo, revs)]

    other = peer(repo, opts, dest)
    try:
        outgoing = discovery.findcommonoutgoing(
            repo, other, revs, force=opts.get(b'force')
        )
        o = outgoing.missing
        if not o:
            scmutil.nochangesfound(repo.ui, repo, outgoing.excluded)
        # ownership of ``other`` transfers to the caller here
        return o, other
    except:  # re-raises
        # the caller never receives the peer, so close it ourselves
        other.close()
        raise
1349
1349
1350
1350
def _outgoing_recurse(ui, repo, dest, opts):
    """run outgoing in subrepositories for --subrepos; return min exit code"""
    result = 1
    if opts.get(b'subrepos'):
        wctx = repo[None]
        for path in sorted(wctx.substate):
            subrepo = wctx.sub(path)
            result = min(result, subrepo.outgoing(ui, dest, opts))
    return result
1359
1359
1360
1360
def _outgoing_filter(repo, revs, opts):
    """apply revision filtering/ordering option for outgoing"""
    limit = logcmdutil.getlimit(opts)
    skip_merges = opts.get(b'no_merges')
    if opts.get(b'newest_first'):
        # note: reverses the caller's list in place (historical behavior)
        revs.reverse()
    if limit is None and not skip_merges:
        # fast path: nothing to filter, stream everything through
        for node in revs:
            yield node
        return

    emitted = 0
    cl = repo.changelog
    for node in revs:
        if limit is not None and emitted >= limit:
            break
        if skip_merges:
            parents = [p for p in cl.parents(node) if p != nullid]
            if len(parents) == 2:
                continue
        emitted += 1
        yield node
1383
def outgoing(ui, repo, dest, opts):
    """Show changesets not found in the destination.

    Returns 0 when outgoing changesets exist, 1 otherwise (the value may
    also be lowered by subrepository recursion).
    """

    o, other = _outgoing(ui, repo, dest, opts)
    ret = 1
    try:
        if o:
            ret = 0

        ui.pager(b'outgoing')
        displayer = logcmdutil.changesetdisplayer(ui, repo, opts)
        # --limit / --newest-first / --no-merges handling lives in
        # _outgoing_filter()
        for n in _outgoing_filter(repo, o, opts):
            displayer.show(repo[n])
        displayer.close()
        cmdutil.outgoinghooks(ui, repo, other, opts, o)
        ret = min(ret, _outgoing_recurse(ui, repo, dest, opts))
        return ret  # exit code is zero since we found outgoing changes
    finally:
        # _outgoing() handed us ownership of the peer; always close it
        other.close()
1389
1402
1390
1403
def verify(repo, level=None):
    """verify the consistency of a repository

    Returns a non-zero status if any problem was found; also checks that
    .hgsubstate references resolve to verifiable subrepositories.
    """
    ret = verifymod.verify(repo, level=level)

    # Broken subrepo references in hidden csets don't seem worth worrying about,
    # since they can't be pushed/pulled, and --hidden can be used if they are a
    # concern.

    # pathto() is needed for -R case
    revs = repo.revs(
        b"filelog(%s)", util.pathto(repo.root, repo.getcwd(), b'.hgsubstate')
    )

    if revs:
        repo.ui.status(_(b'checking subrepo links\n'))
        for rev in revs:
            ctx = repo[rev]
            try:
                for subpath in ctx.substate:
                    try:
                        # keep any prior failure sticky: ``or ret`` preserves
                        # a non-zero status from earlier checks
                        ret = (
                            ctx.sub(subpath, allowcreate=False).verify() or ret
                        )
                    except error.RepoError as e:
                        repo.ui.warn(b'%d: %s\n' % (rev, e))
            except Exception:
                # reading substate itself blew up: the file is corrupt
                repo.ui.warn(
                    _(b'.hgsubstate is corrupt in revision %s\n')
                    % short(ctx.node())
                )

    return ret
1423
1436
1424
1437
def remoteui(src, opts):
    """build a remote ui from ui or repo and opts"""
    # a repository carries a ``baseui`` free of repo-specific configuration;
    # start from that so remote operations do not inherit local settings
    if util.safehasattr(src, b'baseui'):
        dst = src.baseui.copy()
        src = src.ui
    else:
        # plain ui object: keep all of its global options
        dst = src.copy()

    # ssh-related settings: command-line options win over configuration
    for key in (b'ssh', b'remotecmd'):
        value = opts.get(key) or src.config(b'ui', key)
        if value:
            dst.setconfig(b"ui", key, value, b'copied')

    # bundle-specific option
    mainreporoot = src.config(b'bundle', b'mainreporoot')
    if mainreporoot:
        dst.setconfig(b'bundle', b'mainreporoot', mainreporoot, b'copied')

    # forward authentication/security/proxy sections wholesale
    sections = (b'auth', b'hostfingerprints', b'hostsecurity', b'http_proxy')
    for section in sections:
        for key, value in src.configitems(section):
            dst.setconfig(section, key, value, b'copied')
    cacerts = src.config(b'web', b'cacerts')
    if cacerts:
        dst.setconfig(b'web', b'cacerts', util.expandpath(cacerts), b'copied')

    return dst
1453
1466
1454
1467
1455 # Files of interest
1468 # Files of interest
1456 # Used to check if the repository has changed looking at mtime and size of
1469 # Used to check if the repository has changed looking at mtime and size of
1457 # these files.
1470 # these files.
1458 foi = [
1471 foi = [
1459 (b'spath', b'00changelog.i'),
1472 (b'spath', b'00changelog.i'),
1460 (b'spath', b'phaseroots'), # ! phase can change content at the same size
1473 (b'spath', b'phaseroots'), # ! phase can change content at the same size
1461 (b'spath', b'obsstore'),
1474 (b'spath', b'obsstore'),
1462 (b'path', b'bookmarks'), # ! bookmark can change content at the same size
1475 (b'path', b'bookmarks'), # ! bookmark can change content at the same size
1463 ]
1476 ]
1464
1477
1465
1478
class cachedlocalrepo(object):
    """Holds a localrepository that can be cached and reused."""

    def __init__(self, repo):
        """Create a new cached repo from an existing repo.

        We assume the passed in repo was recently created. If the
        repo has changed between when it was created and when it was
        turned into a cache, it may not refresh properly.
        """
        assert isinstance(repo, localrepo.localrepository)
        self._repo = repo
        # fingerprint of the files of interest plus the newest mtime seen
        self._state, self.mtime = self._repostate()
        # remember the filter so a refreshed instance offers the same view
        self._filtername = repo.filtername

    def fetch(self):
        """Refresh (if necessary) and return a repository.

        If the cached instance is out of date, it will be recreated
        automatically and returned.

        Returns a tuple of the repo and a boolean indicating whether a new
        repo instance was created.
        """
        # We compare the mtimes and sizes of some well-known files to
        # determine if the repo changed. This is not precise, as mtimes
        # are susceptible to clock skew and imprecise filesystems and
        # file content can change while maintaining the same size.

        state, mtime = self._repostate()
        if state == self._state:
            return self._repo, False

        # fingerprint changed: rebuild the repository object from scratch
        repo = repository(self._repo.baseui, self._repo.url())
        if self._filtername:
            self._repo = repo.filtered(self._filtername)
        else:
            self._repo = repo.unfiltered()
        self._state = state
        self.mtime = mtime

        return self._repo, True

    def _repostate(self):
        """Return (fingerprint tuple, newest mtime) for the files of interest.

        The fingerprint is a tuple of (mtime, size) pairs, one per entry in
        the module-level ``foi`` list.
        """
        state = []
        maxmtime = -1
        for attr, fname in foi:
            prefix = getattr(self._repo, attr)
            p = os.path.join(prefix, fname)
            try:
                st = os.stat(p)
            except OSError:
                # file missing: fall back to the containing directory so a
                # later creation of the file still changes the fingerprint
                st = os.stat(prefix)
            state.append((st[stat.ST_MTIME], st.st_size))
            maxmtime = max(maxmtime, st[stat.ST_MTIME])

        return tuple(state), maxmtime

    def copy(self):
        """Obtain a copy of this class instance.

        A new localrepository instance is obtained. The new instance should be
        completely independent of the original.
        """
        repo = repository(self._repo.baseui, self._repo.origroot)
        if self._filtername:
            repo = repo.filtered(self._filtername)
        else:
            repo = repo.unfiltered()
        c = cachedlocalrepo(repo)
        # carry over the fingerprint so the copy is not considered stale
        c._state = self._state
        c.mtime = self.mtime
        return c
1551 return c
General Comments 0
You need to be logged in to leave comments. Login now