##// END OF EJS Templates
clean: check that there are no conflicts after...
Martin von Zweigbergk -
r45005:abcc82bf default
parent child Browse files
Show More
@@ -1,1459 +1,1459 b''
1 # hg.py - repository classes for mercurial
1 # hg.py - repository classes for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 from __future__ import absolute_import
9 from __future__ import absolute_import
10
10
11 import errno
11 import errno
12 import os
12 import os
13 import shutil
13 import shutil
14 import stat
14 import stat
15
15
16 from .i18n import _
16 from .i18n import _
17 from .node import nullid
17 from .node import nullid
18 from .pycompat import getattr
18 from .pycompat import getattr
19
19
20 from . import (
20 from . import (
21 bookmarks,
21 bookmarks,
22 bundlerepo,
22 bundlerepo,
23 cacheutil,
23 cacheutil,
24 cmdutil,
24 cmdutil,
25 destutil,
25 destutil,
26 discovery,
26 discovery,
27 error,
27 error,
28 exchange,
28 exchange,
29 extensions,
29 extensions,
30 httppeer,
30 httppeer,
31 localrepo,
31 localrepo,
32 lock,
32 lock,
33 logcmdutil,
33 logcmdutil,
34 logexchange,
34 logexchange,
35 merge as mergemod,
35 merge as mergemod,
36 narrowspec,
36 narrowspec,
37 node,
37 node,
38 phases,
38 phases,
39 pycompat,
39 pycompat,
40 scmutil,
40 scmutil,
41 sshpeer,
41 sshpeer,
42 statichttprepo,
42 statichttprepo,
43 ui as uimod,
43 ui as uimod,
44 unionrepo,
44 unionrepo,
45 url,
45 url,
46 util,
46 util,
47 verify as verifymod,
47 verify as verifymod,
48 vfs as vfsmod,
48 vfs as vfsmod,
49 )
49 )
50 from .utils import hashutil
50 from .utils import hashutil
51 from .interfaces import repository as repositorymod
51 from .interfaces import repository as repositorymod
52
52
# convenience alias so callers can release locks without importing lock
release = lock.release

# shared features: names of data that a share can pull from its source
sharedbookmarks = b'bookmarks'
57
57
58
58
def _local(path):
    """Return the repository module appropriate for local path *path*.

    A path naming an existing plain file is assumed to be a bundle and is
    handled by ``bundlerepo``; anything else (directory, nonexistent path)
    is handled by ``localrepo``.

    Raises ``error.Abort`` if the path is malformed (e.g. embedded NUL).
    """
    path = util.expandpath(util.urllocalpath(path))

    try:
        # bundles are plain files, repositories are directories
        isfile = os.path.isfile(path)
    # Python 2 raises TypeError, Python 3 ValueError.
    except (TypeError, ValueError) as e:
        raise error.Abort(
            _(b'invalid path %s: %s') % (path, pycompat.bytestr(e))
        )

    # explicit conditional instead of the legacy 'cond and a or b' idiom,
    # which silently misbehaves if the middle operand is ever falsy
    return bundlerepo if isfile else localrepo
71
71
72
72
def addbranchrevs(lrepo, other, branches, revs):
    """Expand branch names in *branches* into revisions using *other*.

    ``branches`` is a ``(hashbranch, branches)`` pair as produced by
    ``parseurl``.  Returns ``(revs, checkout)`` where ``checkout`` is the
    revision an update should target.
    """
    peer = other.peer()  # a courtesy to callers using a localrepo for other
    hashbranch, branches = branches
    if not hashbranch and not branches:
        # nothing branch-related requested; hand the revs back unchanged
        return (revs or None), (revs[0] if revs else None)

    revs = list(revs) if revs else []

    if not peer.capable(b'branchmap'):
        if branches:
            raise error.Abort(_(b"remote branch lookup not supported"))
        revs.append(hashbranch)
        return revs, revs[0]

    with peer.commandexecutor() as e:
        branchmap = e.callcommand(b'branchmap', {}).result()

    def lookupbranch(branch):
        # b'.' means "the branch the local working directory is on"
        if branch == b'.':
            if not lrepo:
                raise error.Abort(_(b"dirstate branch not accessible"))
            branch = lrepo.dirstate.branch()
        if branch not in branchmap:
            return False
        # newest head first, so revs[0] is a sensible checkout target
        revs.extend(node.hex(r) for r in reversed(branchmap[branch]))
        return True

    for branch in branches:
        if not lookupbranch(branch):
            raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
    if hashbranch and not lookupbranch(hashbranch):
        # not a known branch name; treat it as a raw hash/revision
        revs.append(hashbranch)
    return revs, revs[0]
115
115
116
116
def parseurl(path, branches=None):
    '''parse url#branch, returning (url, (branch, branches))'''

    u = util.url(path)
    branch = None
    if u.fragment:
        # the URL fragment names a branch; strip it from the URL proper
        branch, u.fragment = u.fragment, None
    return bytes(u), (branch, branches or [])
126
126
127
127
# URL scheme -> module (or callable) that knows how to open such repos;
# consulted by _peerlookup below
schemes = {
    b'bundle': bundlerepo,
    b'union': unionrepo,
    b'file': _local,
    b'http': httppeer,
    b'https': httppeer,
    b'ssh': sshpeer,
    b'static-http': statichttprepo,
}
137
137
138
138
def _peerlookup(path):
    """Return the handler (module or repo class) for *path*'s scheme."""
    u = util.url(path)
    scheme = u.scheme or b'file'
    handler = schemes.get(scheme) or schemes[b'file']
    try:
        return handler(path)
    except TypeError:
        # we can't test callable(handler) because 'handler' can be an
        # unloaded module that implements __call__
        if not util.safehasattr(handler, b'instance'):
            raise
        return handler
151
151
152
152
def islocal(repo):
    '''return true if repo (or path pointing to repo) is local'''
    if not isinstance(repo, bytes):
        # already a repo/peer object; ask it directly
        return repo.local()
    try:
        return _peerlookup(repo).islocal(repo)
    except AttributeError:
        # handler has no islocal() -> remote-only scheme
        return False
161
161
162
162
def openpath(ui, path, sendaccept=True):
    '''open path with open if local, url.open if remote'''
    pathurl = util.url(path, parsequery=False, parsefragment=False)
    if not pathurl.islocal():
        return url.open(ui, path, sendaccept=sendaccept)
    return util.posixfile(pathurl.localpath(), b'rb')
170
170
171
171
# a list of (ui, repo) functions called for wire peer initialization
wirepeersetupfuncs = []
174
174
175
175
def _peerorrepo(
    ui, path, create=False, presetupfuncs=None, intents=None, createopts=None
):
    """return a repository object for the specified path"""
    obj = _peerlookup(path).instance(
        ui, path, create, intents=intents, createopts=createopts
    )
    # the instance may carry its own repo-level ui; prefer that one
    ui = getattr(obj, "ui", ui)
    for fn in presetupfuncs or []:
        fn(ui, obj)
    ui.log(b'extension', b'- executing reposetup hooks\n')
    with util.timedcm('all reposetup') as allreposetupstats:
        for name, module in extensions.extensions(ui):
            ui.log(b'extension', b' - running reposetup for %s\n', name)
            hook = getattr(module, 'reposetup', None)
            if not hook:
                continue
            with util.timedcm('reposetup %r', name) as stats:
                hook(ui, obj)
            ui.log(
                b'extension', b' > reposetup for %s took %s\n', name, stats
            )
    ui.log(b'extension', b'> all reposetup took %s\n', allreposetupstats)
    if not obj.local():
        # wire peers get their own round of setup functions
        for fn in wirepeersetupfuncs:
            fn(ui, obj)
    return obj
202
202
203
203
def repository(
    ui,
    path=b'',
    create=False,
    presetupfuncs=None,
    intents=None,
    createopts=None,
):
    """return a repository object for the specified path

    Unlike ``peer``, the path must point to a *local* repository;
    otherwise ``error.Abort`` is raised.
    """
    peerobj = _peerorrepo(
        ui,
        path,
        create,
        presetupfuncs=presetupfuncs,
        intents=intents,
        createopts=createopts,
    )
    repo = peerobj.local()
    if not repo:
        raise error.Abort(
            _(b"repository '%s' is not local") % (path or peerobj.url())
        )
    # hand out the view that hides hidden (e.g. obsolete) changesets
    return repo.filtered(b'visible')
227
227
228
228
def peer(uiorrepo, opts, path, create=False, intents=None, createopts=None):
    '''return a repository peer for the specified path'''
    rui = remoteui(uiorrepo, opts)
    obj = _peerorrepo(
        rui, path, create, intents=intents, createopts=createopts
    )
    return obj.peer()
235
235
236
236
def defaultdest(source):
    '''return default destination of clone if none is given

    >>> defaultdest(b'foo')
    'foo'
    >>> defaultdest(b'/foo/bar')
    'bar'
    >>> defaultdest(b'/')
    ''
    >>> defaultdest(b'')
    ''
    >>> defaultdest(b'http://example.org/')
    ''
    >>> defaultdest(b'http://example.org/foo/')
    'foo'
    '''
    path = util.url(source).path
    # last meaningful path component is the destination directory name
    return os.path.basename(os.path.normpath(path)) if path else b''
257
257
258
258
def sharedreposource(repo):
    """Returns repository object for source repository of a shared repo.

    If repo is not a shared repository, returns None.
    """
    if repo.sharedpath == repo.path:
        # sharedpath equals own path -> not a share at all
        return None

    # reuse a previously resolved source repo if one is cached
    if util.safehasattr(repo, b'srcrepo') and repo.srcrepo:
        return repo.srcrepo

    # the sharedpath always ends in the .hg; we want the path to the repo
    source = repo.vfs.split(repo.sharedpath)[0]
    srcurl, branches = parseurl(source)
    srcrepo = repository(repo.ui, srcurl)
    repo.srcrepo = srcrepo  # cache for the next caller
    return srcrepo
276
276
277
277
def share(
    ui,
    source,
    dest=None,
    update=True,
    bookmarks=True,
    defaultpath=None,
    relative=False,
):
    '''create a shared repository'''

    if not islocal(source):
        raise error.Abort(_(b'can only share local repositories'))

    # pick a destination: derived from the source if not given
    dest = ui.expandpath(dest) if dest else defaultdest(source)

    if isinstance(source, bytes):
        origsource = ui.expandpath(source)
        source, branches = parseurl(origsource)
        srcrepo = repository(ui, source)
        rev, checkout = addbranchrevs(srcrepo, srcrepo, branches, None)
    else:
        # already a repo/peer object
        srcrepo = source.local()
        checkout = None

    shareditems = {sharedbookmarks} if bookmarks else set()

    destrepo = repository(
        ui,
        dest,
        create=True,
        createopts={
            b'sharedrepo': srcrepo,
            b'sharedrelative': relative,
            b'shareditems': shareditems,
        },
    )

    postshare(srcrepo, destrepo, defaultpath=defaultpath)
    # re-open so the new share-related requirements take effect
    destrepo = repository(ui, dest)
    _postshareupdate(destrepo, update, checkout=checkout)
    return destrepo
325
325
326
326
def unshare(ui, repo):
    """convert a shared repository to a normal one

    Copy the store data to the repo and remove the sharedpath data.

    Returns a new repository object representing the unshared repository.

    The passed repository object is not usable after this function is
    called.
    """

    with repo.lock():
        # we use locks here because if we race with commit, we
        # can end up with extra data in the cloned revlogs that's
        # not pointed to by changesets, thus causing verify to
        # fail
        destlock = copystore(ui, repo, repo.path)
        with destlock or util.nullcontextmanager():
            # keep the old pointer around for forensics, but disable it
            sharefile = repo.vfs.join(b'sharedpath')
            util.rename(sharefile, sharefile + b'.old')

            for req in (b'shared', b'relshared'):
                repo.requirements.discard(req)
            repo._writerequirements()

    # Removing share changes some fundamental properties of the repo instance.
    # So we instantiate a new repo object and operate on it rather than
    # try to keep the existing repo usable.
    newrepo = repository(repo.baseui, repo.root, create=False)

    # TODO: figure out how to access subrepos that exist, but were previously
    # removed from .hgsub
    ctx = newrepo[b'.']
    for sub in sorted(ctx.substate):
        ctx.sub(sub).unshare()

    localrepo.poisonrepository(repo)

    return newrepo
368
368
369
369
def postshare(sourcerepo, destrepo, defaultpath=None):
    """Called after a new shared repo is created.

    The new repo only has a requirements file and pointer to the source.
    This function configures additional shared data.

    Extensions can wrap this function and write additional entries to
    destrepo/.hg/shared to indicate additional pieces of data to be shared.
    """
    default = defaultpath or sourcerepo.ui.config(b'paths', b'default')
    if default:
        # record where pulls/pushes should go by default
        template = b'[paths]\ndefault = %s\n'
        destrepo.vfs.write(b'hgrc', util.tonativeeol(template % default))
    if repositorymod.NARROW_REQUIREMENT in sourcerepo.requirements:
        # narrow clones also need the narrowspec in the working copy
        with destrepo.wlock():
            narrowspec.copytoworkingcopy(destrepo)
386
386
387
387
def _postshareupdate(repo, update, checkout=None):
    """Maybe perform a working directory update after a shared repo is created.

    ``update`` can be a boolean or a revision to update to.
    """
    if not update:
        return

    repo.ui.status(_(b"updating working directory\n"))
    if update is not True:
        # an explicit revision wins over the checkout hint
        checkout = update
    # NOTE: b'tip' always resolves, so the loop is guaranteed to bind uprev
    for candidate in (checkout, b'default', b'tip'):
        if candidate is None:
            continue
        try:
            uprev = repo.lookup(candidate)
            break
        except error.RepoLookupError:
            continue
    _update(repo, uprev)
408
408
409
409
def copystore(ui, srcrepo, destpath):
    '''copy files from store of srcrepo in destpath

    returns destlock
    '''
    destlock = None
    try:
        # None means "undecided"; util.copyfiles picks link-vs-copy on the
        # first file and reports back what it actually did
        hardlink = None
        topic = _(b'linking') if hardlink else _(b'copying')
        with ui.makeprogress(topic, unit=_(b'files')) as progress:
            copied = 0
            srcpublishing = srcrepo.publishing()
            srcvfs = vfsmod.vfs(srcrepo.sharedpath)
            dstvfs = vfsmod.vfs(destpath)
            for f in srcrepo.store.copylist():
                if srcpublishing and f.endswith(b'phaseroots'):
                    # publishing repos have no interesting phase data
                    continue
                dstbase = os.path.dirname(f)
                if dstbase and not dstvfs.exists(dstbase):
                    dstvfs.mkdir(dstbase)
                if not srcvfs.exists(f):
                    continue
                if f.endswith(b'data'):
                    # 'dstbase' may be empty (e.g. revlog format 0)
                    lockfile = os.path.join(dstbase, b"lock")
                    # lock to avoid premature writing to the target
                    destlock = lock.lock(dstvfs, lockfile)
                hardlink, n = util.copyfiles(
                    srcvfs.join(f), dstvfs.join(f), hardlink, progress
                )
                copied += n
            if hardlink:
                ui.debug(b"linked %d files\n" % copied)
            else:
                ui.debug(b"copied %d files\n" % copied)
        return destlock
    except:  # re-raises
        release(destlock)
        raise
448
448
449
449
def clonewithshare(
    ui,
    peeropts,
    sharepath,
    source,
    srcpeer,
    dest,
    pull=False,
    rev=None,
    update=True,
    stream=False,
):
    """Perform a clone using a shared repo.

    The store for the repository will be located at <sharepath>/.hg. The
    specified revisions will be cloned or pulled from "source". A shared repo
    will be created at "dest" and a working copy will be created if "update" is
    True.
    """
    revs = None
    if rev:
        if not srcpeer.capable(b'lookup'):
            raise error.Abort(
                _(
                    b"src repository does not support "
                    b"revision lookup and so doesn't "
                    b"support clone by revision"
                )
            )

        # TODO this is batchable.
        remoterevs = []
        for revspec in rev:
            with srcpeer.commandexecutor() as e:
                remoterevs.append(
                    e.callcommand(b'lookup', {b'key': revspec,}).result()
                )
        revs = remoterevs

    # Obtain a lock before checking for or cloning the pooled repo otherwise
    # 2 clients may race creating or populating it.
    pooldir = os.path.dirname(sharepath)
    # lock class requires the directory to exist.
    try:
        util.makedir(pooldir, False)
    except OSError as e:
        if e.errno != errno.EEXIST:
            raise

    poolvfs = vfsmod.vfs(pooldir)
    basename = os.path.basename(sharepath)

    with lock.lock(poolvfs, b'%s.lock' % basename):
        if os.path.exists(sharepath):
            ui.status(
                _(b'(sharing from existing pooled repository %s)\n') % basename
            )
        else:
            ui.status(
                _(b'(sharing from new pooled repository %s)\n') % basename
            )
            # Always use pull mode because hardlinks in share mode don't work
            # well. Never update because working copies aren't necessary in
            # share mode.
            clone(
                ui,
                peeropts,
                source,
                dest=sharepath,
                pull=True,
                revs=rev,
                update=False,
                stream=stream,
            )

    # Resolve the value to put in [paths] section for the source.
    if islocal(source):
        defaultpath = os.path.abspath(util.urllocalpath(source))
    else:
        defaultpath = source

    sharerepo = repository(ui, path=sharepath)
    destrepo = share(
        ui,
        sharerepo,
        dest=dest,
        update=False,
        bookmarks=False,
        defaultpath=defaultpath,
    )

    # We need to perform a pull against the dest repo to fetch bookmarks
    # and other non-store data that isn't shared by default. In the case of
    # non-existing shared repo, this means we pull from the remote twice. This
    # is a bit weird. But at the time it was implemented, there wasn't an easy
    # way to pull just non-changegroup data.
    exchange.pull(destrepo, srcpeer, heads=revs)

    _postshareupdate(destrepo, update)

    return srcpeer, peer(ui, peeropts, dest)
551
551
552
552
# Recomputing branch cache might be slow on big repos,
# so just copy it
def _copycache(srcrepo, dstcachedir, fname):
    """copy a cache from srcrepo to destcachedir (if it exists)"""
    srcfile = srcrepo.vfs.join(b'cache/%s' % fname)
    dstfile = os.path.join(dstcachedir, fname)
    if not os.path.exists(srcfile):
        return
    if not os.path.exists(dstcachedir):
        os.mkdir(dstcachedir)
    util.copyfile(srcfile, dstfile)
563
563
564
564
def clone(
    ui,
    peeropts,
    source,
    dest=None,
    pull=False,
    revs=None,
    update=True,
    stream=False,
    branch=None,
    shareopts=None,
    storeincludepats=None,
    storeexcludepats=None,
    depth=None,
):
    """Make a copy of an existing repository.

    Create a copy of an existing repository in a new directory.  The
    source and destination are URLs, as passed to the repository
    function.  Returns a pair of repository peers, the source and
    newly created destination.

    The location of the source is added to the new repository's
    .hg/hgrc file, as the default to be used for future pulls and
    pushes.

    If an exception is raised, the partly cloned/updated destination
    repository will be deleted.

    Arguments:

    source: repository object or URL

    dest: URL of destination repository to create (defaults to base
    name of source repository)

    pull: always pull from source repository, even in local case or if the
    server prefers streaming

    stream: stream raw data uncompressed from repository (fast over
    LAN, slow over WAN)

    revs: revision to clone up to (implies pull=True)

    update: update working directory after clone completes, if
    destination is local repository (True means update to default rev,
    anything else is treated as a revision)

    branch: branches to clone

    shareopts: dict of options to control auto sharing behavior. The "pool" key
    activates auto sharing mode and defines the directory for stores. The
    "mode" key determines how to construct the directory name of the shared
    repository. "identity" means the name is derived from the node of the first
    changeset in the repository. "remote" means the name is derived from the
    remote's path/URL. Defaults to "identity."

    storeincludepats and storeexcludepats: sets of file patterns to include and
    exclude in the repository copy, respectively. If not defined, all files
    will be included (a "full" clone). Otherwise a "narrow" clone containing
    only the requested files will be performed. If ``storeincludepats`` is not
    defined but ``storeexcludepats`` is, ``storeincludepats`` is assumed to be
    ``path:.``. If both are empty sets, no files will be cloned.
    """

    if isinstance(source, bytes):
        origsource = ui.expandpath(source)
        source, branches = parseurl(origsource, branch)
        srcpeer = peer(ui, peeropts, source)
    else:
        srcpeer = source.peer()  # in case we were called with a localrepo
        branches = (None, branch or [])
        origsource = source = srcpeer.url()
    revs, checkout = addbranchrevs(srcpeer, srcpeer, branches, revs)

    if dest is None:
        dest = defaultdest(source)
        if dest:
            ui.status(_(b"destination directory: %s\n") % dest)
    else:
        dest = ui.expandpath(dest)

    dest = util.urllocalpath(dest)
    source = util.urllocalpath(source)

    if not dest:
        raise error.Abort(_(b"empty destination path is not valid"))

    destvfs = vfsmod.vfs(dest, expandpath=True)
    if destvfs.lexists():
        if not destvfs.isdir():
            raise error.Abort(_(b"destination '%s' already exists") % dest)
        elif destvfs.listdir():
            raise error.Abort(_(b"destination '%s' is not empty") % dest)

    createopts = {}
    narrow = False

    if storeincludepats is not None:
        narrowspec.validatepatterns(storeincludepats)
        narrow = True

    if storeexcludepats is not None:
        narrowspec.validatepatterns(storeexcludepats)
        narrow = True

    if narrow:
        # Include everything by default if only exclusion patterns defined.
        if storeexcludepats and not storeincludepats:
            storeincludepats = {b'path:.'}

        createopts[b'narrowfiles'] = True

    if depth:
        createopts[b'shallowfilestore'] = True

    if srcpeer.capable(b'lfs-serve'):
        # Repository creation honors the config if it disabled the extension, so
        # we can't just announce that lfs will be enabled. This check avoids
        # saying that lfs will be enabled, and then saying it's an unknown
        # feature. The lfs creation option is set in either case so that a
        # requirement is added. If the extension is explicitly disabled but the
        # requirement is set, the clone aborts early, before transferring any
        # data.
        createopts[b'lfs'] = True

        if extensions.disabledext(b'lfs'):
            ui.status(
                _(
                    b'(remote is using large file support (lfs), but it is '
                    b'explicitly disabled in the local configuration)\n'
                )
            )
        else:
            ui.status(
                _(
                    b'(remote is using large file support (lfs); lfs will '
                    b'be enabled for this repository)\n'
                )
            )

    shareopts = shareopts or {}
    sharepool = shareopts.get(b'pool')
    sharenamemode = shareopts.get(b'mode')
    if sharepool and islocal(dest):
        sharepath = None
        if sharenamemode == b'identity':
            # Resolve the name from the initial changeset in the remote
            # repository. This returns nullid when the remote is empty. It
            # raises RepoLookupError if revision 0 is filtered or otherwise
            # not available. If we fail to resolve, sharing is not enabled.
            try:
                with srcpeer.commandexecutor() as e:
                    rootnode = e.callcommand(
                        b'lookup', {b'key': b'0',}
                    ).result()

                if rootnode != node.nullid:
                    sharepath = os.path.join(sharepool, node.hex(rootnode))
                else:
                    ui.status(
                        _(
                            b'(not using pooled storage: '
                            b'remote appears to be empty)\n'
                        )
                    )
            except error.RepoLookupError:
                ui.status(
                    _(
                        b'(not using pooled storage: '
                        b'unable to resolve identity of remote)\n'
                    )
                )
        elif sharenamemode == b'remote':
            sharepath = os.path.join(
                sharepool, node.hex(hashutil.sha1(source).digest())
            )
        else:
            raise error.Abort(
                _(b'unknown share naming mode: %s') % sharenamemode
            )

        # TODO this is a somewhat arbitrary restriction.
        if narrow:
            ui.status(_(b'(pooled storage not supported for narrow clones)\n'))
            sharepath = None

        if sharepath:
            return clonewithshare(
                ui,
                peeropts,
                sharepath,
                source,
                srcpeer,
                dest,
                pull=pull,
                rev=revs,
                update=update,
                stream=stream,
            )

    srclock = destlock = cleandir = None
    srcrepo = srcpeer.local()
    try:
        abspath = origsource
        if islocal(origsource):
            abspath = os.path.abspath(util.urllocalpath(origsource))

        if islocal(dest):
            cleandir = dest

        copy = False
        if (
            srcrepo
            and srcrepo.cancopy()
            and islocal(dest)
            and not phases.hassecret(srcrepo)
        ):
            copy = not pull and not revs

        # TODO this is a somewhat arbitrary restriction.
        if narrow:
            copy = False

        if copy:
            try:
                # we use a lock here because if we race with commit, we
                # can end up with extra data in the cloned revlogs that's
                # not pointed to by changesets, thus causing verify to
                # fail
                srclock = srcrepo.lock(wait=False)
            except error.LockError:
                copy = False

        if copy:
            srcrepo.hook(b'preoutgoing', throw=True, source=b'clone')
            hgdir = os.path.realpath(os.path.join(dest, b".hg"))
            if not os.path.exists(dest):
                util.makedirs(dest)
            else:
                # only clean up directories we create ourselves
                cleandir = hgdir
            try:
                destpath = hgdir
                util.makedir(destpath, notindexed=True)
            except OSError as inst:
                if inst.errno == errno.EEXIST:
                    cleandir = None
                    raise error.Abort(
                        _(b"destination '%s' already exists") % dest
                    )
                raise

            destlock = copystore(ui, srcrepo, destpath)
            # copy bookmarks over
            srcbookmarks = srcrepo.vfs.join(b'bookmarks')
            dstbookmarks = os.path.join(destpath, b'bookmarks')
            if os.path.exists(srcbookmarks):
                util.copyfile(srcbookmarks, dstbookmarks)

            dstcachedir = os.path.join(destpath, b'cache')
            for cache in cacheutil.cachetocopy(srcrepo):
                _copycache(srcrepo, dstcachedir, cache)

            # we need to re-init the repo after manually copying the data
            # into it
            destpeer = peer(srcrepo, peeropts, dest)
            srcrepo.hook(
                b'outgoing', source=b'clone', node=node.hex(node.nullid)
            )
        else:
            try:
                # only pass ui when no srcrepo
                destpeer = peer(
                    srcrepo or ui,
                    peeropts,
                    dest,
                    create=True,
                    createopts=createopts,
                )
            except OSError as inst:
                if inst.errno == errno.EEXIST:
                    cleandir = None
                    raise error.Abort(
                        _(b"destination '%s' already exists") % dest
                    )
                raise

            if revs:
                if not srcpeer.capable(b'lookup'):
                    raise error.Abort(
                        _(
                            b"src repository does not support "
                            b"revision lookup and so doesn't "
                            b"support clone by revision"
                        )
                    )

                # TODO this is batchable.
                remoterevs = []
                for rev in revs:
                    with srcpeer.commandexecutor() as e:
                        remoterevs.append(
                            e.callcommand(b'lookup', {b'key': rev,}).result()
                        )
                revs = remoterevs

                checkout = revs[0]
            else:
                revs = None
            local = destpeer.local()
            if local:
                if narrow:
                    with local.wlock(), local.lock():
                        local.setnarrowpats(storeincludepats, storeexcludepats)
                        narrowspec.copytoworkingcopy(local)

                u = util.url(abspath)
                defaulturl = bytes(u)
                local.ui.setconfig(b'paths', b'default', defaulturl, b'clone')
                if not stream:
                    if pull:
                        stream = False
                    else:
                        stream = None
                # internal config: ui.quietbookmarkmove
                overrides = {(b'ui', b'quietbookmarkmove'): True}
                with local.ui.configoverride(overrides, b'clone'):
                    exchange.pull(
                        local,
                        srcpeer,
                        revs,
                        streamclonerequested=stream,
                        includepats=storeincludepats,
                        excludepats=storeexcludepats,
                        depth=depth,
                    )
            elif srcrepo:
                # TODO lift restriction once exchange.push() accepts narrow
                # push.
                if narrow:
                    raise error.Abort(
                        _(
                            b'narrow clone not available for '
                            b'remote destinations'
                        )
                    )

                exchange.push(
                    srcrepo,
                    destpeer,
                    revs=revs,
                    bookmarks=srcrepo._bookmarks.keys(),
                )
            else:
                raise error.Abort(
                    _(b"clone from remote to remote not supported")
                )

        cleandir = None

        destrepo = destpeer.local()
        if destrepo:
            template = uimod.samplehgrcs[b'cloned']
            u = util.url(abspath)
            u.passwd = None
            defaulturl = bytes(u)
            destrepo.vfs.write(b'hgrc', util.tonativeeol(template % defaulturl))
            destrepo.ui.setconfig(b'paths', b'default', defaulturl, b'clone')

            if ui.configbool(b'experimental', b'remotenames'):
                logexchange.pullremotenames(destrepo, srcpeer)

            if update:
                if update is not True:
                    with srcpeer.commandexecutor() as e:
                        checkout = e.callcommand(
                            b'lookup', {b'key': update,}
                        ).result()

                uprev = None
                status = None
                if checkout is not None:
                    # Some extensions (at least hg-git and hg-subversion) have
                    # a peer.lookup() implementation that returns a name instead
                    # of a nodeid. We work around it here until we've figured
                    # out a better solution.
                    if len(checkout) == 20 and checkout in destrepo:
                        uprev = checkout
                    elif scmutil.isrevsymbol(destrepo, checkout):
                        uprev = scmutil.revsymbol(destrepo, checkout).node()
                    else:
                        if update is not True:
                            try:
                                uprev = destrepo.lookup(update)
                            except error.RepoLookupError:
                                pass
                if uprev is None:
                    try:
                        uprev = destrepo._bookmarks[b'@']
                        update = b'@'
                        bn = destrepo[uprev].branch()
                        if bn == b'default':
                            status = _(b"updating to bookmark @\n")
                        else:
                            status = (
                                _(b"updating to bookmark @ on branch %s\n") % bn
                            )
                    except KeyError:
                        try:
                            uprev = destrepo.branchtip(b'default')
                        except error.RepoLookupError:
                            uprev = destrepo.lookup(b'tip')
                if not status:
                    bn = destrepo[uprev].branch()
                    status = _(b"updating to branch %s\n") % bn
                destrepo.ui.status(status)
                _update(destrepo, uprev)
                if update in destrepo._bookmarks:
                    bookmarks.activate(destrepo, update)
    finally:
        release(srclock, destlock)
        if cleandir is not None:
            # best-effort removal of the partially cloned destination
            shutil.rmtree(cleandir, True)
        if srcpeer is not None:
            srcpeer.close()
    return srcpeer, destpeer
992
992
993
993
def _showstats(repo, stats, quietempty=False):
    """Print merge/update statistics on the repo's ui.

    ``stats`` carries ``updatedcount``/``mergedcount``/``removedcount``/
    ``unresolvedcount`` (see mergemod.update results used elsewhere in this
    file).  When ``quietempty`` is true and the stats report nothing,
    nothing is printed.
    """
    if quietempty and stats.isempty():
        return
    repo.ui.status(
        _(
            b"%d files updated, %d files merged, "
            b"%d files removed, %d files unresolved\n"
        )
        % (
            stats.updatedcount,
            stats.mergedcount,
            stats.removedcount,
            stats.unresolvedcount,
        )
    )
1009
1009
1010
1010
def updaterepo(repo, node, overwrite, updatecheck=None):
    """Update the working directory to node.

    When overwrite is set, changes are clobbered, merged else

    returns stats (see pydoc mercurial.merge.applyupdates)"""
    # Thin wrapper around mergemod.update: never a branch merge, and the
    # labels mark conflict sides for any file merges that do occur.
    return mergemod.update(
        repo,
        node,
        branchmerge=False,
        force=overwrite,
        labels=[b'working copy', b'destination'],
        updatecheck=updatecheck,
    )
1025
1025
1026
1026
def update(repo, node, quietempty=False, updatecheck=None):
    """update the working directory to node

    Returns True if there are unresolved files (so callers can treat a
    truthy result as "conflicts happened").
    """
    stats = updaterepo(repo, node, False, updatecheck=updatecheck)
    _showstats(repo, stats, quietempty)
    if stats.unresolvedcount:
        repo.ui.status(_(b"use 'hg resolve' to retry unresolved file merges\n"))
    return stats.unresolvedcount > 0
1034
1034
1035
1035
# naming conflict in clone()
_update = update
1038
1038
1039
1039
def clean(repo, node, show_stats=True, quietempty=False):
    """forcibly switch the working directory to node, clobbering changes"""
    stats = updaterepo(repo, node, True)
    # An overwriting update never leaves conflicts behind; this is the
    # invariant introduced by this change (previously the count was
    # returned to the caller instead).
    assert stats.unresolvedcount == 0
    repo.vfs.unlinkpath(b'graftstate', ignoremissing=True)
    if show_stats:
        _showstats(repo, stats, quietempty)
1047
1047
1048
1048
# naming conflict in updatetotally()
_clean = clean
1051
1051
# The set of updatecheck policies accepted by updatetotally(); anything
# else configured under commands.update.check falls back to LINEAR.
_VALID_UPDATECHECKS = {
    mergemod.UPDATECHECK_ABORT,
    mergemod.UPDATECHECK_NONE,
    mergemod.UPDATECHECK_LINEAR,
    mergemod.UPDATECHECK_NO_CONFLICT,
}
1058
1058
1059
1059
def updatetotally(ui, repo, checkout, brev, clean=False, updatecheck=None):
    """Update the working directory with extra care for non-file components

    This takes care of non-file components below:

    :bookmark: might be advanced or (in)activated

    This takes arguments below:

    :checkout: to which revision the working directory is updated
    :brev: a name, which might be a bookmark to be activated after updating
    :clean: whether changes in the working directory can be discarded
    :updatecheck: how to deal with a dirty working directory

    Valid values for updatecheck are the UPDATECHECK_* constants
    defined in the merge module. Passing `None` will result in using the
    configured default.

    * ABORT: abort if the working directory is dirty
    * NONE: don't check (merge working directory changes into destination)
    * LINEAR: check that update is linear before merging working directory
              changes into destination
    * NO_CONFLICT: check that the update does not result in file merges

    This returns whether conflict is detected at updating or not.
    """
    if updatecheck is None:
        updatecheck = ui.config(b'commands', b'update.check')
        if updatecheck not in _VALID_UPDATECHECKS:
            # If not configured, or invalid value configured
            updatecheck = mergemod.UPDATECHECK_LINEAR
    if updatecheck not in _VALID_UPDATECHECKS:
        # An explicit caller-supplied value must be valid.
        raise ValueError(
            r'Invalid updatecheck value %r (can accept %r)'
            % (updatecheck, _VALID_UPDATECHECKS)
        )
    with repo.wlock():
        movemarkfrom = None
        warndest = False
        if checkout is None:
            updata = destutil.destupdate(repo, clean=clean)
            checkout, movemarkfrom, brev = updata
            warndest = True

        if clean:
            ret = _clean(repo, checkout)
        else:
            if updatecheck == mergemod.UPDATECHECK_ABORT:
                cmdutil.bailifchanged(repo, merge=False)
                updatecheck = mergemod.UPDATECHECK_NONE
            ret = _update(repo, checkout, updatecheck=updatecheck)

        if not ret and movemarkfrom:
            if movemarkfrom == repo[b'.'].node():
                pass  # no-op update
            elif bookmarks.update(repo, [movemarkfrom], repo[b'.'].node()):
                b = ui.label(repo._activebookmark, b'bookmarks.active')
                ui.status(_(b"updating bookmark %s\n") % b)
            else:
                # this can happen with a non-linear update
                b = ui.label(repo._activebookmark, b'bookmarks')
                ui.status(_(b"(leaving bookmark %s)\n") % b)
                bookmarks.deactivate(repo)
        elif brev in repo._bookmarks:
            if brev != repo._activebookmark:
                b = ui.label(brev, b'bookmarks.active')
                ui.status(_(b"(activating bookmark %s)\n") % b)
            bookmarks.activate(repo, brev)
        elif brev:
            if repo._activebookmark:
                b = ui.label(repo._activebookmark, b'bookmarks')
                ui.status(_(b"(leaving bookmark %s)\n") % b)
            bookmarks.deactivate(repo)

        if warndest:
            destutil.statusotherdests(ui, repo)

    return ret
1138
1138
1139
1139
def merge(
    repo,
    node,
    force=None,
    remind=True,
    mergeforce=False,
    labels=None,
    abort=False,
):
    """Branch merge with node, resolving changes. Return true if any
    unresolved conflicts."""
    if abort:
        # --abort short-circuits the merge entirely.
        return abortmerge(repo.ui, repo)

    # Perform the actual branch merge into the working directory.
    mergestats = mergemod.update(
        repo,
        node,
        branchmerge=True,
        force=force,
        mergeforce=mergeforce,
        labels=labels,
    )
    _showstats(repo, mergestats)

    hasconflicts = mergestats.unresolvedcount > 0
    if hasconflicts:
        # Tell the user how to either continue or back out.
        repo.ui.status(
            _(
                b"use 'hg resolve' to retry unresolved file merges "
                b"or 'hg merge --abort' to abandon\n"
            )
        )
    elif remind:
        repo.ui.status(_(b"(branch merge, don't forget to commit)\n"))
    return hasconflicts
1173
1173
1174
1174
def abortmerge(ui, repo):
    """Abort an in-progress merge, updating back to the local parent.

    Return true if any unresolved conflicts remain afterwards."""
    mergestate = mergemod.mergestate.read(repo)
    # If there were conflicts, the pre-merge parent was recorded in the
    # merge state; otherwise no state was stored and '.' is already it.
    if mergestate.active():
        node = mergestate.localctx.hex()
    else:
        node = repo[b'.'].hex()

    repo.ui.status(_(b"aborting the merge, updating back to %s\n") % node[:12])
    stats = mergemod.update(repo, node, branchmerge=False, force=True)
    _showstats(repo, stats)
    return stats.unresolvedcount > 0
1188
1188
1189
1189
def _incoming(
    displaychlist, subreporecurse, ui, repo, source, opts, buffered=False
):
    """Common driver behind incoming / gincoming.

    displaychlist is invoked with (remoterepo, incomingchangesetlist,
    displayer) and holds the only code that cannot be unified between
    the two callers.
    """
    source, branches = parseurl(ui.expandpath(source), opts.get(b'branch'))
    remote = peer(repo, opts, source)
    ui.status(_(b'comparing with %s\n') % util.hidepassword(source))
    revs, checkout = addbranchrevs(repo, remote, branches, opts.get(b'rev'))
    if revs:
        revs = [remote.lookup(rev) for rev in revs]

    # Materialize the incoming changesets in a temporary bundle repo;
    # cleanup() removes that bundle once display is done.
    remote, incomingnodes, cleanup = bundlerepo.getremotechanges(
        ui, repo, remote, revs, opts[b"bundle"], opts[b"force"]
    )
    try:
        if not incomingnodes:
            ui.status(_(b"no changes found\n"))
            return subreporecurse()
        ui.pager(b'incoming')
        displayer = logcmdutil.changesetdisplayer(
            ui, remote, opts, buffered=buffered
        )
        displaychlist(remote, incomingnodes, displayer)
        displayer.close()
    finally:
        cleanup()
    subreporecurse()
    return 0  # exit code is zero since we found incoming changes
1223
1223
1224
1224
def incoming(ui, repo, source, opts):
    """Show changesets found in the given source repository.

    Returns 0 when incoming changes exist, otherwise the subrepo-aware
    "no changes" status (1 unless a subrepo had incoming changes)."""

    def _recursesubs():
        # Aggregate over subrepos: 0 (changes found) wins over 1.
        ret = 1
        if opts.get(b'subrepos'):
            ctx = repo[None]
            for subpath in sorted(ctx.substate):
                ret = min(ret, ctx.sub(subpath).incoming(ui, source, opts))
        return ret

    def _display(other, chlist, displayer):
        limit = logcmdutil.getlimit(opts)
        if opts.get(b'newest_first'):
            chlist.reverse()
        shown = 0
        for n in chlist:
            if limit is not None and shown >= limit:
                break
            # Merge changesets are skipped (not counted) with --no-merges.
            parents = [p for p in other.changelog.parents(n) if p != nullid]
            if opts.get(b'no_merges') and len(parents) == 2:
                continue
            shown += 1
            displayer.show(other[n])

    return _incoming(_display, _recursesubs, ui, repo, source, opts)
1250
1250
1251
1251
def _outgoing(ui, repo, dest, opts):
    """Return (missingnodes, remotepeer) for changesets absent from dest.

    missingnodes is empty when the destination already has everything;
    in that case a "no changes found" style message has been emitted."""
    path = ui.paths.getpath(dest, default=(b'default-push', b'default'))
    if not path:
        raise error.Abort(
            _(b'default repository not configured!'),
            hint=_(b"see 'hg help config.paths'"),
        )
    dest = path.pushloc or path.loc
    branches = (path.branch, opts.get(b'branch') or [])

    ui.status(_(b'comparing with %s\n') % util.hidepassword(dest))
    revs, checkout = addbranchrevs(repo, repo, branches, opts.get(b'rev'))
    if revs:
        revs = [repo[rev].node() for rev in scmutil.revrange(repo, revs)]

    remote = peer(repo, opts, dest)
    og = discovery.findcommonoutgoing(
        repo, remote, revs, force=opts.get(b'force')
    )
    missing = og.missing
    if not missing:
        # Nothing to push; report excluded (e.g. secret) changesets, if any.
        scmutil.nochangesfound(repo.ui, repo, og.excluded)
    return missing, remote
1275
1275
1276
1276
def outgoing(ui, repo, dest, opts):
    """Show changesets not found in the destination repository.

    Returns 0 when outgoing changes exist, otherwise the subrepo-aware
    "no changes" status (1 unless a subrepo had outgoing changes)."""

    def _recursesubs():
        # Aggregate over subrepos: 0 (changes found) wins over 1.
        ret = 1
        if opts.get(b'subrepos'):
            ctx = repo[None]
            for subpath in sorted(ctx.substate):
                ret = min(ret, ctx.sub(subpath).outgoing(ui, dest, opts))
        return ret

    limit = logcmdutil.getlimit(opts)
    o, other = _outgoing(ui, repo, dest, opts)
    if not o:
        # Still run the hooks so extensions observe the (empty) result.
        cmdutil.outgoinghooks(ui, repo, other, opts, o)
        return _recursesubs()

    if opts.get(b'newest_first'):
        o.reverse()
    ui.pager(b'outgoing')
    displayer = logcmdutil.changesetdisplayer(ui, repo, opts)
    shown = 0
    for n in o:
        if limit is not None and shown >= limit:
            break
        # Merge changesets are skipped (not counted) with --no-merges.
        parents = [p for p in repo.changelog.parents(n) if p != nullid]
        if opts.get(b'no_merges') and len(parents) == 2:
            continue
        shown += 1
        displayer.show(repo[n])
    displayer.close()
    cmdutil.outgoinghooks(ui, repo, other, opts, o)
    _recursesubs()
    return 0  # exit code is zero since we found outgoing changes
1310
1310
1311
1311
def verify(repo, level=None):
    """Verify the consistency of a repository, including subrepo links."""
    ret = verifymod.verify(repo, level=level)

    # Broken subrepo references in hidden csets don't seem worth worrying
    # about, since they can't be pushed/pulled, and --hidden can be used if
    # they are a concern.

    # pathto() is needed for -R case
    revs = repo.revs(
        b"filelog(%s)", util.pathto(repo.root, repo.getcwd(), b'.hgsubstate')
    )
    if not revs:
        return ret

    repo.ui.status(_(b'checking subrepo links\n'))
    for rev in revs:
        ctx = repo[rev]
        try:
            for subpath in ctx.substate:
                try:
                    # A failing subrepo verify poisons the overall result
                    # but does not stop checking the remaining subrepos.
                    ret = (
                        ctx.sub(subpath, allowcreate=False).verify() or ret
                    )
                except error.RepoError as e:
                    repo.ui.warn(b'%d: %s\n' % (rev, e))
        except Exception:
            # Reading .hgsubstate itself blew up; report and keep going.
            repo.ui.warn(
                _(b'.hgsubstate is corrupt in revision %s\n')
                % node.short(ctx.node())
            )
    return ret
1344
1344
1345
1345
def remoteui(src, opts):
    """Build a ui suitable for talking to a remote, from a ui or repo."""
    if util.safehasattr(src, b'baseui'):  # looks like a repository
        # Start from the repo's base ui so repo-local config is dropped,
        # but read the options to forward from the repo's effective ui.
        dst = src.baseui.copy()
        src = src.ui
    else:  # assume it's a global ui object
        dst = src.copy()  # keep all global options

    # ssh-specific options: explicit command-line opts win over config.
    for opt in (b'ssh', b'remotecmd'):
        value = opts.get(opt) or src.config(b'ui', opt)
        if value:
            dst.setconfig(b"ui", opt, value, b'copied')

    # bundle-specific options
    mainreporoot = src.config(b'bundle', b'mainreporoot')
    if mainreporoot:
        dst.setconfig(b'bundle', b'mainreporoot', mainreporoot, b'copied')

    # Selected auth/security/proxy sections are forwarded wholesale.
    for section in (
        b'auth',
        b'hostfingerprints',
        b'hostsecurity',
        b'http_proxy',
    ):
        for key, val in src.configitems(section):
            dst.setconfig(section, key, val, b'copied')
    cacerts = src.config(b'web', b'cacerts')
    if cacerts:
        dst.setconfig(b'web', b'cacerts', util.expandpath(cacerts), b'copied')

    return dst
1374
1374
1375
1375
# Files of interest
# Used to check if the repository has changed looking at mtime and size of
# these files.
# Each entry is (repo attribute naming the containing directory, filename);
# cachedlocalrepo._repostate stat()s each one to build its freshness state.
foi = [
    (b'spath', b'00changelog.i'),
    (b'spath', b'phaseroots'),  # ! phase can change content at the same size
    (b'spath', b'obsstore'),
    (b'path', b'bookmarks'),  # ! bookmark can change content at the same size
]
1385
1385
1386
1386
class cachedlocalrepo(object):
    """Holds a localrepository that can be cached and reused."""

    def __init__(self, repo):
        """Create a new cached repo from an existing repo.

        We assume the passed in repo was recently created. If the
        repo has changed between when it was created and when it was
        turned into a cache, it may not refresh properly.
        """
        assert isinstance(repo, localrepo.localrepository)
        self._repo = repo
        # Snapshot of (mtime, size) pairs for the files of interest (foi),
        # plus the newest mtime seen; used by fetch() to detect staleness.
        self._state, self.mtime = self._repostate()
        # Remember the view filter so a refreshed repo keeps the same view.
        self._filtername = repo.filtername

    def fetch(self):
        """Refresh (if necessary) and return a repository.

        If the cached instance is out of date, it will be recreated
        automatically and returned.

        Returns a tuple of the repo and a boolean indicating whether a new
        repo instance was created.
        """
        # We compare the mtimes and sizes of some well-known files to
        # determine if the repo changed. This is not precise, as mtimes
        # are susceptible to clock skew and imprecise filesystems and
        # file content can change while maintaining the same size.

        state, mtime = self._repostate()
        if state == self._state:
            # Nothing of interest changed on disk; reuse the cached repo.
            return self._repo, False

        # Stale: build a fresh repository and re-apply the original filter.
        repo = repository(self._repo.baseui, self._repo.url())
        if self._filtername:
            self._repo = repo.filtered(self._filtername)
        else:
            self._repo = repo.unfiltered()
        self._state = state
        self.mtime = mtime

        return self._repo, True

    def _repostate(self):
        """Return (state, maxmtime) describing the repo's on-disk state.

        state is a tuple of (mtime, size) pairs, one per file of interest;
        maxmtime is the newest mtime observed among them.
        """
        state = []
        maxmtime = -1
        for attr, fname in foi:
            prefix = getattr(self._repo, attr)
            p = os.path.join(prefix, fname)
            try:
                st = os.stat(p)
            except OSError:
                # The file may not exist (e.g. no bookmarks yet); stat the
                # containing directory so the entry is still comparable.
                st = os.stat(prefix)
            state.append((st[stat.ST_MTIME], st.st_size))
            maxmtime = max(maxmtime, st[stat.ST_MTIME])

        return tuple(state), maxmtime

    def copy(self):
        """Obtain a copy of this class instance.

        A new localrepository instance is obtained. The new instance should be
        completely independent of the original.
        """
        repo = repository(self._repo.baseui, self._repo.origroot)
        if self._filtername:
            repo = repo.filtered(self._filtername)
        else:
            repo = repo.unfiltered()
        c = cachedlocalrepo(repo)
        # Share the freshness snapshot so the copy refreshes consistently.
        c._state = self._state
        c.mtime = self.mtime
        return c
General Comments 0
You need to be logged in to leave comments. Login now