##// END OF EJS Templates
clean: delete obsolete unlinking of .hg/graftstate...
Martin von Zweigbergk -
r44750:3245cdea default
parent child Browse files
Show More
@@ -1,1450 +1,1449 b''
1 # hg.py - repository classes for mercurial
1 # hg.py - repository classes for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 from __future__ import absolute_import
9 from __future__ import absolute_import
10
10
11 import errno
11 import errno
12 import os
12 import os
13 import shutil
13 import shutil
14 import stat
14 import stat
15
15
16 from .i18n import _
16 from .i18n import _
17 from .node import nullid
17 from .node import nullid
18 from .pycompat import getattr
18 from .pycompat import getattr
19
19
20 from . import (
20 from . import (
21 bookmarks,
21 bookmarks,
22 bundlerepo,
22 bundlerepo,
23 cacheutil,
23 cacheutil,
24 cmdutil,
24 cmdutil,
25 destutil,
25 destutil,
26 discovery,
26 discovery,
27 error,
27 error,
28 exchange,
28 exchange,
29 extensions,
29 extensions,
30 httppeer,
30 httppeer,
31 localrepo,
31 localrepo,
32 lock,
32 lock,
33 logcmdutil,
33 logcmdutil,
34 logexchange,
34 logexchange,
35 merge as mergemod,
35 merge as mergemod,
36 narrowspec,
36 narrowspec,
37 node,
37 node,
38 phases,
38 phases,
39 pycompat,
39 pycompat,
40 scmutil,
40 scmutil,
41 sshpeer,
41 sshpeer,
42 statichttprepo,
42 statichttprepo,
43 ui as uimod,
43 ui as uimod,
44 unionrepo,
44 unionrepo,
45 url,
45 url,
46 util,
46 util,
47 verify as verifymod,
47 verify as verifymod,
48 vfs as vfsmod,
48 vfs as vfsmod,
49 )
49 )
50 from .utils import hashutil
50 from .utils import hashutil
51 from .interfaces import repository as repositorymod
51 from .interfaces import repository as repositorymod
52
52
# convenience alias used by the bare `except:` cleanup paths below
release = lock.release

# shared features
sharedbookmarks = b'bookmarks'
57
57
58
58
def _local(path):
    """Return the repo module to use for ``path``.

    A path that names a plain file is assumed to be a bundle, so
    ``bundlerepo`` is returned; otherwise ``localrepo`` is returned.
    Raises error.Abort for paths the OS layer rejects outright.
    """
    path = util.expandpath(util.urllocalpath(path))

    try:
        isfile = os.path.isfile(path)
    # Python 2 raises TypeError, Python 3 ValueError.
    except (TypeError, ValueError) as e:
        raise error.Abort(
            _(b'invalid path %s: %s') % (path, pycompat.bytestr(e))
        )

    # use a real conditional expression instead of the fragile
    # `cond and a or b` idiom (which breaks when `a` is falsy)
    return bundlerepo if isfile else localrepo
71
71
72
72
def addbranchrevs(lrepo, other, branches, revs):
    """Resolve branch names against ``other`` and fold them into ``revs``.

    Returns ``(revs, checkout)`` where ``checkout`` is the revision an
    update should target (or None when nothing was requested).
    """
    peer = other.peer()  # a courtesy to callers using a localrepo for other
    hashbranch, branches = branches
    if not hashbranch and not branches:
        # nothing to resolve; checkout defaults to the first requested rev
        checkout = revs[0] if revs else None
        return revs or None, checkout
    revs = list(revs) if revs else []

    if not peer.capable(b'branchmap'):
        if branches:
            raise error.Abort(_(b"remote branch lookup not supported"))
        revs.append(hashbranch)
        return revs, revs[0]

    with peer.commandexecutor() as e:
        branchmap = e.callcommand(b'branchmap', {}).result()

    def primary(branch):
        # b'.' names the branch of the local working directory
        if branch == b'.':
            if not lrepo:
                raise error.Abort(_(b"dirstate branch not accessible"))
            branch = lrepo.dirstate.branch()
        if branch not in branchmap:
            return False
        revs.extend(node.hex(r) for r in reversed(branchmap[branch]))
        return True

    for branch in branches:
        if not primary(branch):
            raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
    # an unresolved hash-fragment may still be a raw revision hash
    if hashbranch and not primary(hashbranch):
        revs.append(hashbranch)
    return revs, revs[0]
115
115
116
116
def parseurl(path, branches=None):
    '''parse url#branch, returning (url, (branch, branches))'''

    u = util.url(path)
    fragment = u.fragment
    branch = None
    if fragment:
        # the #fragment names a branch; strip it from the URL proper
        branch = fragment
        u.fragment = None
    return bytes(u), (branch, branches or [])
126
126
127
127
# map a URL scheme to the module (or factory) implementing that repo type
schemes = {
    b'bundle': bundlerepo,
    b'union': unionrepo,
    b'file': _local,
    b'http': httppeer,
    b'https': httppeer,
    b'ssh': sshpeer,
    b'static-http': statichttprepo,
}
137
137
138
138
def _peerlookup(path):
    """Return the repo module/instance factory for ``path``'s scheme."""
    u = util.url(path)
    scheme = u.scheme or b'file'
    thing = schemes.get(scheme) or schemes[b'file']
    try:
        return thing(path)
    except TypeError:
        # we can't test callable(thing) because 'thing' can be an unloaded
        # module that implements __call__
        if util.safehasattr(thing, b'instance'):
            return thing
        raise
151
151
152
152
def islocal(repo):
    '''return true if repo (or path pointing to repo) is local'''
    if not isinstance(repo, bytes):
        return repo.local()
    # a bytes path: ask the scheme handler, which may not support locality
    try:
        return _peerlookup(repo).islocal(repo)
    except AttributeError:
        return False
161
161
162
162
def openpath(ui, path, sendaccept=True):
    '''open path with open if local, url.open if remote'''
    pathurl = util.url(path, parsequery=False, parsefragment=False)
    if not pathurl.islocal():
        return url.open(ui, path, sendaccept=sendaccept)
    return util.posixfile(pathurl.localpath(), b'rb')
170
170
171
171
# a list of (ui, repo) functions called for wire peer initialization
wirepeersetupfuncs = []
174
174
175
175
def _peerorrepo(
    ui, path, create=False, presetupfuncs=None, intents=None, createopts=None
):
    """return a repository object for the specified path"""
    obj = _peerlookup(path).instance(
        ui, path, create, intents=intents, createopts=createopts
    )
    # the instance may carry its own (repo-level) ui; prefer it from here on
    ui = getattr(obj, "ui", ui)
    for fn in presetupfuncs or []:
        fn(ui, obj)
    ui.log(b'extension', b'- executing reposetup hooks\n')
    with util.timedcm('all reposetup') as allreposetupstats:
        for name, module in extensions.extensions(ui):
            ui.log(b'extension', b'  - running reposetup for %s\n', name)
            hook = getattr(module, 'reposetup', None)
            if hook:
                with util.timedcm('reposetup %r', name) as stats:
                    hook(ui, obj)
                ui.log(
                    b'extension', b'  > reposetup for %s took %s\n', name, stats
                )
    ui.log(b'extension', b'> all reposetup took %s\n', allreposetupstats)
    # wire peers get their own post-setup hooks
    if not obj.local():
        for fn in wirepeersetupfuncs:
            fn(ui, obj)
    return obj
202
202
203
203
def repository(
    ui,
    path=b'',
    create=False,
    presetupfuncs=None,
    intents=None,
    createopts=None,
):
    """return a repository object for the specified path"""
    peer = _peerorrepo(
        ui,
        path,
        create,
        presetupfuncs=presetupfuncs,
        intents=intents,
        createopts=createopts,
    )
    repo = peer.local()
    if not repo:
        # a remote peer has no local repository object to hand back
        msg = _(b"repository '%s' is not local") % (path or peer.url())
        raise error.Abort(msg)
    return repo.filtered(b'visible')
227
227
228
228
def peer(uiorrepo, opts, path, create=False, intents=None, createopts=None):
    '''return a repository peer for the specified path'''
    rui = remoteui(uiorrepo, opts)
    obj = _peerorrepo(
        rui, path, create, intents=intents, createopts=createopts
    )
    return obj.peer()
235
235
236
236
def defaultdest(source):
    '''return default destination of clone if none is given

    >>> defaultdest(b'foo')
    'foo'
    >>> defaultdest(b'/foo/bar')
    'bar'
    >>> defaultdest(b'/')
    ''
    >>> defaultdest(b'')
    ''
    >>> defaultdest(b'http://example.org/')
    ''
    >>> defaultdest(b'http://example.org/foo/')
    'foo'
    '''
    path = util.url(source).path
    # normpath strips a trailing slash so the basename is meaningful
    return os.path.basename(os.path.normpath(path)) if path else b''
257
257
258
258
def sharedreposource(repo):
    """Returns repository object for source repository of a shared repo.

    If repo is not a shared repository, returns None.
    """
    if repo.sharedpath == repo.path:
        # not a share: store lives inside this repo itself
        return None

    # reuse a previously-resolved source repo when cached on the instance
    if util.safehasattr(repo, b'srcrepo') and repo.srcrepo:
        return repo.srcrepo

    # the sharedpath always ends in the .hg; we want the path to the repo
    source = repo.vfs.split(repo.sharedpath)[0]
    srcurl, branches = parseurl(source)
    srcrepo = repository(repo.ui, srcurl)
    repo.srcrepo = srcrepo  # cache for subsequent calls
    return srcrepo
276
276
277
277
def share(
    ui,
    source,
    dest=None,
    update=True,
    bookmarks=True,
    defaultpath=None,
    relative=False,
):
    '''create a shared repository'''

    if not islocal(source):
        raise error.Abort(_(b'can only share local repositories'))

    # fall back to the clone-style default destination name
    dest = defaultdest(source) if not dest else ui.expandpath(dest)

    if isinstance(source, bytes):
        origsource = ui.expandpath(source)
        source, branches = parseurl(origsource)
        srcrepo = repository(ui, source)
        rev, checkout = addbranchrevs(srcrepo, srcrepo, branches, None)
    else:
        srcrepo = source.local()
        checkout = None

    shareditems = set()
    if bookmarks:
        shareditems.add(sharedbookmarks)

    r = repository(
        ui,
        dest,
        create=True,
        createopts={
            b'sharedrepo': srcrepo,
            b'sharedrelative': relative,
            b'shareditems': shareditems,
        },
    )

    postshare(srcrepo, r, defaultpath=defaultpath)
    # re-open so the new repo picks up the share-related requirements
    r = repository(ui, dest)
    _postshareupdate(r, update, checkout=checkout)
    return r
325
325
326
326
def unshare(ui, repo):
    """convert a shared repository to a normal one

    Copy the store data to the repo and remove the sharedpath data.

    Returns a new repository object representing the unshared repository.

    The passed repository object is not usable after this function is
    called.
    """

    with repo.lock():
        # we use locks here because if we race with commit, we
        # can end up with extra data in the cloned revlogs that's
        # not pointed to by changesets, thus causing verify to
        # fail
        destlock = copystore(ui, repo, repo.path)
        with destlock or util.nullcontextmanager():
            # keep the old pointer around for inspection/debugging
            sharefile = repo.vfs.join(b'sharedpath')
            util.rename(sharefile, sharefile + b'.old')

            repo.requirements.discard(b'shared')
            repo.requirements.discard(b'relshared')
            repo._writerequirements()

    # Removing share changes some fundamental properties of the repo instance.
    # So we instantiate a new repo object and operate on it rather than
    # try to keep the existing repo usable.
    newrepo = repository(repo.baseui, repo.root, create=False)

    # TODO: figure out how to access subrepos that exist, but were previously
    # removed from .hgsub
    ctx = newrepo[b'.']
    for subpath in sorted(ctx.substate):
        ctx.sub(subpath).unshare()

    localrepo.poisonrepository(repo)

    return newrepo
368
368
369
369
def postshare(sourcerepo, destrepo, defaultpath=None):
    """Called after a new shared repo is created.

    The new repo only has a requirements file and pointer to the source.
    This function configures additional shared data.

    Extensions can wrap this function and write additional entries to
    destrepo/.hg/shared to indicate additional pieces of data to be shared.
    """
    default = defaultpath or sourcerepo.ui.config(b'paths', b'default')
    if default:
        destrepo.vfs.write(
            b'hgrc',
            util.tonativeeol(b'[paths]\ndefault = %s\n' % default),
        )
    # narrow clones must mirror the narrowspec into the new working copy
    if repositorymod.NARROW_REQUIREMENT in sourcerepo.requirements:
        with destrepo.wlock():
            narrowspec.copytoworkingcopy(destrepo)
386
386
387
387
def _postshareupdate(repo, update, checkout=None):
    """Maybe perform a working directory update after a shared repo is created.

    ``update`` can be a boolean or a revision to update to.
    """
    if not update:
        return

    repo.ui.status(_(b"updating working directory\n"))
    if update is not True:
        checkout = update
    # try the requested rev first, then sensible fallbacks; b'tip'
    # always resolves, so the loop is guaranteed to bind uprev
    for candidate in (checkout, b'default', b'tip'):
        if candidate is None:
            continue
        try:
            uprev = repo.lookup(candidate)
        except error.RepoLookupError:
            continue
        break
    _update(repo, uprev)
408
408
409
409
def copystore(ui, srcrepo, destpath):
    '''copy files from store of srcrepo in destpath

    returns destlock
    '''
    destlock = None
    try:
        hardlink = None
        topic = _(b'linking') if hardlink else _(b'copying')
        with ui.makeprogress(topic, unit=_(b'files')) as progress:
            num = 0
            srcpublishing = srcrepo.publishing()
            srcvfs = vfsmod.vfs(srcrepo.sharedpath)
            dstvfs = vfsmod.vfs(destpath)
            for f in srcrepo.store.copylist():
                # publishing repos don't carry over draft phase roots
                if srcpublishing and f.endswith(b'phaseroots'):
                    continue
                dstbase = os.path.dirname(f)
                if dstbase and not dstvfs.exists(dstbase):
                    dstvfs.mkdir(dstbase)
                if srcvfs.exists(f):
                    if f.endswith(b'data'):
                        # 'dstbase' may be empty (e.g. revlog format 0)
                        lockfile = os.path.join(dstbase, b"lock")
                        # lock to avoid premature writing to the target
                        destlock = lock.lock(dstvfs, lockfile)
                    hardlink, copied = util.copyfiles(
                        srcvfs.join(f), dstvfs.join(f), hardlink, progress
                    )
                    num += copied
            if hardlink:
                ui.debug(b"linked %d files\n" % num)
            else:
                ui.debug(b"copied %d files\n" % num)
        return destlock
    except:  # re-raises
        release(destlock)
        raise
448
448
449
449
def clonewithshare(
    ui,
    peeropts,
    sharepath,
    source,
    srcpeer,
    dest,
    pull=False,
    rev=None,
    update=True,
    stream=False,
):
    """Perform a clone using a shared repo.

    The store for the repository will be located at <sharepath>/.hg. The
    specified revisions will be cloned or pulled from "source". A shared repo
    will be created at "dest" and a working copy will be created if "update" is
    True.
    """
    revs = None
    if rev:
        if not srcpeer.capable(b'lookup'):
            raise error.Abort(
                _(
                    b"src repository does not support "
                    b"revision lookup and so doesn't "
                    b"support clone by revision"
                )
            )

        # TODO this is batchable.
        remoterevs = []
        for key in rev:
            with srcpeer.commandexecutor() as executor:
                remoterevs.append(
                    executor.callcommand(b'lookup', {b'key': key,}).result()
                )
        revs = remoterevs

    # Obtain a lock before checking for or cloning the pooled repo otherwise
    # 2 clients may race creating or populating it.
    pooldir = os.path.dirname(sharepath)
    # lock class requires the directory to exist.
    try:
        util.makedir(pooldir, False)
    except OSError as exc:
        if exc.errno != errno.EEXIST:
            raise

    poolvfs = vfsmod.vfs(pooldir)
    basename = os.path.basename(sharepath)

    with lock.lock(poolvfs, b'%s.lock' % basename):
        if os.path.exists(sharepath):
            ui.status(
                _(b'(sharing from existing pooled repository %s)\n') % basename
            )
        else:
            ui.status(
                _(b'(sharing from new pooled repository %s)\n') % basename
            )
            # Always use pull mode because hardlinks in share mode don't work
            # well. Never update because working copies aren't necessary in
            # share mode.
            clone(
                ui,
                peeropts,
                source,
                dest=sharepath,
                pull=True,
                revs=rev,
                update=False,
                stream=stream,
            )

    # Resolve the value to put in [paths] section for the source.
    if islocal(source):
        defaultpath = os.path.abspath(util.urllocalpath(source))
    else:
        defaultpath = source

    sharerepo = repository(ui, path=sharepath)
    destrepo = share(
        ui,
        sharerepo,
        dest=dest,
        update=False,
        bookmarks=False,
        defaultpath=defaultpath,
    )

    # We need to perform a pull against the dest repo to fetch bookmarks
    # and other non-store data that isn't shared by default. In the case of
    # non-existing shared repo, this means we pull from the remote twice. This
    # is a bit weird. But at the time it was implemented, there wasn't an easy
    # way to pull just non-changegroup data.
    exchange.pull(destrepo, srcpeer, heads=revs)

    _postshareupdate(destrepo, update)

    return srcpeer, peer(ui, peeropts, dest)
551
551
552
552
# Recomputing branch cache might be slow on big repos,
# so just copy it
def _copycache(srcrepo, dstcachedir, fname):
    """copy a cache from srcrepo to destcachedir (if it exists)"""
    srcfile = srcrepo.vfs.join(b'cache/%s' % fname)
    dstfile = os.path.join(dstcachedir, fname)
    if os.path.exists(srcfile):
        if not os.path.exists(dstcachedir):
            os.mkdir(dstcachedir)
        util.copyfile(srcfile, dstfile)
563
563
564
564
def clone(
    ui,
    peeropts,
    source,
    dest=None,
    pull=False,
    revs=None,
    update=True,
    stream=False,
    branch=None,
    shareopts=None,
    storeincludepats=None,
    storeexcludepats=None,
    depth=None,
):
    """Make a copy of an existing repository.

    Create a copy of an existing repository in a new directory.  The
    source and destination are URLs, as passed to the repository
    function.  Returns a pair of repository peers, the source and
    newly created destination.

    The location of the source is added to the new repository's
    .hg/hgrc file, as the default to be used for future pulls and
    pushes.

    If an exception is raised, the partly cloned/updated destination
    repository will be deleted.

    Arguments:

    source: repository object or URL

    dest: URL of destination repository to create (defaults to base
    name of source repository)

    pull: always pull from source repository, even in local case or if the
    server prefers streaming

    stream: stream raw data uncompressed from repository (fast over
    LAN, slow over WAN)

    revs: revision to clone up to (implies pull=True)

    update: update working directory after clone completes, if
    destination is local repository (True means update to default rev,
    anything else is treated as a revision)

    branch: branches to clone

    shareopts: dict of options to control auto sharing behavior. The "pool" key
    activates auto sharing mode and defines the directory for stores. The
    "mode" key determines how to construct the directory name of the shared
    repository. "identity" means the name is derived from the node of the first
    changeset in the repository. "remote" means the name is derived from the
    remote's path/URL. Defaults to "identity."

    storeincludepats and storeexcludepats: sets of file patterns to include and
    exclude in the repository copy, respectively. If not defined, all files
    will be included (a "full" clone). Otherwise a "narrow" clone containing
    only the requested files will be performed. If ``storeincludepats`` is not
    defined but ``storeexcludepats`` is, ``storeincludepats`` is assumed to be
    ``path:.``. If both are empty sets, no files will be cloned.
    """

    # Normalize ``source`` into a peer: a URL/path string is expanded and
    # connected; anything else is assumed to already be a repo-like object.
    if isinstance(source, bytes):
        origsource = ui.expandpath(source)
        source, branches = parseurl(origsource, branch)
        srcpeer = peer(ui, peeropts, source)
    else:
        srcpeer = source.peer()  # in case we were called with a localrepo
        branches = (None, branch or [])
        origsource = source = srcpeer.url()
    revs, checkout = addbranchrevs(srcpeer, srcpeer, branches, revs)

    if dest is None:
        dest = defaultdest(source)
        if dest:
            ui.status(_(b"destination directory: %s\n") % dest)
    else:
        dest = ui.expandpath(dest)

    dest = util.urllocalpath(dest)
    source = util.urllocalpath(source)

    if not dest:
        raise error.Abort(_(b"empty destination path is not valid"))

    # Refuse to clone onto an existing file, or into a non-empty directory.
    destvfs = vfsmod.vfs(dest, expandpath=True)
    if destvfs.lexists():
        if not destvfs.isdir():
            raise error.Abort(_(b"destination '%s' already exists") % dest)
        elif destvfs.listdir():
            raise error.Abort(_(b"destination '%s' is not empty") % dest)

    # Options forwarded to repository creation (narrow/shallow/lfs flags).
    createopts = {}
    narrow = False

    if storeincludepats is not None:
        narrowspec.validatepatterns(storeincludepats)
        narrow = True

    if storeexcludepats is not None:
        narrowspec.validatepatterns(storeexcludepats)
        narrow = True

    if narrow:
        # Include everything by default if only exclusion patterns defined.
        if storeexcludepats and not storeincludepats:
            storeincludepats = {b'path:.'}

        createopts[b'narrowfiles'] = True

    if depth:
        createopts[b'shallowfilestore'] = True

    if srcpeer.capable(b'lfs-serve'):
        # Repository creation honors the config if it disabled the extension, so
        # we can't just announce that lfs will be enabled. This check avoids
        # saying that lfs will be enabled, and then saying it's an unknown
        # feature. The lfs creation option is set in either case so that a
        # requirement is added. If the extension is explicitly disabled but the
        # requirement is set, the clone aborts early, before transferring any
        # data.
        createopts[b'lfs'] = True

        if extensions.disabledext(b'lfs'):
            ui.status(
                _(
                    b'(remote is using large file support (lfs), but it is '
                    b'explicitly disabled in the local configuration)\n'
                )
            )
        else:
            ui.status(
                _(
                    b'(remote is using large file support (lfs); lfs will '
                    b'be enabled for this repository)\n'
                )
            )

    # Pooled-storage ("share") mode: derive a shared store path and delegate
    # to clonewithshare() when possible.
    shareopts = shareopts or {}
    sharepool = shareopts.get(b'pool')
    sharenamemode = shareopts.get(b'mode')
    if sharepool and islocal(dest):
        sharepath = None
        if sharenamemode == b'identity':
            # Resolve the name from the initial changeset in the remote
            # repository. This returns nullid when the remote is empty. It
            # raises RepoLookupError if revision 0 is filtered or otherwise
            # not available. If we fail to resolve, sharing is not enabled.
            try:
                with srcpeer.commandexecutor() as e:
                    rootnode = e.callcommand(
                        b'lookup', {b'key': b'0',}
                    ).result()

                if rootnode != node.nullid:
                    sharepath = os.path.join(sharepool, node.hex(rootnode))
                else:
                    ui.status(
                        _(
                            b'(not using pooled storage: '
                            b'remote appears to be empty)\n'
                        )
                    )
            except error.RepoLookupError:
                ui.status(
                    _(
                        b'(not using pooled storage: '
                        b'unable to resolve identity of remote)\n'
                    )
                )
        elif sharenamemode == b'remote':
            # Name the pool entry after a hash of the remote path/URL.
            sharepath = os.path.join(
                sharepool, node.hex(hashutil.sha1(source).digest())
            )
        else:
            raise error.Abort(
                _(b'unknown share naming mode: %s') % sharenamemode
            )

        # TODO this is a somewhat arbitrary restriction.
        if narrow:
            ui.status(_(b'(pooled storage not supported for narrow clones)\n'))
            sharepath = None

        if sharepath:
            return clonewithshare(
                ui,
                peeropts,
                sharepath,
                source,
                srcpeer,
                dest,
                pull=pull,
                rev=revs,
                update=update,
                stream=stream,
            )

    # Non-shared clone.  ``cleandir`` names a directory to delete on failure;
    # it is reset to None once the clone is known to have succeeded.
    srclock = destlock = cleandir = None
    srcrepo = srcpeer.local()
    try:
        abspath = origsource
        if islocal(origsource):
            abspath = os.path.abspath(util.urllocalpath(origsource))

        if islocal(dest):
            cleandir = dest

        # A "copy" clone duplicates store files directly instead of pulling;
        # only possible for local, non-secret, full (non-narrow) clones
        # without an explicit revision set.
        copy = False
        if (
            srcrepo
            and srcrepo.cancopy()
            and islocal(dest)
            and not phases.hassecret(srcrepo)
        ):
            copy = not pull and not revs

        # TODO this is a somewhat arbitrary restriction.
        if narrow:
            copy = False

        if copy:
            try:
                # we use a lock here because if we race with commit, we
                # can end up with extra data in the cloned revlogs that's
                # not pointed to by changesets, thus causing verify to
                # fail
                srclock = srcrepo.lock(wait=False)
            except error.LockError:
                copy = False

        if copy:
            srcrepo.hook(b'preoutgoing', throw=True, source=b'clone')
            hgdir = os.path.realpath(os.path.join(dest, b".hg"))
            if not os.path.exists(dest):
                util.makedirs(dest)
            else:
                # only clean up directories we create ourselves
                cleandir = hgdir
            try:
                destpath = hgdir
                util.makedir(destpath, notindexed=True)
            except OSError as inst:
                if inst.errno == errno.EEXIST:
                    cleandir = None
                    raise error.Abort(
                        _(b"destination '%s' already exists") % dest
                    )
                raise

            destlock = copystore(ui, srcrepo, destpath)
            # copy bookmarks over
            srcbookmarks = srcrepo.vfs.join(b'bookmarks')
            dstbookmarks = os.path.join(destpath, b'bookmarks')
            if os.path.exists(srcbookmarks):
                util.copyfile(srcbookmarks, dstbookmarks)

            dstcachedir = os.path.join(destpath, b'cache')
            for cache in cacheutil.cachetocopy(srcrepo):
                _copycache(srcrepo, dstcachedir, cache)

            # we need to re-init the repo after manually copying the data
            # into it
            destpeer = peer(srcrepo, peeropts, dest)
            srcrepo.hook(
                b'outgoing', source=b'clone', node=node.hex(node.nullid)
            )
        else:
            try:
                # only pass ui when no srcrepo
                destpeer = peer(
                    srcrepo or ui,
                    peeropts,
                    dest,
                    create=True,
                    createopts=createopts,
                )
            except OSError as inst:
                if inst.errno == errno.EEXIST:
                    cleandir = None
                    raise error.Abort(
                        _(b"destination '%s' already exists") % dest
                    )
                raise

            if revs:
                if not srcpeer.capable(b'lookup'):
                    raise error.Abort(
                        _(
                            b"src repository does not support "
                            b"revision lookup and so doesn't "
                            b"support clone by revision"
                        )
                    )

                # TODO this is batchable.
                remoterevs = []
                for rev in revs:
                    with srcpeer.commandexecutor() as e:
                        remoterevs.append(
                            e.callcommand(b'lookup', {b'key': rev,}).result()
                        )
                revs = remoterevs

                checkout = revs[0]
            else:
                revs = None
            local = destpeer.local()
            if local:
                if narrow:
                    with local.wlock(), local.lock():
                        local.setnarrowpats(storeincludepats, storeexcludepats)
                        narrowspec.copytoworkingcopy(local)

                u = util.url(abspath)
                defaulturl = bytes(u)
                local.ui.setconfig(b'paths', b'default', defaulturl, b'clone')
                if not stream:
                    if pull:
                        stream = False
                    else:
                        stream = None
                # internal config: ui.quietbookmarkmove
                overrides = {(b'ui', b'quietbookmarkmove'): True}
                with local.ui.configoverride(overrides, b'clone'):
                    exchange.pull(
                        local,
                        srcpeer,
                        revs,
                        streamclonerequested=stream,
                        includepats=storeincludepats,
                        excludepats=storeexcludepats,
                        depth=depth,
                    )
            elif srcrepo:
                # TODO lift restriction once exchange.push() accepts narrow
                # push.
                if narrow:
                    raise error.Abort(
                        _(
                            b'narrow clone not available for '
                            b'remote destinations'
                        )
                    )

                exchange.push(
                    srcrepo,
                    destpeer,
                    revs=revs,
                    bookmarks=srcrepo._bookmarks.keys(),
                )
            else:
                raise error.Abort(
                    _(b"clone from remote to remote not supported")
                )

        # The clone succeeded; don't delete the destination in ``finally``.
        cleandir = None

        destrepo = destpeer.local()
        if destrepo:
            # Record the source as the default path in the new .hg/hgrc
            # (with any password stripped from the URL).
            template = uimod.samplehgrcs[b'cloned']
            u = util.url(abspath)
            u.passwd = None
            defaulturl = bytes(u)
            destrepo.vfs.write(b'hgrc', util.tonativeeol(template % defaulturl))
            destrepo.ui.setconfig(b'paths', b'default', defaulturl, b'clone')

            if ui.configbool(b'experimental', b'remotenames'):
                logexchange.pullremotenames(destrepo, srcpeer)

            if update:
                if update is not True:
                    with srcpeer.commandexecutor() as e:
                        checkout = e.callcommand(
                            b'lookup', {b'key': update,}
                        ).result()

                uprev = None
                status = None
                if checkout is not None:
                    # Some extensions (at least hg-git and hg-subversion) have
                    # a peer.lookup() implementation that returns a name instead
                    # of a nodeid. We work around it here until we've figured
                    # out a better solution.
                    if len(checkout) == 20 and checkout in destrepo:
                        uprev = checkout
                    elif scmutil.isrevsymbol(destrepo, checkout):
                        uprev = scmutil.revsymbol(destrepo, checkout).node()
                else:
                    if update is not True:
                        try:
                            uprev = destrepo.lookup(update)
                        except error.RepoLookupError:
                            pass
                if uprev is None:
                    # No explicit target: prefer the '@' bookmark, then the
                    # default branch tip, then tip.
                    try:
                        uprev = destrepo._bookmarks[b'@']
                        update = b'@'
                        bn = destrepo[uprev].branch()
                        if bn == b'default':
                            status = _(b"updating to bookmark @\n")
                        else:
                            status = (
                                _(b"updating to bookmark @ on branch %s\n") % bn
                            )
                    except KeyError:
                        try:
                            uprev = destrepo.branchtip(b'default')
                        except error.RepoLookupError:
                            uprev = destrepo.lookup(b'tip')
                if not status:
                    bn = destrepo[uprev].branch()
                    status = _(b"updating to branch %s\n") % bn
                destrepo.ui.status(status)
                _update(destrepo, uprev)
                if update in destrepo._bookmarks:
                    bookmarks.activate(destrepo, update)
    finally:
        release(srclock, destlock)
        if cleandir is not None:
            shutil.rmtree(cleandir, True)
        if srcpeer is not None:
            srcpeer.close()
    return srcpeer, destpeer
992
992
993
993
def _showstats(repo, stats, quietempty=False):
    """Print the updated/merged/removed/unresolved summary for *stats*.

    When ``quietempty`` is true and *stats* reports no activity at all,
    print nothing.
    """
    if quietempty and stats.isempty():
        return
    msg = _(
        b"%d files updated, %d files merged, "
        b"%d files removed, %d files unresolved\n"
    )
    counts = (
        stats.updatedcount,
        stats.mergedcount,
        stats.removedcount,
        stats.unresolvedcount,
    )
    repo.ui.status(msg % counts)
1009
1009
1010
1010
def updaterepo(repo, node, overwrite, updatecheck=None):
    """Update the working directory to node.

    When overwrite is set, changes are clobbered, merged else

    returns stats (see pydoc mercurial.merge.applyupdates)"""
    # Thin wrapper around merge.update() for non-merge (linear) updates.
    stats = mergemod.update(
        repo,
        node,
        branchmerge=False,
        force=overwrite,
        labels=[b'working copy', b'destination'],
        updatecheck=updatecheck,
    )
    return stats
1025
1025
1026
1026
def update(repo, node, quietempty=False, updatecheck=None):
    """update the working directory to node"""
    stats = updaterepo(repo, node, False, updatecheck=updatecheck)
    _showstats(repo, stats, quietempty)
    unresolved = stats.unresolvedcount
    if unresolved:
        repo.ui.status(_(b"use 'hg resolve' to retry unresolved file merges\n"))
    # True when the update left unresolved conflicts behind.
    return unresolved > 0
1034
1034
1035
1035
# naming conflict in clone(): a local variable there is called ``update``,
# so keep a module-level alias to the function defined above.
_update = update
1038
1038
1039
1039
def clean(repo, node, show_stats=True, quietempty=False):
    """forcibly switch the working directory to node, clobbering changes

    Returns None.  ``show_stats``/``quietempty`` control whether the
    update statistics line is printed.
    """
    stats = updaterepo(repo, node, True)
    # A forced (overwrite) update can never leave unresolved merges behind.
    assert stats.unresolvedcount == 0
    # NOTE: this used to also unlink .hg/graftstate here; per the upstream
    # change, that cleanup is obsolete in this helper and was removed.
    if show_stats:
        _showstats(repo, stats, quietempty)
1047
1046
1048
1047
# naming conflict in updatetotally(): a parameter there is called ``clean``,
# so keep a module-level alias to the function defined above.
_clean = clean
1051
1050
# Accepted values for the ``updatecheck`` argument of updatetotally();
# any other value makes updatetotally() raise ValueError.
_VALID_UPDATECHECKS = {
    mergemod.UPDATECHECK_ABORT,
    mergemod.UPDATECHECK_NONE,
    mergemod.UPDATECHECK_LINEAR,
    mergemod.UPDATECHECK_NO_CONFLICT,
}
1058
1057
1059
1058
def updatetotally(ui, repo, checkout, brev, clean=False, updatecheck=None):
    """Update the working directory with extra care for non-file components

    This takes care of non-file components below:

    :bookmark: might be advanced or (in)activated

    This takes arguments below:

    :checkout: to which revision the working directory is updated
    :brev: a name, which might be a bookmark to be activated after updating
    :clean: whether changes in the working directory can be discarded
    :updatecheck: how to deal with a dirty working directory

    Valid values for updatecheck are the UPDATECHECK_* constants
    defined in the merge module. Passing `None` will result in using the
    configured default.

    * ABORT: abort if the working directory is dirty
    * NONE: don't check (merge working directory changes into destination)
    * LINEAR: check that update is linear before merging working directory
      changes into destination
    * NO_CONFLICT: check that the update does not result in file merges

    This returns whether conflict is detected at updating or not.
    """
    if updatecheck is None:
        updatecheck = ui.config(b'commands', b'update.check')
        if updatecheck not in _VALID_UPDATECHECKS:
            # If not configured, or invalid value configured
            updatecheck = mergemod.UPDATECHECK_LINEAR
    if updatecheck not in _VALID_UPDATECHECKS:
        # Explicitly passed values, unlike configured ones, must be valid.
        raise ValueError(
            r'Invalid updatecheck value %r (can accept %r)'
            % (updatecheck, _VALID_UPDATECHECKS)
        )
    with repo.wlock():
        movemarkfrom = None
        warndest = False
        if checkout is None:
            # No destination given: compute one, possibly moving the active
            # bookmark from its current location (movemarkfrom).
            updata = destutil.destupdate(repo, clean=clean)
            checkout, movemarkfrom, brev = updata
            warndest = True

        if clean:
            ret = _clean(repo, checkout)
        else:
            if updatecheck == mergemod.UPDATECHECK_ABORT:
                # Bail out up front on a dirty wdir, then let the actual
                # update run unchecked.
                cmdutil.bailifchanged(repo, merge=False)
                updatecheck = mergemod.UPDATECHECK_NONE
            ret = _update(repo, checkout, updatecheck=updatecheck)

        if not ret and movemarkfrom:
            if movemarkfrom == repo[b'.'].node():
                pass  # no-op update
            elif bookmarks.update(repo, [movemarkfrom], repo[b'.'].node()):
                b = ui.label(repo._activebookmark, b'bookmarks.active')
                ui.status(_(b"updating bookmark %s\n") % b)
            else:
                # this can happen with a non-linear update
                b = ui.label(repo._activebookmark, b'bookmarks')
                ui.status(_(b"(leaving bookmark %s)\n") % b)
                bookmarks.deactivate(repo)
        elif brev in repo._bookmarks:
            if brev != repo._activebookmark:
                b = ui.label(brev, b'bookmarks.active')
                ui.status(_(b"(activating bookmark %s)\n") % b)
            bookmarks.activate(repo, brev)
        elif brev:
            # Destination named something that is not a bookmark: leave any
            # active bookmark behind.
            if repo._activebookmark:
                b = ui.label(repo._activebookmark, b'bookmarks')
                ui.status(_(b"(leaving bookmark %s)\n") % b)
            bookmarks.deactivate(repo)

        if warndest:
            destutil.statusotherdests(ui, repo)

    return ret
1138
1137
1139
1138
1140 def merge(
1139 def merge(
1141 repo, node, force=None, remind=True, mergeforce=False, labels=None,
1140 repo, node, force=None, remind=True, mergeforce=False, labels=None,
1142 ):
1141 ):
1143 """Branch merge with node, resolving changes. Return true if any
1142 """Branch merge with node, resolving changes. Return true if any
1144 unresolved conflicts."""
1143 unresolved conflicts."""
1145 stats = mergemod.update(
1144 stats = mergemod.update(
1146 repo,
1145 repo,
1147 node,
1146 node,
1148 branchmerge=True,
1147 branchmerge=True,
1149 force=force,
1148 force=force,
1150 mergeforce=mergeforce,
1149 mergeforce=mergeforce,
1151 labels=labels,
1150 labels=labels,
1152 )
1151 )
1153 _showstats(repo, stats)
1152 _showstats(repo, stats)
1154 if stats.unresolvedcount:
1153 if stats.unresolvedcount:
1155 repo.ui.status(
1154 repo.ui.status(
1156 _(
1155 _(
1157 b"use 'hg resolve' to retry unresolved file merges "
1156 b"use 'hg resolve' to retry unresolved file merges "
1158 b"or 'hg merge --abort' to abandon\n"
1157 b"or 'hg merge --abort' to abandon\n"
1159 )
1158 )
1160 )
1159 )
1161 elif remind:
1160 elif remind:
1162 repo.ui.status(_(b"(branch merge, don't forget to commit)\n"))
1161 repo.ui.status(_(b"(branch merge, don't forget to commit)\n"))
1163 return stats.unresolvedcount > 0
1162 return stats.unresolvedcount > 0
1164
1163
1165
1164
1166 def abortmerge(ui, repo):
1165 def abortmerge(ui, repo):
1167 ms = mergemod.mergestate.read(repo)
1166 ms = mergemod.mergestate.read(repo)
1168 if ms.active():
1167 if ms.active():
1169 # there were conflicts
1168 # there were conflicts
1170 node = ms.localctx.hex()
1169 node = ms.localctx.hex()
1171 else:
1170 else:
1172 # there were no conficts, mergestate was not stored
1171 # there were no conficts, mergestate was not stored
1173 node = repo[b'.'].hex()
1172 node = repo[b'.'].hex()
1174
1173
1175 repo.ui.status(_(b"aborting the merge, updating back to %s\n") % node[:12])
1174 repo.ui.status(_(b"aborting the merge, updating back to %s\n") % node[:12])
1176 stats = mergemod.clean_update(repo[node])
1175 stats = mergemod.clean_update(repo[node])
1177 assert stats.unresolvedcount == 0
1176 assert stats.unresolvedcount == 0
1178 _showstats(repo, stats)
1177 _showstats(repo, stats)
1179
1178
1180
1179
1181 def _incoming(
1180 def _incoming(
1182 displaychlist, subreporecurse, ui, repo, source, opts, buffered=False
1181 displaychlist, subreporecurse, ui, repo, source, opts, buffered=False
1183 ):
1182 ):
1184 """
1183 """
1185 Helper for incoming / gincoming.
1184 Helper for incoming / gincoming.
1186 displaychlist gets called with
1185 displaychlist gets called with
1187 (remoterepo, incomingchangesetlist, displayer) parameters,
1186 (remoterepo, incomingchangesetlist, displayer) parameters,
1188 and is supposed to contain only code that can't be unified.
1187 and is supposed to contain only code that can't be unified.
1189 """
1188 """
1190 source, branches = parseurl(ui.expandpath(source), opts.get(b'branch'))
1189 source, branches = parseurl(ui.expandpath(source), opts.get(b'branch'))
1191 other = peer(repo, opts, source)
1190 other = peer(repo, opts, source)
1192 ui.status(_(b'comparing with %s\n') % util.hidepassword(source))
1191 ui.status(_(b'comparing with %s\n') % util.hidepassword(source))
1193 revs, checkout = addbranchrevs(repo, other, branches, opts.get(b'rev'))
1192 revs, checkout = addbranchrevs(repo, other, branches, opts.get(b'rev'))
1194
1193
1195 if revs:
1194 if revs:
1196 revs = [other.lookup(rev) for rev in revs]
1195 revs = [other.lookup(rev) for rev in revs]
1197 other, chlist, cleanupfn = bundlerepo.getremotechanges(
1196 other, chlist, cleanupfn = bundlerepo.getremotechanges(
1198 ui, repo, other, revs, opts[b"bundle"], opts[b"force"]
1197 ui, repo, other, revs, opts[b"bundle"], opts[b"force"]
1199 )
1198 )
1200 try:
1199 try:
1201 if not chlist:
1200 if not chlist:
1202 ui.status(_(b"no changes found\n"))
1201 ui.status(_(b"no changes found\n"))
1203 return subreporecurse()
1202 return subreporecurse()
1204 ui.pager(b'incoming')
1203 ui.pager(b'incoming')
1205 displayer = logcmdutil.changesetdisplayer(
1204 displayer = logcmdutil.changesetdisplayer(
1206 ui, other, opts, buffered=buffered
1205 ui, other, opts, buffered=buffered
1207 )
1206 )
1208 displaychlist(other, chlist, displayer)
1207 displaychlist(other, chlist, displayer)
1209 displayer.close()
1208 displayer.close()
1210 finally:
1209 finally:
1211 cleanupfn()
1210 cleanupfn()
1212 subreporecurse()
1211 subreporecurse()
1213 return 0 # exit code is zero since we found incoming changes
1212 return 0 # exit code is zero since we found incoming changes
1214
1213
1215
1214
1216 def incoming(ui, repo, source, opts):
1215 def incoming(ui, repo, source, opts):
1217 def subreporecurse():
1216 def subreporecurse():
1218 ret = 1
1217 ret = 1
1219 if opts.get(b'subrepos'):
1218 if opts.get(b'subrepos'):
1220 ctx = repo[None]
1219 ctx = repo[None]
1221 for subpath in sorted(ctx.substate):
1220 for subpath in sorted(ctx.substate):
1222 sub = ctx.sub(subpath)
1221 sub = ctx.sub(subpath)
1223 ret = min(ret, sub.incoming(ui, source, opts))
1222 ret = min(ret, sub.incoming(ui, source, opts))
1224 return ret
1223 return ret
1225
1224
1226 def display(other, chlist, displayer):
1225 def display(other, chlist, displayer):
1227 limit = logcmdutil.getlimit(opts)
1226 limit = logcmdutil.getlimit(opts)
1228 if opts.get(b'newest_first'):
1227 if opts.get(b'newest_first'):
1229 chlist.reverse()
1228 chlist.reverse()
1230 count = 0
1229 count = 0
1231 for n in chlist:
1230 for n in chlist:
1232 if limit is not None and count >= limit:
1231 if limit is not None and count >= limit:
1233 break
1232 break
1234 parents = [p for p in other.changelog.parents(n) if p != nullid]
1233 parents = [p for p in other.changelog.parents(n) if p != nullid]
1235 if opts.get(b'no_merges') and len(parents) == 2:
1234 if opts.get(b'no_merges') and len(parents) == 2:
1236 continue
1235 continue
1237 count += 1
1236 count += 1
1238 displayer.show(other[n])
1237 displayer.show(other[n])
1239
1238
1240 return _incoming(display, subreporecurse, ui, repo, source, opts)
1239 return _incoming(display, subreporecurse, ui, repo, source, opts)
1241
1240
1242
1241
1243 def _outgoing(ui, repo, dest, opts):
1242 def _outgoing(ui, repo, dest, opts):
1244 path = ui.paths.getpath(dest, default=(b'default-push', b'default'))
1243 path = ui.paths.getpath(dest, default=(b'default-push', b'default'))
1245 if not path:
1244 if not path:
1246 raise error.Abort(
1245 raise error.Abort(
1247 _(b'default repository not configured!'),
1246 _(b'default repository not configured!'),
1248 hint=_(b"see 'hg help config.paths'"),
1247 hint=_(b"see 'hg help config.paths'"),
1249 )
1248 )
1250 dest = path.pushloc or path.loc
1249 dest = path.pushloc or path.loc
1251 branches = path.branch, opts.get(b'branch') or []
1250 branches = path.branch, opts.get(b'branch') or []
1252
1251
1253 ui.status(_(b'comparing with %s\n') % util.hidepassword(dest))
1252 ui.status(_(b'comparing with %s\n') % util.hidepassword(dest))
1254 revs, checkout = addbranchrevs(repo, repo, branches, opts.get(b'rev'))
1253 revs, checkout = addbranchrevs(repo, repo, branches, opts.get(b'rev'))
1255 if revs:
1254 if revs:
1256 revs = [repo[rev].node() for rev in scmutil.revrange(repo, revs)]
1255 revs = [repo[rev].node() for rev in scmutil.revrange(repo, revs)]
1257
1256
1258 other = peer(repo, opts, dest)
1257 other = peer(repo, opts, dest)
1259 outgoing = discovery.findcommonoutgoing(
1258 outgoing = discovery.findcommonoutgoing(
1260 repo, other, revs, force=opts.get(b'force')
1259 repo, other, revs, force=opts.get(b'force')
1261 )
1260 )
1262 o = outgoing.missing
1261 o = outgoing.missing
1263 if not o:
1262 if not o:
1264 scmutil.nochangesfound(repo.ui, repo, outgoing.excluded)
1263 scmutil.nochangesfound(repo.ui, repo, outgoing.excluded)
1265 return o, other
1264 return o, other
1266
1265
1267
1266
1268 def outgoing(ui, repo, dest, opts):
1267 def outgoing(ui, repo, dest, opts):
1269 def recurse():
1268 def recurse():
1270 ret = 1
1269 ret = 1
1271 if opts.get(b'subrepos'):
1270 if opts.get(b'subrepos'):
1272 ctx = repo[None]
1271 ctx = repo[None]
1273 for subpath in sorted(ctx.substate):
1272 for subpath in sorted(ctx.substate):
1274 sub = ctx.sub(subpath)
1273 sub = ctx.sub(subpath)
1275 ret = min(ret, sub.outgoing(ui, dest, opts))
1274 ret = min(ret, sub.outgoing(ui, dest, opts))
1276 return ret
1275 return ret
1277
1276
1278 limit = logcmdutil.getlimit(opts)
1277 limit = logcmdutil.getlimit(opts)
1279 o, other = _outgoing(ui, repo, dest, opts)
1278 o, other = _outgoing(ui, repo, dest, opts)
1280 if not o:
1279 if not o:
1281 cmdutil.outgoinghooks(ui, repo, other, opts, o)
1280 cmdutil.outgoinghooks(ui, repo, other, opts, o)
1282 return recurse()
1281 return recurse()
1283
1282
1284 if opts.get(b'newest_first'):
1283 if opts.get(b'newest_first'):
1285 o.reverse()
1284 o.reverse()
1286 ui.pager(b'outgoing')
1285 ui.pager(b'outgoing')
1287 displayer = logcmdutil.changesetdisplayer(ui, repo, opts)
1286 displayer = logcmdutil.changesetdisplayer(ui, repo, opts)
1288 count = 0
1287 count = 0
1289 for n in o:
1288 for n in o:
1290 if limit is not None and count >= limit:
1289 if limit is not None and count >= limit:
1291 break
1290 break
1292 parents = [p for p in repo.changelog.parents(n) if p != nullid]
1291 parents = [p for p in repo.changelog.parents(n) if p != nullid]
1293 if opts.get(b'no_merges') and len(parents) == 2:
1292 if opts.get(b'no_merges') and len(parents) == 2:
1294 continue
1293 continue
1295 count += 1
1294 count += 1
1296 displayer.show(repo[n])
1295 displayer.show(repo[n])
1297 displayer.close()
1296 displayer.close()
1298 cmdutil.outgoinghooks(ui, repo, other, opts, o)
1297 cmdutil.outgoinghooks(ui, repo, other, opts, o)
1299 recurse()
1298 recurse()
1300 return 0 # exit code is zero since we found outgoing changes
1299 return 0 # exit code is zero since we found outgoing changes
1301
1300
1302
1301
1303 def verify(repo, level=None):
1302 def verify(repo, level=None):
1304 """verify the consistency of a repository"""
1303 """verify the consistency of a repository"""
1305 ret = verifymod.verify(repo, level=level)
1304 ret = verifymod.verify(repo, level=level)
1306
1305
1307 # Broken subrepo references in hidden csets don't seem worth worrying about,
1306 # Broken subrepo references in hidden csets don't seem worth worrying about,
1308 # since they can't be pushed/pulled, and --hidden can be used if they are a
1307 # since they can't be pushed/pulled, and --hidden can be used if they are a
1309 # concern.
1308 # concern.
1310
1309
1311 # pathto() is needed for -R case
1310 # pathto() is needed for -R case
1312 revs = repo.revs(
1311 revs = repo.revs(
1313 b"filelog(%s)", util.pathto(repo.root, repo.getcwd(), b'.hgsubstate')
1312 b"filelog(%s)", util.pathto(repo.root, repo.getcwd(), b'.hgsubstate')
1314 )
1313 )
1315
1314
1316 if revs:
1315 if revs:
1317 repo.ui.status(_(b'checking subrepo links\n'))
1316 repo.ui.status(_(b'checking subrepo links\n'))
1318 for rev in revs:
1317 for rev in revs:
1319 ctx = repo[rev]
1318 ctx = repo[rev]
1320 try:
1319 try:
1321 for subpath in ctx.substate:
1320 for subpath in ctx.substate:
1322 try:
1321 try:
1323 ret = (
1322 ret = (
1324 ctx.sub(subpath, allowcreate=False).verify() or ret
1323 ctx.sub(subpath, allowcreate=False).verify() or ret
1325 )
1324 )
1326 except error.RepoError as e:
1325 except error.RepoError as e:
1327 repo.ui.warn(b'%d: %s\n' % (rev, e))
1326 repo.ui.warn(b'%d: %s\n' % (rev, e))
1328 except Exception:
1327 except Exception:
1329 repo.ui.warn(
1328 repo.ui.warn(
1330 _(b'.hgsubstate is corrupt in revision %s\n')
1329 _(b'.hgsubstate is corrupt in revision %s\n')
1331 % node.short(ctx.node())
1330 % node.short(ctx.node())
1332 )
1331 )
1333
1332
1334 return ret
1333 return ret
1335
1334
1336
1335
1337 def remoteui(src, opts):
1336 def remoteui(src, opts):
1338 """build a remote ui from ui or repo and opts"""
1337 """build a remote ui from ui or repo and opts"""
1339 if util.safehasattr(src, b'baseui'): # looks like a repository
1338 if util.safehasattr(src, b'baseui'): # looks like a repository
1340 dst = src.baseui.copy() # drop repo-specific config
1339 dst = src.baseui.copy() # drop repo-specific config
1341 src = src.ui # copy target options from repo
1340 src = src.ui # copy target options from repo
1342 else: # assume it's a global ui object
1341 else: # assume it's a global ui object
1343 dst = src.copy() # keep all global options
1342 dst = src.copy() # keep all global options
1344
1343
1345 # copy ssh-specific options
1344 # copy ssh-specific options
1346 for o in b'ssh', b'remotecmd':
1345 for o in b'ssh', b'remotecmd':
1347 v = opts.get(o) or src.config(b'ui', o)
1346 v = opts.get(o) or src.config(b'ui', o)
1348 if v:
1347 if v:
1349 dst.setconfig(b"ui", o, v, b'copied')
1348 dst.setconfig(b"ui", o, v, b'copied')
1350
1349
1351 # copy bundle-specific options
1350 # copy bundle-specific options
1352 r = src.config(b'bundle', b'mainreporoot')
1351 r = src.config(b'bundle', b'mainreporoot')
1353 if r:
1352 if r:
1354 dst.setconfig(b'bundle', b'mainreporoot', r, b'copied')
1353 dst.setconfig(b'bundle', b'mainreporoot', r, b'copied')
1355
1354
1356 # copy selected local settings to the remote ui
1355 # copy selected local settings to the remote ui
1357 for sect in (b'auth', b'hostfingerprints', b'hostsecurity', b'http_proxy'):
1356 for sect in (b'auth', b'hostfingerprints', b'hostsecurity', b'http_proxy'):
1358 for key, val in src.configitems(sect):
1357 for key, val in src.configitems(sect):
1359 dst.setconfig(sect, key, val, b'copied')
1358 dst.setconfig(sect, key, val, b'copied')
1360 v = src.config(b'web', b'cacerts')
1359 v = src.config(b'web', b'cacerts')
1361 if v:
1360 if v:
1362 dst.setconfig(b'web', b'cacerts', util.expandpath(v), b'copied')
1361 dst.setconfig(b'web', b'cacerts', util.expandpath(v), b'copied')
1363
1362
1364 return dst
1363 return dst
1365
1364
1366
1365
1367 # Files of interest
1366 # Files of interest
1368 # Used to check if the repository has changed looking at mtime and size of
1367 # Used to check if the repository has changed looking at mtime and size of
1369 # these files.
1368 # these files.
1370 foi = [
1369 foi = [
1371 (b'spath', b'00changelog.i'),
1370 (b'spath', b'00changelog.i'),
1372 (b'spath', b'phaseroots'), # ! phase can change content at the same size
1371 (b'spath', b'phaseroots'), # ! phase can change content at the same size
1373 (b'spath', b'obsstore'),
1372 (b'spath', b'obsstore'),
1374 (b'path', b'bookmarks'), # ! bookmark can change content at the same size
1373 (b'path', b'bookmarks'), # ! bookmark can change content at the same size
1375 ]
1374 ]
1376
1375
1377
1376
1378 class cachedlocalrepo(object):
1377 class cachedlocalrepo(object):
1379 """Holds a localrepository that can be cached and reused."""
1378 """Holds a localrepository that can be cached and reused."""
1380
1379
1381 def __init__(self, repo):
1380 def __init__(self, repo):
1382 """Create a new cached repo from an existing repo.
1381 """Create a new cached repo from an existing repo.
1383
1382
1384 We assume the passed in repo was recently created. If the
1383 We assume the passed in repo was recently created. If the
1385 repo has changed between when it was created and when it was
1384 repo has changed between when it was created and when it was
1386 turned into a cache, it may not refresh properly.
1385 turned into a cache, it may not refresh properly.
1387 """
1386 """
1388 assert isinstance(repo, localrepo.localrepository)
1387 assert isinstance(repo, localrepo.localrepository)
1389 self._repo = repo
1388 self._repo = repo
1390 self._state, self.mtime = self._repostate()
1389 self._state, self.mtime = self._repostate()
1391 self._filtername = repo.filtername
1390 self._filtername = repo.filtername
1392
1391
1393 def fetch(self):
1392 def fetch(self):
1394 """Refresh (if necessary) and return a repository.
1393 """Refresh (if necessary) and return a repository.
1395
1394
1396 If the cached instance is out of date, it will be recreated
1395 If the cached instance is out of date, it will be recreated
1397 automatically and returned.
1396 automatically and returned.
1398
1397
1399 Returns a tuple of the repo and a boolean indicating whether a new
1398 Returns a tuple of the repo and a boolean indicating whether a new
1400 repo instance was created.
1399 repo instance was created.
1401 """
1400 """
1402 # We compare the mtimes and sizes of some well-known files to
1401 # We compare the mtimes and sizes of some well-known files to
1403 # determine if the repo changed. This is not precise, as mtimes
1402 # determine if the repo changed. This is not precise, as mtimes
1404 # are susceptible to clock skew and imprecise filesystems and
1403 # are susceptible to clock skew and imprecise filesystems and
1405 # file content can change while maintaining the same size.
1404 # file content can change while maintaining the same size.
1406
1405
1407 state, mtime = self._repostate()
1406 state, mtime = self._repostate()
1408 if state == self._state:
1407 if state == self._state:
1409 return self._repo, False
1408 return self._repo, False
1410
1409
1411 repo = repository(self._repo.baseui, self._repo.url())
1410 repo = repository(self._repo.baseui, self._repo.url())
1412 if self._filtername:
1411 if self._filtername:
1413 self._repo = repo.filtered(self._filtername)
1412 self._repo = repo.filtered(self._filtername)
1414 else:
1413 else:
1415 self._repo = repo.unfiltered()
1414 self._repo = repo.unfiltered()
1416 self._state = state
1415 self._state = state
1417 self.mtime = mtime
1416 self.mtime = mtime
1418
1417
1419 return self._repo, True
1418 return self._repo, True
1420
1419
1421 def _repostate(self):
1420 def _repostate(self):
1422 state = []
1421 state = []
1423 maxmtime = -1
1422 maxmtime = -1
1424 for attr, fname in foi:
1423 for attr, fname in foi:
1425 prefix = getattr(self._repo, attr)
1424 prefix = getattr(self._repo, attr)
1426 p = os.path.join(prefix, fname)
1425 p = os.path.join(prefix, fname)
1427 try:
1426 try:
1428 st = os.stat(p)
1427 st = os.stat(p)
1429 except OSError:
1428 except OSError:
1430 st = os.stat(prefix)
1429 st = os.stat(prefix)
1431 state.append((st[stat.ST_MTIME], st.st_size))
1430 state.append((st[stat.ST_MTIME], st.st_size))
1432 maxmtime = max(maxmtime, st[stat.ST_MTIME])
1431 maxmtime = max(maxmtime, st[stat.ST_MTIME])
1433
1432
1434 return tuple(state), maxmtime
1433 return tuple(state), maxmtime
1435
1434
1436 def copy(self):
1435 def copy(self):
1437 """Obtain a copy of this class instance.
1436 """Obtain a copy of this class instance.
1438
1437
1439 A new localrepository instance is obtained. The new instance should be
1438 A new localrepository instance is obtained. The new instance should be
1440 completely independent of the original.
1439 completely independent of the original.
1441 """
1440 """
1442 repo = repository(self._repo.baseui, self._repo.origroot)
1441 repo = repository(self._repo.baseui, self._repo.origroot)
1443 if self._filtername:
1442 if self._filtername:
1444 repo = repo.filtered(self._filtername)
1443 repo = repo.filtered(self._filtername)
1445 else:
1444 else:
1446 repo = repo.unfiltered()
1445 repo = repo.unfiltered()
1447 c = cachedlocalrepo(repo)
1446 c = cachedlocalrepo(repo)
1448 c._state = self._state
1447 c._state = self._state
1449 c.mtime = self.mtime
1448 c.mtime = self.mtime
1450 return c
1449 return c
General Comments 0
You need to be logged in to leave comments. Login now